/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "qapi/error.h"
|
2018-02-24 07:26:26 +00:00
|
|
|
#include "exec/exec-all.h"
|
2015-08-21 07:04:50 +00:00
|
|
|
#include "exec/memory.h"
|
|
|
|
#include "exec/address-spaces.h"
|
|
|
|
#include "exec/ioport.h"
|
|
|
|
#include "qapi/visitor.h"
|
|
|
|
#include "qemu/bitops.h"
|
|
|
|
#include "qom/object.h"
|
|
|
|
|
|
|
|
#include "exec/memory-internal.h"
|
|
|
|
#include "exec/ram_addr.h"
|
|
|
|
#include "sysemu/sysemu.h"
|
|
|
|
|
|
|
|
//#define DEBUG_UNASSIGNED
|
|
|
|
|
2018-02-13 16:30:26 +00:00
|
|
|
#define RAM_ADDR_INVALID (~(ram_addr_t)0)
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
// Unicorn engine
|
2016-08-27 13:49:11 +00:00
|
|
|
MemoryRegion *memory_map(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms)
|
2015-08-21 07:04:50 +00:00
|
|
|
{
|
2015-08-30 02:51:28 +00:00
|
|
|
MemoryRegion *ram = g_new(MemoryRegion, 1);
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2018-03-04 03:23:26 +00:00
|
|
|
memory_region_init_ram_nomigrate(uc, ram, NULL, "pc.ram", size, perms, &error_abort);
|
2018-02-21 13:07:39 +00:00
|
|
|
if (ram->ram_block == NULL) {
|
2015-11-10 03:44:29 +00:00
|
|
|
// out of memory
|
|
|
|
return NULL;
|
2018-02-21 13:07:39 +00:00
|
|
|
}
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2015-08-30 02:51:28 +00:00
|
|
|
memory_region_add_subregion(get_system_memory(uc), begin, ram);
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
if (uc->current_cpu)
|
2018-03-02 00:35:21 +00:00
|
|
|
tlb_flush(uc->current_cpu);
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2015-08-30 02:51:28 +00:00
|
|
|
return ram;
|
2015-08-21 07:04:50 +00:00
|
|
|
}
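
/*
 * Illustrative usage sketch (added comment, not part of the original source):
 * this is roughly how the Unicorn core is expected to call memory_map() when
 * the user asks for a new guest mapping. The constants UC_PROT_ALL and
 * UC_ERR_NOMEM and the bookkeeping shown here are assumptions for
 * illustration only, not a quote of the caller's code.
 *
 *     MemoryRegion *mr = memory_map(uc, 0x100000, 0x10000, UC_PROT_ALL);
 *     if (mr == NULL) {
 *         // backing RAM could not be allocated
 *         return UC_ERR_NOMEM;
 *     }
 *     uc->mapped_blocks[uc->mapped_block_count++] = mr;
 */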

MemoryRegion *memory_map_ptr(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms, void *ptr)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram_ptr(uc, ram, NULL, "pc.ram", size, ptr);
    ram->perms = perms;
    if (ram->ram_block == NULL) {
        // out of memory
        return NULL;
    }

    memory_region_add_subregion(get_system_memory(uc), begin, ram);

    if (uc->current_cpu)
        tlb_flush(uc->current_cpu);

    return ram;
}

static void memory_region_update_container_subregions(MemoryRegion *subregion);

void memory_unmap(struct uc_struct *uc, MemoryRegion *mr)
{
    int i;
    target_ulong addr;
    Object *obj;

    // Make sure all pages associated with the MemoryRegion are flushed
    // Only need to do this if we are in a running state
    if (uc->current_cpu) {
        for (addr = mr->addr; addr < mr->end; addr += uc->target_page_size) {
            tlb_flush_page(uc->current_cpu, addr);
        }
    }
    memory_region_del_subregion(get_system_memory(uc), mr);

    for (i = 0; i < uc->mapped_block_count; i++) {
        if (uc->mapped_blocks[i] == mr) {
            uc->mapped_block_count--;
            //shift remainder of array down over deleted pointer
            memmove(&uc->mapped_blocks[i], &uc->mapped_blocks[i + 1], sizeof(MemoryRegion*) * (uc->mapped_block_count - i));
            mr->destructor(mr);
            obj = OBJECT(mr);
            obj->ref = 1;
            obj->free = g_free;
            g_free((char *)mr->name);
            mr->name = NULL;
            object_property_del_child(mr->uc, qdev_get_machine(mr->uc), obj, &error_abort);
            break;
        }
    }
}

int memory_free(struct uc_struct *uc)
{
    MemoryRegion *mr;
    Object *obj;
    int i;

    for (i = 0; i < uc->mapped_block_count; i++) {
        mr = uc->mapped_blocks[i];
        mr->enabled = false;
        memory_region_del_subregion(get_system_memory(uc), mr);
        mr->destructor(mr);
        obj = OBJECT(mr);
        obj->ref = 1;
        obj->free = g_free;
        object_property_del_child(mr->uc, qdev_get_machine(mr->uc), obj, &error_abort);
    }

    return 0;
}

static void memory_init(struct uc_struct *uc)
{
}

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    AddrRange ar;
    ar.start = start;
    ar.size = size;
    return ar;
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
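
/*
 * Worked example (added comment, not in the original source): with
 * r1 = [0x1000, 0x3000) and r2 = [0x2000, 0x4000), addrrange_end(r1) is
 * 0x3000, addrrange_intersects(r1, r2) is true because r2.start lies inside
 * r1, and addrrange_intersection(r1, r2) is [0x2000, 0x3000):
 * start = max(0x1000, 0x2000) = 0x2000 and
 * size = min(0x3000, 0x4000) - 0x2000 = 0x1000.
 */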

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, ...)     \
    do {                                                            \
        MemoryListener *_listener;                                  \
                                                                    \
        switch (_direction) {                                       \
        case Forward:                                               \
            QTAILQ_FOREACH(_listener, &uc->memory_listeners, link) { \
                if (_listener->_callback) {                         \
                    _listener->_callback(_listener, ##__VA_ARGS__); \
                }                                                   \
            }                                                       \
            break;                                                  \
        case Reverse:                                               \
            QTAILQ_FOREACH_REVERSE(_listener, &uc->memory_listeners, \
                                   memory_listeners, link) {        \
                if (_listener->_callback) {                         \
                    _listener->_callback(_listener, ##__VA_ARGS__); \
                }                                                   \
            }                                                       \
            break;                                                  \
        default:                                                    \
            abort();                                                \
        }                                                           \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, ...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##__VA_ARGS__); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##__VA_ARGS__); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, ...)       \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);      \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##__VA_ARGS__);   \
    } while(0);

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};
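
/*
 * Added explanatory note (not in the original source): a FlatView is the
 * flattened, non-overlapping form of the MemoryRegion tree. For example, if
 * the system memory container holds a RAM region at [0x0000, 0x8000) and a
 * higher-priority MMIO subregion at [0x4000, 0x5000), rendering produces
 * three FlatRanges kept sorted by address:
 *
 *     [0x0000, 0x4000)  -> RAM,  offset_in_region = 0x0000
 *     [0x4000, 0x5000)  -> MMIO, offset_in_region = 0x0000
 *     [0x5000, 0x8000)  -> RAM,  offset_in_region = 0x5000
 */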

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, AddressSpace *as)
{
    MemoryRegionSection s = {0};
    s.mr = fr->mr;
    s.address_space = as;
    s.offset_within_region = fr->offset_in_region;
    s.size = fr->addr.size;
    s.offset_within_address_space = int128_get64(fr->addr.start);
    s.readonly = fr->readonly;
    return s;
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(void)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
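
/*
 * Worked example (added comment, not in the original source): two FlatRanges
 * produced from the same RAM MemoryRegion, [0x0000, 0x4000) at
 * offset_in_region 0x0000 and [0x4000, 0x8000) at offset_in_region 0x4000,
 * satisfy can_merge(): they are adjacent, refer to the same mr, their
 * offsets line up, and their dirty_log_mask and readonly flags match.
 * flatview_simplify() therefore collapses them into one range
 * [0x0000, 0x8000).
 */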

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
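
/*
 * Worked example (added comment, not in the original source): on a
 * little-endian target, a 4-byte value 0x11223344 moving through a
 * DEVICE_BIG_ENDIAN region takes the bswap32() path above and comes out as
 * 0x44332211; 1-byte accesses are never swapped, and regions whose
 * endianness matches the target are passed through unchanged.
 */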

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    // UNICORN: Commented out
    //trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    // UNICORN: Commented out
    //if (mr->flush_coalesced_mmio) {
    //    qemu_flush_coalesced_mmio_buffer();
    //}
    tmp = mr->ops->read(mr->uc, mr->opaque, addr, size);
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    // UNICORN: commented out
    //if (mr->flush_coalesced_mmio) {
    //    qemu_flush_coalesced_mmio_buffer();
    //}
    r = mr->ops->read_with_attrs(mr->uc, mr->opaque, addr, &tmp, size, attrs);
    // UNICORN: Commented out
    //trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    mr->ops->write(mr->uc, mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    // UNICORN: Commented out
    //if (mr->flush_coalesced_mmio) {
    //    qemu_flush_coalesced_mmio_buffer();
    //}
    tmp = (*value >> shift) & mask;
    // UNICORN: Commented out
    //trace_memory_region_ops_write(mr, addr, tmp, size);
    return mr->ops->write_with_attrs(mr->uc, mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = (0-1ULL) >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
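
/*
 * Worked example (added comment, not in the original source): for an 8-byte
 * read of a device whose ops limit accesses to 4 bytes
 * (access_size_max == 4), access_size becomes 4 and access_mask 0xffffffff.
 * On a little-endian region the loop issues two access_fn calls, one at
 * addr + 0 with shift 0 and one at addr + 4 with shift 32, and the two
 * 32-bit results are OR-ed into *value. On a big-endian region the shifts
 * are reversed: 32 for the first chunk, 0 for the second.
 */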

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &mr->uc->address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = mr->dirty_log_mask;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
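
/*
 * Added explanatory note (not in the original source): aliases are handled
 * by recursing into mr->alias with the base rewound by the alias's own
 * address and by alias_offset, so the FlatRanges that end up in the view
 * always reference the aliased (terminating) region, with offset_in_region
 * shifted by alias_offset rather than referencing the alias itself.
 */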

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = flatview_new();

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    view = as->current_map;
    flatview_ref(view);
    return view;
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);
    int i;

    mem_begin(as);
    for (i = 0; i < new_view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&new_view->ranges[i], as);
        mem_add(as, &mrs);
    }
    mem_commit(as);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        address_space_update_topology_pass(as, old_view, new_view, false);
        address_space_update_topology_pass(as, old_view, new_view, true);
    }

    flatview_unref(as->current_map);
    as->current_map = new_view;

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);
}

void memory_region_transaction_begin(struct uc_struct *uc)
{
    ++uc->memory_region_transaction_depth;
}

static void memory_region_clear_pending(struct uc_struct *uc)
{
    uc->memory_region_update_pending = false;
}

void memory_region_transaction_commit(struct uc_struct *uc)
{
    AddressSpace *as;

    assert(uc->memory_region_transaction_depth);
    --uc->memory_region_transaction_depth;
    if (!uc->memory_region_transaction_depth) {
        if (uc->memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &uc->address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }

            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        }
        memory_region_clear_pending(uc);
    }
}
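
/*
 * Illustrative usage sketch (added comment, not part of the original
 * source): callers that change several regions at once are expected to
 * bracket the updates in a transaction so the flat views and listeners are
 * only rebuilt once, at the outermost commit:
 *
 *     memory_region_transaction_begin(uc);
 *     // add/remove/resize subregions; helpers set
 *     // uc->memory_region_update_pending as needed
 *     memory_region_transaction_commit(uc);
 *
 * Nested begin/commit pairs are allowed; only the commit that brings the
 * depth back to zero triggers address_space_update_topology().
 */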

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->uc, memory_region_get_ram_addr(mr));
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
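
/*
 * Worked example (added comment, not in the original source): characters
 * that would confuse QOM path parsing are escaped as "\x" plus two hex
 * digits, so a region named "pc.ram[1]" becomes "pc.ram\x5b1\x5d"
 * ('[' is 0x5b, ']' is 0x5d), while a name containing none of
 * '/', '[', ']' or '\\' is simply duplicated unchanged.
 */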

void memory_region_init(struct uc_struct *uc, MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(uc, mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->uc = uc;
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = qdev_get_machine(uc);
            uc->owner = owner;
        }

        object_property_add_child(uc, owner, name_array, OBJECT(mr), &error_abort);
        object_unref(uc, OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

static void memory_region_get_addr(struct uc_struct *uc,
                                   Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(uc, obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(struct uc_struct *uc,
                                        Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(uc, obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(struct uc_struct *uc, Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(uc, obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(struct uc_struct *uc,
                                       Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(uc, obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(struct uc_struct *uc,
                                   Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(uc, obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(struct uc_struct *uc, Object *obj, void *opaque)
{
    MemoryRegion *mr = MEMORY_REGION(uc, obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);

    op = object_property_add(mr->uc, OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(mr->uc, OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(mr->uc, OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(mr->uc, OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static uint64_t unassigned_mem_read(struct uc_struct* uc, hwaddr addr, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (uc->current_cpu != NULL) {
        cpu_unassigned_access(uc->current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(struct uc_struct* uc, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (uc->current_cpu != NULL) {
        cpu_unassigned_access(uc->current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    NULL,
    NULL,
    NULL,
    NULL,
    DEVICE_NATIVE_ENDIAN,

    {0,0,false,unassigned_mem_accepts},
};
static uint64_t memory_region_ram_device_read(struct uc_struct *uc,
|
|
|
|
void *opaque, hwaddr addr,
|
|
|
|
unsigned size)
|
|
|
|
{
|
|
|
|
MemoryRegion *mr = opaque;
|
|
|
|
uint64_t data = (uint64_t)~0;
|
|
|
|
|
|
|
|
switch (size) {
|
|
|
|
case 1:
|
|
|
|
data = *(uint8_t *)(mr->ram_block->host + addr);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
data = *(uint16_t *)(mr->ram_block->host + addr);
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
data = *(uint32_t *)(mr->ram_block->host + addr);
|
|
|
|
break;
|
|
|
|
case 8:
|
|
|
|
data = *(uint64_t *)(mr->ram_block->host + addr);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unicorn: commented out
|
|
|
|
//trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
|
|
|
|
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
|
|
|
|
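/* Write counterpart: store the (possibly truncated) value into the host
 * mapping with a single access of the requested width. */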
static void memory_region_ram_device_write(struct uc_struct *uc,
|
|
|
|
void *opaque, hwaddr addr,
|
|
|
|
uint64_t data, unsigned size)
|
|
|
|
{
|
|
|
|
MemoryRegion *mr = opaque;
|
|
|
|
|
|
|
|
// Unicorn: commented out
|
|
|
|
//trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
|
|
|
|
|
|
|
|
switch (size) {
|
|
|
|
case 1:
|
|
|
|
*(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
*(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
*(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
|
|
|
|
break;
|
|
|
|
case 8:
|
|
|
|
*(uint64_t *)(mr->ram_block->host + addr) = data;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
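/* MemoryRegionOps for ram_device regions: host-endian, 1-8 byte accesses,
 * unaligned allowed, dispatched to the direct host accessors above. */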
static const MemoryRegionOps ram_device_mem_ops = {
|
|
|
|
memory_region_ram_device_read,
|
|
|
|
memory_region_ram_device_write,
|
|
|
|
NULL,
|
|
|
|
NULL,
|
2018-03-02 16:24:12 +00:00
|
|
|
DEVICE_HOST_ENDIAN,
|
memory: Don't use memcpy for ram_device regions
Backports commit 1b16ded6a512809f99c133a97f19026fe612b2de from qemu
2018-02-26 04:05:10 +00:00
|
|
|
// valid
|
|
|
|
{
|
|
|
|
1, 8,
|
|
|
|
true,
|
|
|
|
},
|
|
|
|
// impl
|
|
|
|
{
|
|
|
|
1, 8,
|
|
|
|
true,
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
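/* Validate an access against mr->ops->valid: reject unaligned accesses the
 * region does not allow, then let the accepts() hook (if any) veto each
 * access_size-sized chunk. Unset min/max access sizes default to 1 and 4. */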
bool memory_region_access_valid(MemoryRegion *mr,
|
|
|
|
hwaddr addr,
|
|
|
|
unsigned size,
|
|
|
|
bool is_write)
|
|
|
|
{
|
|
|
|
int access_size_min, access_size_max;
|
|
|
|
int access_size, i;
|
|
|
|
|
|
|
|
if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!mr->ops->valid.accepts) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
access_size_min = mr->ops->valid.min_access_size;
|
|
|
|
if (!mr->ops->valid.min_access_size) {
|
|
|
|
access_size_min = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
access_size_max = mr->ops->valid.max_access_size;
|
|
|
|
if (!mr->ops->valid.max_access_size) {
|
|
|
|
access_size_max = 4;
|
|
|
|
}
|
|
|
|
|
|
|
|
access_size = MAX(MIN(size, access_size_max), access_size_min);
|
|
|
|
for (i = 0; i < size; i += access_size) {
|
|
|
|
if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
|
|
|
|
is_write)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-02-12 21:56:31 +00:00
|
|
|
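/* Inner read dispatch: choose the accessor matching the region's ops
 * (read, read_with_attrs, or the legacy old_mmio callbacks) and let
 * access_with_adjusted_size() split the access to the impl limits. */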
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
|
|
|
|
hwaddr addr,
|
|
|
|
uint64_t *pval,
|
|
|
|
unsigned size,
|
|
|
|
MemTxAttrs attrs)
|
2015-08-21 07:04:50 +00:00
|
|
|
{
|
2018-02-12 21:56:31 +00:00
|
|
|
*pval = 0;
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
if (mr->ops->read) {
|
2018-02-12 21:56:31 +00:00
|
|
|
return access_with_adjusted_size(addr, pval, size,
|
|
|
|
mr->ops->impl.min_access_size,
|
|
|
|
mr->ops->impl.max_access_size,
|
|
|
|
memory_region_read_accessor,
|
|
|
|
mr, attrs);
|
|
|
|
} else if (mr->ops->read_with_attrs) {
|
|
|
|
return access_with_adjusted_size(addr, pval, size,
|
|
|
|
mr->ops->impl.min_access_size,
|
|
|
|
mr->ops->impl.max_access_size,
|
|
|
|
memory_region_read_with_attrs_accessor,
|
|
|
|
mr, attrs);
|
2015-08-21 07:04:50 +00:00
|
|
|
} else {
|
2018-02-12 21:56:31 +00:00
|
|
|
return access_with_adjusted_size(addr, pval, size, 1, 4,
|
|
|
|
memory_region_oldmmio_read_accessor,
|
|
|
|
mr, attrs);
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-12 22:26:24 +00:00
|
|
|
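/* Read entry point: failed validation returns the unassigned-memory
 * pattern and MEMTX_DECODE_ERROR; otherwise dispatch the read and
 * adjust endianness for the caller. */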
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
|
|
|
|
hwaddr addr,
|
|
|
|
uint64_t *pval,
|
|
|
|
unsigned size,
|
|
|
|
MemTxAttrs attrs)
|
2015-08-21 07:04:50 +00:00
|
|
|
{
|
2018-02-12 21:56:31 +00:00
|
|
|
MemTxResult r;
|
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
if (!memory_region_access_valid(mr, addr, size, false)) {
|
|
|
|
*pval = unassigned_mem_read(mr->uc, addr, size);
|
2018-02-12 21:56:31 +00:00
|
|
|
return MEMTX_DECODE_ERROR;
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
2018-02-12 21:56:31 +00:00
|
|
|
r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
|
2015-08-21 07:04:50 +00:00
|
|
|
adjust_endianness(mr, pval, size);
|
2018-02-12 21:56:31 +00:00
|
|
|
return r;
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
2018-02-12 22:26:24 +00:00
|
|
|
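/* Write entry point: validate the access, convert the data to the
 * region's endianness, then dispatch through the matching write accessor. */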
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
|
|
|
|
hwaddr addr,
|
|
|
|
uint64_t data,
|
|
|
|
unsigned size,
|
|
|
|
MemTxAttrs attrs)
|
2015-08-21 07:04:50 +00:00
|
|
|
{
|
|
|
|
if (!memory_region_access_valid(mr, addr, size, true)) {
|
|
|
|
unassigned_mem_write(mr->uc, addr, data, size);
|
2018-02-12 21:56:31 +00:00
|
|
|
return MEMTX_DECODE_ERROR;
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
adjust_endianness(mr, &data, size);
|
|
|
|
|
|
|
|
if (mr->ops->write) {
|
2018-02-12 21:56:31 +00:00
|
|
|
return access_with_adjusted_size(addr, &data, size,
|
|
|
|
mr->ops->impl.min_access_size,
|
|
|
|
mr->ops->impl.max_access_size,
|
|
|
|
memory_region_write_accessor, mr,
|
|
|
|
attrs);
|
|
|
|
} else if (mr->ops->write_with_attrs) {
|
|
|
|
return
|
|
|
|
access_with_adjusted_size(addr, &data, size,
|
|
|
|
mr->ops->impl.min_access_size,
|
|
|
|
mr->ops->impl.max_access_size,
|
|
|
|
memory_region_write_with_attrs_accessor,
|
|
|
|
mr, attrs);
|
2015-08-21 07:04:50 +00:00
|
|
|
} else {
|
2018-02-12 21:56:31 +00:00
|
|
|
return access_with_adjusted_size(addr, &data, size, 1, 4,
|
|
|
|
memory_region_oldmmio_write_accessor,
|
|
|
|
mr, attrs);
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_init_io(struct uc_struct *uc, MemoryRegion *mr,
|
|
|
|
Object *owner,
|
|
|
|
const MemoryRegionOps *ops,
|
|
|
|
void *opaque,
|
|
|
|
const char *name,
|
|
|
|
uint64_t size)
|
|
|
|
{
|
|
|
|
memory_region_init(uc, mr, owner, name, size);
|
2018-02-21 16:21:53 +00:00
|
|
|
mr->ops = ops ? ops : &unassigned_mem_ops;
|
2015-08-21 07:04:50 +00:00
|
|
|
mr->opaque = opaque;
|
|
|
|
mr->terminates = true;
|
|
|
|
}
|
|
|
|
|
2018-03-04 03:23:26 +00:00
|
|
|
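/* Allocate a RAM-backed region. Unicorn-specific: the region is marked
 * read-only when perms lacks UC_PROT_WRITE, and perms is stored on the
 * region. */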
void memory_region_init_ram_nomigrate(struct uc_struct *uc,
|
|
|
|
MemoryRegion *mr,
|
|
|
|
Object *owner,
|
|
|
|
const char *name,
|
|
|
|
uint64_t size,
|
|
|
|
uint32_t perms,
|
|
|
|
Error **errp)
|
2015-08-21 07:04:50 +00:00
|
|
|
{
|
|
|
|
memory_region_init(uc, mr, owner, name, size);
|
|
|
|
mr->ram = true;
|
2015-08-26 20:29:54 +00:00
|
|
|
if (!(perms & UC_PROT_WRITE)) {
|
|
|
|
mr->readonly = true;
|
|
|
|
}
|
2015-08-28 01:03:17 +00:00
|
|
|
mr->perms = perms;
|
2015-08-21 07:04:50 +00:00
|
|
|
mr->terminates = true;
|
|
|
|
mr->destructor = memory_region_destructor_ram;
|
2018-02-21 13:07:39 +00:00
|
|
|
mr->ram_block = qemu_ram_alloc(size, mr, errp);
|
2018-02-13 13:54:38 +00:00
|
|
|
mr->dirty_log_mask = tcg_enabled(uc) ? (1 << DIRTY_MEMORY_CODE) : 0;
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_init_ram_ptr(struct uc_struct *uc, MemoryRegion *mr,
|
|
|
|
Object *owner,
|
|
|
|
const char *name,
|
|
|
|
uint64_t size,
|
|
|
|
void *ptr)
|
|
|
|
{
|
|
|
|
memory_region_init(uc, mr, owner, name, size);
|
|
|
|
mr->ram = true;
|
|
|
|
mr->terminates = true;
|
2018-02-18 00:39:20 +00:00
|
|
|
mr->destructor = memory_region_destructor_ram;
|
2018-02-13 13:54:38 +00:00
|
|
|
mr->dirty_log_mask = tcg_enabled(uc) ? (1 << DIRTY_MEMORY_CODE) : 0;
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
/* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
|
|
|
|
assert(ptr != NULL);
|
2018-02-21 16:24:24 +00:00
|
|
|
mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
2018-02-17 22:02:55 +00:00
|
|
|
void memory_region_init_resizeable_ram(struct uc_struct *uc,
|
|
|
|
MemoryRegion *mr,
|
|
|
|
Object *owner,
|
|
|
|
const char *name,
|
|
|
|
uint64_t size,
|
|
|
|
uint64_t max_size,
|
|
|
|
void (*resized)(const char*,
|
|
|
|
uint64_t length,
|
|
|
|
void *host),
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
memory_region_init(uc, mr, owner, name, size);
|
|
|
|
mr->ram = true;
|
|
|
|
mr->terminates = true;
|
|
|
|
mr->destructor = memory_region_destructor_ram;
|
2018-02-21 13:07:39 +00:00
|
|
|
mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
|
|
|
|
mr, errp);
|
2018-02-17 22:02:55 +00:00
|
|
|
mr->dirty_log_mask = tcg_enabled(uc) ? (1 << DIRTY_MEMORY_CODE) : 0;
|
2018-02-25 05:26:03 +00:00
|
|
|
}
|
|
|
|
|
2018-03-04 03:28:41 +00:00
|
|
|
void memory_region_init_rom_nomigrate(struct uc_struct *uc,
|
|
|
|
MemoryRegion *mr,
|
|
|
|
struct Object *owner,
|
|
|
|
const char *name,
|
|
|
|
uint64_t size,
|
|
|
|
Error **errp)
|
2018-02-25 05:26:03 +00:00
|
|
|
{
|
|
|
|
memory_region_init(uc, mr, owner, name, size);
|
|
|
|
mr->ram = true;
|
|
|
|
mr->readonly = true;
|
|
|
|
mr->terminates = true;
|
|
|
|
mr->destructor = memory_region_destructor_ram;
|
|
|
|
mr->ram_block = qemu_ram_alloc(size, mr, errp);
|
|
|
|
mr->dirty_log_mask = tcg_enabled(uc) ? (1 << DIRTY_MEMORY_CODE) : 0;
|
2018-02-17 22:02:55 +00:00
|
|
|
}
|
|
|
|
|
2018-02-26 03:59:02 +00:00
|
|
|
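/* Like memory_region_init_ram_ptr(), but flag the region as a ram_device
 * so accesses go through ram_device_mem_ops rather than being treated as
 * directly mappable RAM.
 *
 * Illustrative sketch only (host_mmap_ptr, region_size, container and
 * offset are placeholder names, not part of this file):
 *
 *     memory_region_init_ram_device_ptr(uc, &mr, owner, "dev-mmap",
 *                                       region_size, host_mmap_ptr);
 *     memory_region_add_subregion(container, offset, &mr);
 *
 * after which guest accesses are serviced by the accessors defined above.
 */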
void memory_region_init_ram_device_ptr(struct uc_struct *uc,
|
|
|
|
MemoryRegion *mr,
|
|
|
|
Object *owner,
|
|
|
|
const char *name,
|
|
|
|
uint64_t size,
|
|
|
|
void *ptr)
|
2015-08-21 07:04:50 +00:00
|
|
|
{
|
2018-02-26 03:59:02 +00:00
|
|
|
memory_region_init_ram_ptr(uc, mr, owner, name, size, ptr);
|
|
|
|
mr->ram_device = true;
|
memory: Don't use memcpy for ram_device regions
Backports commit 1b16ded6a512809f99c133a97f19026fe612b2de from qemu
2018-02-26 04:05:10 +00:00
|
|
|
mr->ops = &ram_device_mem_ops;
|
|
|
|
mr->opaque = mr;
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_init_alias(struct uc_struct *uc, MemoryRegion *mr,
|
|
|
|
Object *owner,
|
|
|
|
const char *name,
|
|
|
|
MemoryRegion *orig,
|
|
|
|
hwaddr offset,
|
|
|
|
uint64_t size)
|
|
|
|
{
|
|
|
|
memory_region_init(uc, mr, owner, name, size);
|
|
|
|
mr->alias = orig;
|
|
|
|
mr->alias_offset = offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void memory_region_finalize(struct uc_struct *uc, Object *obj, void *opaque)
|
|
|
|
{
|
|
|
|
MemoryRegion *mr = MEMORY_REGION(uc, obj);
|
|
|
|
|
2018-02-15 00:33:01 +00:00
|
|
|
assert(!mr->container);
|
|
|
|
|
|
|
|
/* We know the region is not visible in any address space (it
|
|
|
|
* does not have a container and cannot be a root either because
|
|
|
|
* it has no references), so we can blindly clear mr->enabled.
|
|
|
|
* memory_region_set_enabled instead could trigger a transaction
|
|
|
|
* and cause an infinite loop.
|
|
|
|
*/
|
|
|
|
mr->enabled = false;
|
|
|
|
memory_region_transaction_begin(uc);
|
|
|
|
while (!QTAILQ_EMPTY(&mr->subregions)) {
|
|
|
|
MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
|
|
|
|
memory_region_del_subregion(mr, subregion);
|
|
|
|
}
|
|
|
|
memory_region_transaction_commit(uc);
|
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
mr->destructor(mr);
|
2016-12-21 14:28:36 +00:00
|
|
|
g_free((char *)mr->name);
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_ref(MemoryRegion *mr)
|
|
|
|
{
|
|
|
|
/* MMIO callbacks most likely will access data that belongs
|
|
|
|
* to the owner, hence the need to ref/unref the owner whenever
|
|
|
|
* the memory region is in use.
|
|
|
|
*
|
|
|
|
* The memory region is a child of its owner. As long as the
|
|
|
|
* owner doesn't call unparent itself on the memory region,
|
|
|
|
* ref-ing the owner will also keep the memory region alive.
|
2018-02-18 01:09:58 +00:00
|
|
|
* Memory regions without an owner are supposed to never go away;
|
|
|
|
* we do not ref/unref them because it would slow down DMA noticeably.
|
2015-08-21 07:04:50 +00:00
|
|
|
*/
|
2018-02-18 01:09:58 +00:00
|
|
|
if (mr && mr->owner) {
|
|
|
|
object_ref(mr->owner);
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_unref(MemoryRegion *mr)
|
|
|
|
{
|
2018-02-18 01:09:58 +00:00
|
|
|
if (mr && mr->owner) {
|
|
|
|
object_unref(mr->uc, mr->owner);
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t memory_region_size(MemoryRegion *mr)
|
|
|
|
{
|
|
|
|
if (int128_eq(mr->size, int128_2_64())) {
|
|
|
|
return UINT64_MAX;
|
|
|
|
}
|
|
|
|
return int128_get64(mr->size);
|
|
|
|
}
|
|
|
|
|
|
|
|
const char *memory_region_name(const MemoryRegion *mr)
|
|
|
|
{
|
|
|
|
if (!mr->name) {
|
|
|
|
((MemoryRegion *)mr)->name =
|
|
|
|
object_get_canonical_path_component(OBJECT(mr));
|
|
|
|
}
|
|
|
|
return mr->name;
|
|
|
|
}
|
|
|
|
|
2018-02-26 03:59:02 +00:00
|
|
|
bool memory_region_is_ram_device(MemoryRegion *mr)
|
2015-08-21 07:04:50 +00:00
|
|
|
{
|
2018-02-26 03:59:02 +00:00
|
|
|
return mr->ram_device;
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
2018-02-13 13:41:40 +00:00
|
|
|
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
|
2015-08-21 07:04:50 +00:00
|
|
|
{
|
|
|
|
return mr->dirty_log_mask;
|
|
|
|
}
|
|
|
|
|
2018-02-13 13:41:40 +00:00
|
|
|
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
|
|
|
|
{
|
|
|
|
return memory_region_get_dirty_log_mask(mr) & (1 << client);
|
|
|
|
}
|
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
|
|
|
|
{
|
|
|
|
if (mr->readonly != readonly) {
|
|
|
|
memory_region_transaction_begin(mr->uc);
|
|
|
|
mr->readonly = readonly;
|
2015-08-28 06:19:32 +00:00
|
|
|
if (readonly) {
|
|
|
|
mr->perms &= ~UC_PROT_WRITE;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
mr->perms |= UC_PROT_WRITE;
|
|
|
|
}
|
2015-08-21 07:04:50 +00:00
|
|
|
mr->uc->memory_region_update_pending |= mr->enabled;
|
|
|
|
memory_region_transaction_commit(mr->uc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-14 13:36:25 +00:00
|
|
|
void memory_region_clear_global_locking(MemoryRegion *mr)
|
|
|
|
{
|
|
|
|
mr->global_locking = false;
|
|
|
|
}
|
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
|
|
|
|
{
|
|
|
|
if (mr->romd_mode != romd_mode) {
|
|
|
|
memory_region_transaction_begin(mr->uc);
|
|
|
|
mr->romd_mode = romd_mode;
|
|
|
|
mr->uc->memory_region_update_pending |= mr->enabled;
|
|
|
|
memory_region_transaction_commit(mr->uc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int memory_region_get_fd(MemoryRegion *mr)
|
|
|
|
{
|
2018-02-24 08:33:42 +00:00
|
|
|
int fd;
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2018-02-24 08:33:42 +00:00
|
|
|
// Unicorn: commented out
|
|
|
|
//rcu_read_lock();
|
|
|
|
while (mr->alias) {
|
|
|
|
mr = mr->alias;
|
|
|
|
}
|
|
|
|
fd = mr->ram_block->fd;
|
|
|
|
//rcu_read_unlock();
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2018-02-24 08:33:42 +00:00
|
|
|
return fd;
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void *memory_region_get_ram_ptr(MemoryRegion *mr)
|
|
|
|
{
|
2018-02-21 12:43:01 +00:00
|
|
|
void *ptr;
|
|
|
|
uint64_t offset = 0;
|
|
|
|
|
|
|
|
// Unicorn: commented out
|
|
|
|
// rcu_read_lock();
|
|
|
|
while (mr->alias) {
|
|
|
|
offset += mr->alias_offset;
|
|
|
|
mr = mr->alias;
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
2018-02-21 13:07:39 +00:00
|
|
|
assert(mr->ram_block);
|
2018-02-24 21:10:07 +00:00
|
|
|
ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, offset);
|
2018-02-21 12:43:01 +00:00
|
|
|
// Unicorn: commented out
|
|
|
|
//rcu_read_unlock();
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2018-02-24 21:10:07 +00:00
|
|
|
return ptr;
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
2018-02-24 20:52:04 +00:00
|
|
|
MemoryRegion *memory_region_from_host(struct uc_struct *uc, void *ptr, ram_addr_t *offset)
|
|
|
|
{
|
|
|
|
RAMBlock *block;
|
|
|
|
|
|
|
|
block = qemu_ram_block_from_host(uc, ptr, false, offset);
|
|
|
|
if (!block) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return block->mr;
|
|
|
|
}
|
|
|
|
|
2018-02-21 13:00:50 +00:00
|
|
|
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
|
|
|
|
{
|
|
|
|
return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
|
|
|
|
}
|
|
|
|
|
2018-02-13 16:23:17 +00:00
|
|
|
bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
|
|
|
|
hwaddr size, unsigned client)
|
|
|
|
{
|
2018-02-21 13:07:39 +00:00
|
|
|
assert(mr->ram_block);
|
|
|
|
return cpu_physical_memory_test_and_clear_dirty(mr->uc,
|
|
|
|
memory_region_get_ram_addr(mr) + addr, size, client);
|
2018-02-13 16:23:17 +00:00
|
|
|
}
|
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
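/* Insert a subregion into its container's list, which is kept in
 * descending priority order so higher-priority regions win when the
 * view is flattened. */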
static void memory_region_update_container_subregions(MemoryRegion *subregion)
|
|
|
|
{
|
|
|
|
MemoryRegion *mr = subregion->container;
|
|
|
|
MemoryRegion *other;
|
|
|
|
|
|
|
|
memory_region_transaction_begin(mr->uc);
|
|
|
|
|
|
|
|
memory_region_ref(subregion);
|
|
|
|
QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
|
|
|
|
if (subregion->priority >= other->priority) {
|
|
|
|
QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
|
|
|
|
done:
|
|
|
|
mr->uc->memory_region_update_pending |= mr->enabled && subregion->enabled;
|
|
|
|
memory_region_transaction_commit(mr->uc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void memory_region_add_subregion_common(MemoryRegion *mr,
|
|
|
|
hwaddr offset,
|
|
|
|
MemoryRegion *subregion)
|
|
|
|
{
|
|
|
|
assert(!subregion->container);
|
|
|
|
subregion->container = mr;
|
|
|
|
subregion->addr = offset;
|
2015-08-28 06:19:32 +00:00
|
|
|
subregion->end = offset + int128_get64(subregion->size);
|
2015-08-21 07:04:50 +00:00
|
|
|
memory_region_update_container_subregions(subregion);
|
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_add_subregion(MemoryRegion *mr,
|
|
|
|
hwaddr offset,
|
|
|
|
MemoryRegion *subregion)
|
|
|
|
{
|
|
|
|
subregion->priority = 0;
|
|
|
|
memory_region_add_subregion_common(mr, offset, subregion);
|
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_add_subregion_overlap(MemoryRegion *mr,
|
|
|
|
hwaddr offset,
|
|
|
|
MemoryRegion *subregion,
|
|
|
|
int priority)
|
|
|
|
{
|
|
|
|
subregion->priority = priority;
|
|
|
|
memory_region_add_subregion_common(mr, offset, subregion);
|
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_del_subregion(MemoryRegion *mr,
|
|
|
|
MemoryRegion *subregion)
|
|
|
|
{
|
|
|
|
memory_region_transaction_begin(mr->uc);
|
|
|
|
assert(subregion->container == mr);
|
|
|
|
subregion->container = NULL;
|
|
|
|
QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
|
|
|
|
memory_region_unref(subregion);
|
|
|
|
mr->uc->memory_region_update_pending |= mr->enabled && subregion->enabled;
|
|
|
|
memory_region_transaction_commit(mr->uc);
|
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
|
|
|
|
{
|
|
|
|
if (enabled == mr->enabled) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
memory_region_transaction_begin(mr->uc);
|
|
|
|
mr->enabled = enabled;
|
|
|
|
mr->uc->memory_region_update_pending = true;
|
|
|
|
memory_region_transaction_commit(mr->uc);
|
|
|
|
}
|
|
|
|
|
2018-02-17 21:02:13 +00:00
|
|
|
void memory_region_set_size(MemoryRegion *mr, uint64_t size)
|
|
|
|
{
|
|
|
|
Int128 s = int128_make64(size);
|
|
|
|
|
|
|
|
if (size == UINT64_MAX) {
|
|
|
|
s = int128_2_64();
|
|
|
|
}
|
|
|
|
if (int128_eq(s, mr->size)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
memory_region_transaction_begin(mr->uc);
|
|
|
|
mr->size = s;
|
|
|
|
mr->uc->memory_region_update_pending = true;
|
|
|
|
memory_region_transaction_commit(mr->uc);
|
|
|
|
}
|
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
static void memory_region_readd_subregion(MemoryRegion *mr)
|
|
|
|
{
|
|
|
|
MemoryRegion *container = mr->container;
|
|
|
|
|
|
|
|
if (container) {
|
|
|
|
memory_region_transaction_begin(mr->uc);
|
|
|
|
memory_region_ref(mr);
|
|
|
|
memory_region_del_subregion(container, mr);
|
|
|
|
mr->container = container;
|
|
|
|
memory_region_update_container_subregions(mr);
|
|
|
|
memory_region_unref(mr);
|
|
|
|
memory_region_transaction_commit(mr->uc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
|
|
|
|
{
|
|
|
|
if (addr != mr->addr) {
|
|
|
|
mr->addr = addr;
|
|
|
|
memory_region_readd_subregion(mr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
|
|
|
|
{
|
|
|
|
assert(mr->alias);
|
|
|
|
|
|
|
|
if (offset == mr->alias_offset) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
memory_region_transaction_begin(mr->uc);
|
|
|
|
mr->alias_offset = offset;
|
|
|
|
mr->uc->memory_region_update_pending |= mr->enabled;
|
|
|
|
memory_region_transaction_commit(mr->uc);
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t memory_region_get_alignment(const MemoryRegion *mr)
|
|
|
|
{
|
|
|
|
return mr->align;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
|
|
|
|
{
|
|
|
|
const AddrRange *addr = addr_;
|
|
|
|
const FlatRange *fr = fr_;
|
|
|
|
|
|
|
|
if (int128_le(addrrange_end(*addr), fr->addr.start)) {
|
|
|
|
return -1;
|
|
|
|
} else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
|
|
|
|
{
|
|
|
|
return bsearch(&addr, view->ranges, view->nr,
|
|
|
|
sizeof(FlatRange), cmp_flatrange_addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool memory_region_is_mapped(MemoryRegion *mr)
|
|
|
|
{
|
|
|
|
return mr->container ? true : false;
|
|
|
|
}
|
|
|
|
|
2018-02-21 16:17:16 +00:00
|
|
|
/* Same as memory_region_find, but it does not add a reference to the
|
|
|
|
* returned region. It must be called from an RCU critical section.
|
|
|
|
*/
|
|
|
|
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
|
|
|
|
hwaddr addr, uint64_t size)
|
2015-08-21 07:04:50 +00:00
|
|
|
{
|
2017-01-19 11:50:28 +00:00
|
|
|
MemoryRegionSection ret = { NULL };
|
2015-08-21 07:04:50 +00:00
|
|
|
MemoryRegion *root;
|
|
|
|
AddressSpace *as;
|
|
|
|
AddrRange range;
|
|
|
|
FlatView *view;
|
|
|
|
FlatRange *fr;
|
|
|
|
|
|
|
|
addr += mr->addr;
|
|
|
|
for (root = mr; root->container; ) {
|
|
|
|
root = root->container;
|
|
|
|
addr += root->addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
as = memory_region_to_address_space(root);
|
|
|
|
if (!as) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
range = addrrange_make(int128_make64(addr), int128_make64(size));
|
|
|
|
|
2018-02-21 16:17:16 +00:00
|
|
|
// Unicorn: Uses atomic_read instead of atomic_rcu_read
|
|
|
|
view = atomic_read(&as->current_map);
|
2015-08-21 07:04:50 +00:00
|
|
|
fr = flatview_lookup(view, range);
|
|
|
|
if (!fr) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
|
|
|
|
--fr;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret.mr = fr->mr;
|
|
|
|
ret.address_space = as;
|
|
|
|
range = addrrange_intersection(range, fr->addr);
|
|
|
|
ret.offset_within_region = fr->offset_in_region;
|
|
|
|
ret.offset_within_region += int128_get64(int128_sub(range.start,
|
|
|
|
fr->addr.start));
|
|
|
|
ret.size = range.size;
|
|
|
|
ret.offset_within_address_space = int128_get64(range.start);
|
|
|
|
ret.readonly = fr->readonly;
|
2018-02-21 16:17:16 +00:00
|
|
|
return ret;
|
|
|
|
}
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2018-02-21 16:17:16 +00:00
|
|
|
MemoryRegionSection memory_region_find(MemoryRegion *mr,
|
|
|
|
hwaddr addr, uint64_t size)
|
|
|
|
{
|
|
|
|
MemoryRegionSection ret;
|
|
|
|
// Unicorn: commented out
|
|
|
|
//rcu_read_lock();
|
|
|
|
ret = memory_region_find_rcu(mr, addr, size);
|
|
|
|
if (ret.mr) {
|
|
|
|
memory_region_ref(ret.mr);
|
|
|
|
}
|
|
|
|
// Unicorn: commented out
|
|
|
|
//rcu_read_unlock();
|
2015-08-21 07:04:50 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-02-21 16:17:16 +00:00
|
|
|
bool memory_region_present(MemoryRegion *container, hwaddr addr)
|
|
|
|
{
|
|
|
|
MemoryRegion *mr;
|
|
|
|
|
|
|
|
// Unicorn: commented out
|
|
|
|
//rcu_read_lock();
|
|
|
|
mr = memory_region_find_rcu(container, addr, 1).mr;
|
|
|
|
// Unicorn: commented out
|
|
|
|
//rcu_read_unlock();
|
|
|
|
return mr && mr != container;
|
|
|
|
}
|
|
|
|
|
2018-02-26 15:48:57 +00:00
|
|
|
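/* Replay the current flat view of an address space to a newly attached
 * listener via its log_start/region_add callbacks. Not called at the
 * moment; see the commented-out call in memory_listener_register(). */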
static QEMU_UNUSED_FUNC void listener_add_address_space(MemoryListener *listener,
|
|
|
|
AddressSpace *as)
|
2015-08-21 07:04:50 +00:00
|
|
|
{
|
|
|
|
FlatView *view;
|
|
|
|
FlatRange *fr;
|
|
|
|
|
2018-02-26 15:13:34 +00:00
|
|
|
if (listener->begin) {
|
|
|
|
listener->begin(listener);
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
2018-02-26 15:13:34 +00:00
|
|
|
if (as->uc->global_dirty_log) {
|
2015-08-21 07:04:50 +00:00
|
|
|
if (listener->log_global_start) {
|
|
|
|
listener->log_global_start(listener);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
view = address_space_get_flatview(as);
|
|
|
|
FOR_EACH_FLAT_RANGE(fr, view) {
|
2017-01-19 11:50:28 +00:00
|
|
|
MemoryRegionSection section = MemoryRegionSection_make(
|
|
|
|
fr->mr,
|
|
|
|
as,
|
|
|
|
fr->offset_in_region,
|
|
|
|
fr->addr.size,
|
|
|
|
int128_get64(fr->addr.start),
|
|
|
|
fr->readonly);
|
2018-02-26 15:13:34 +00:00
|
|
|
if (fr->dirty_log_mask && listener->log_start) {
|
|
|
|
listener->log_start(listener, &section, 0, fr->dirty_log_mask);
|
|
|
|
}
|
2015-08-21 07:04:50 +00:00
|
|
|
if (listener->region_add) {
|
|
|
|
listener->region_add(listener, &section);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
flatview_unref(view);
|
|
|
|
}
|
|
|
|
|
2018-02-26 15:13:34 +00:00
|
|
|
void memory_listener_register(struct uc_struct* uc, MemoryListener *listener, AddressSpace *as)
|
2015-08-21 07:04:50 +00:00
|
|
|
{
|
|
|
|
MemoryListener *other = NULL;
|
|
|
|
|
2018-02-26 15:13:34 +00:00
|
|
|
listener->address_space = as;
|
2015-08-21 07:04:50 +00:00
|
|
|
if (QTAILQ_EMPTY(&uc->memory_listeners)
|
|
|
|
|| listener->priority >= QTAILQ_LAST(&uc->memory_listeners,
|
|
|
|
memory_listeners)->priority) {
|
|
|
|
QTAILQ_INSERT_TAIL(&uc->memory_listeners, listener, link);
|
|
|
|
} else {
|
|
|
|
QTAILQ_FOREACH(other, &uc->memory_listeners, link) {
|
|
|
|
if (listener->priority < other->priority) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
QTAILQ_INSERT_BEFORE(other, listener, link);
|
|
|
|
}
|
2018-02-26 15:33:15 +00:00
|
|
|
|
|
|
|
if (QTAILQ_EMPTY(&as->listeners)
|
|
|
|
|| listener->priority >= QTAILQ_LAST(&as->listeners,
|
|
|
|
memory_listeners)->priority) {
|
|
|
|
QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
|
|
|
|
} else {
|
|
|
|
QTAILQ_FOREACH(other, &as->listeners, link_as) {
|
|
|
|
if (listener->priority < other->priority) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
QTAILQ_INSERT_BEFORE(other, listener, link_as);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unicorn: TODO: Handle leaks that occur when this is uncommented
|
|
|
|
//listener_add_address_space(listener, as);
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void memory_listener_unregister(struct uc_struct *uc, MemoryListener *listener)
|
|
|
|
{
|
|
|
|
QTAILQ_REMOVE(&uc->memory_listeners, listener, link);
|
2018-02-26 15:33:15 +00:00
|
|
|
QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
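/* Initialize an address space rooted at 'root'. The first address space
 * created also initializes the memory subsystem for this uc instance. */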
void address_space_init(struct uc_struct *uc, AddressSpace *as, MemoryRegion *root, const char *name)
|
|
|
|
{
|
|
|
|
if (QTAILQ_EMPTY(&uc->address_spaces)) {
|
|
|
|
memory_init(uc);
|
|
|
|
}
|
|
|
|
|
|
|
|
memory_region_transaction_begin(uc);
|
2018-02-18 04:37:43 +00:00
|
|
|
as->ref_count = 1;
|
2015-08-21 07:04:50 +00:00
|
|
|
as->uc = uc;
|
|
|
|
as->root = root;
|
2018-02-18 04:37:43 +00:00
|
|
|
as->malloced = false;
|
2018-03-04 07:08:25 +00:00
|
|
|
as->current_map = flatview_new();
|
2018-02-26 15:33:15 +00:00
|
|
|
QTAILQ_INIT(&as->listeners);
|
2015-08-21 07:04:50 +00:00
|
|
|
QTAILQ_INSERT_TAIL(&uc->address_spaces, as, address_spaces_link);
|
|
|
|
as->name = g_strdup(name ? name : "anonymous");
|
2018-03-04 07:02:41 +00:00
|
|
|
as->dispatch = NULL;
|
2015-08-21 07:04:50 +00:00
|
|
|
uc->memory_region_update_pending |= root->enabled;
|
|
|
|
memory_region_transaction_commit(uc);
|
|
|
|
}
|
|
|
|
|
2018-02-18 04:37:43 +00:00
|
|
|
static void do_address_space_destroy(AddressSpace *as)
|
2015-08-21 07:04:50 +00:00
|
|
|
{
|
2018-02-18 22:52:03 +00:00
|
|
|
// Unicorn: commented out
|
2018-02-18 04:37:43 +00:00
|
|
|
bool do_free = as->malloced;
|
|
|
|
|
|
|
|
address_space_destroy_dispatch(as);
|
|
|
|
|
|
|
|
// TODO(danghvu): why assert fail here?
|
|
|
|
//QTAILQ_FOREACH(listener, &as->uc->memory_listeners, link) {
|
2018-02-26 15:33:15 +00:00
|
|
|
// assert(QTAILQ_EMPTY(&as->listeners));
|
2018-02-18 04:37:43 +00:00
|
|
|
//}
|
|
|
|
|
|
|
|
flatview_unref(as->current_map);
|
|
|
|
g_free(as->name);
|
|
|
|
// Unicorn: commented out
|
|
|
|
//g_free(as->ioeventfds);
|
|
|
|
memory_region_unref(as->root);
|
|
|
|
if (do_free) {
|
|
|
|
g_free(as);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
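/* Return an existing heap-allocated address space sharing the same root
 * (bumping its refcount), or create and register a new one. */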
AddressSpace *address_space_init_shareable(struct uc_struct *uc, MemoryRegion *root, const char *name)
|
|
|
|
{
|
|
|
|
AddressSpace *as;
|
|
|
|
|
|
|
|
QTAILQ_FOREACH(as, &uc->address_spaces, address_spaces_link) {
|
|
|
|
if (root == as->root && as->malloced) {
|
|
|
|
as->ref_count++;
|
|
|
|
return as;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
as = g_malloc0(sizeof *as);
|
|
|
|
address_space_init(uc, as, root, name);
|
|
|
|
as->malloced = true;
|
|
|
|
return as;
|
|
|
|
}
|
|
|
|
|
|
|
|
void address_space_destroy(AddressSpace *as)
|
|
|
|
{
|
|
|
|
MemoryRegion *root = as->root;
|
|
|
|
|
|
|
|
as->ref_count--;
|
|
|
|
if (as->ref_count) {
|
|
|
|
return;
|
|
|
|
}
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
/* Flush out anything from MemoryListeners listening in on this */
|
|
|
|
memory_region_transaction_begin(as->uc);
|
|
|
|
as->root = NULL;
|
|
|
|
memory_region_transaction_commit(as->uc);
|
|
|
|
QTAILQ_REMOVE(&as->uc->address_spaces, as, address_spaces_link);
|
|
|
|
|
2018-02-18 04:37:43 +00:00
|
|
|
/* At this point, as->dispatch and as->current_map are dummy
|
|
|
|
* entries that the guest should never use. Wait for the old
|
|
|
|
* values to expire before freeing the data.
|
|
|
|
*/
|
|
|
|
as->root = root;
|
|
|
|
do_address_space_destroy(as);
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2018-02-18 04:37:43 +00:00
|
|
|
// Unicorn: Commented out and call it directly
|
|
|
|
// call_rcu(as, do_address_space_destroy, rcu);
|
2015-08-21 07:04:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
typedef struct MemoryRegionList MemoryRegionList;
|
|
|
|
|
|
|
|
struct MemoryRegionList {
|
|
|
|
const MemoryRegion *mr;
|
|
|
|
QTAILQ_ENTRY(MemoryRegionList) queue;
|
|
|
|
};
|
|
|
|
|
|
|
|
typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
|
|
|
|
|
|
|
|
static const TypeInfo memory_region_info = {
|
2017-01-19 11:50:28 +00:00
|
|
|
TYPE_MEMORY_REGION,
|
|
|
|
TYPE_OBJECT,
|
2017-01-22 13:27:17 +00:00
|
|
|
|
2017-01-21 01:28:22 +00:00
|
|
|
0,
|
|
|
|
sizeof(MemoryRegion),
|
|
|
|
NULL,
|
2017-01-19 11:50:28 +00:00
|
|
|
|
|
|
|
memory_region_initfn,
|
|
|
|
NULL,
|
2017-01-21 01:28:22 +00:00
|
|
|
memory_region_finalize,
|
2015-08-21 07:04:50 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
void memory_register_types(struct uc_struct *uc)
|
|
|
|
{
|
|
|
|
type_register_static(uc, &memory_region_info);
|
|
|
|
}
|
|
|
|
|