/*
 * Declarations for obsolete exec.c functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef MEMORY_INTERNAL_H
#define MEMORY_INTERNAL_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

typedef struct AddressSpaceDispatch AddressSpaceDispatch;

void address_space_init_dispatch(AddressSpace *as);
void address_space_destroy_dispatch(AddressSpace *as);

extern const MemoryRegionOps unassigned_mem_ops;
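
/*
 * Whether a @size-byte access at @addr (a write if @is_write) satisfies
 * @mr's valid-access constraints.
 */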
bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
                                unsigned size, bool is_write);
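
/*
 * RAM block management.  qemu_ram_alloc() allocates @size bytes of guest
 * RAM to back @mr and returns its offset into the global RAM space;
 * qemu_ram_alloc_from_ptr() does the same but wraps an existing @host
 * buffer instead of allocating one.  qemu_get_ram_ptr() translates a
 * ram_addr_t back into a host pointer.
 */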
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
void *qemu_get_ram_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);
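
/*
 * Dirty memory tracking: ram_list.phys_dirty[] holds one entry per
 * target page, with one bit per dirty-memory client (DIRTY_MEMORY_VGA,
 * DIRTY_MEMORY_CODE, DIRTY_MEMORY_MIGRATION).  The helpers below test,
 * set and clear those bits.
 */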
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & (1 << client);
}

/* read as one combined dirty bit: true only if the page is dirty for
 * every client */
static inline bool cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return vga && code && migration;
}
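
/*
 * Returns nonzero if any page in [start, start + length) has the
 * @client dirty bit set.
 */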
static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
                                                ram_addr_t length,
                                                unsigned client)
{
    int ret = 0;
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        ret |= cpu_physical_memory_get_dirty_flag(addr, client);
    }
    return ret;
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= (1 << client);
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
    cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_CODE);
}
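
/*
 * Typical usage (illustrative sketch only, not from the original header;
 * "ram_offset", "data" and "len" are hypothetical):
 *
 *     memcpy(qemu_get_ram_ptr(ram_offset), data, len);
 *     cpu_physical_memory_set_dirty_range(ram_offset, len);
 *
 * i.e. after writing guest RAM from the host side, mark the affected
 * pages dirty for all clients so that the display, code invalidation
 * and migration machinery can notice.
 */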

static inline int cpu_physical_memory_clear_dirty_flag(ram_addr_t addr,
                                                       unsigned client)
{
    int mask = ~(1 << client);

    assert(client < DIRTY_MEMORY_NUM);
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length)
{
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        cpu_physical_memory_set_dirty(addr);
    }
    /* addr has run off the end of the range by now; notify Xen with the
     * page-aligned start instead. */
    xen_modified_memory(start, length);
}

static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        unsigned client)
{
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        cpu_physical_memory_clear_dirty_flag(addr, client);
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     unsigned client);
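
/*
 * Assumed consumer flow, for orientation (a sketch, not stated in this
 * header): live migration repeatedly scans pages with
 * cpu_physical_memory_get_dirty(start, len, DIRTY_MEMORY_MIGRATION),
 * transmits the dirty ones, and then clears their migration bits with
 * cpu_physical_memory_reset_dirty() before the next pass.
 */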

#endif /* !CONFIG_USER_ONLY */

#endif /* MEMORY_INTERNAL_H */