author    bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>    2005-01-28 22:37:22 +0000
committer bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>    2005-01-28 22:37:22 +0000
commit    8df1cd076cc14d1d4fc456c6d7d1ceb257781942 (patch)
tree      0e11aae2a7a168e941609549c536c270344bf816 /exec.c
parent    bb05683b1271517b5f78aff1890e37f0d27b4d2d (diff)
physical memory access functions
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1249 c046a42c-6fe2-441c-8c8c-71466251a162
Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c  105
1 file changed, 105 insertions, 0 deletions
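
The commit message above is terse, so here is a minimal, self-contained sketch of the access pattern the new helpers implement: resolve a guest-physical address to a page descriptor, then either touch host RAM directly or forward the access to a registered I/O read handler. This is not QEMU code; every name in it (toy_ldl_phys, page_desc, timer_read, and so on) is invented for illustration only. The real ldl_phys() in the diff below additionally distinguishes ROM/code pages and dispatches through phys_page_find() and io_mem_read[].

/* Toy model (invented names, not QEMU's API) of the dispatch performed by
 * the new ldl_phys(): page lookup, then RAM access or MMIO callback. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_BITS 12
#define PAGE_SIZE (1u << PAGE_BITS)
#define RAM_PAGES 4

typedef uint32_t (*mmio_read_fn)(uint32_t addr);

struct page_desc {
    int is_ram;            /* RAM-backed page or MMIO page */
    uint32_t ram_offset;   /* offset into the flat RAM block (RAM pages) */
    mmio_read_fn read;     /* read handler (MMIO pages) */
};

static uint8_t ram[RAM_PAGES * PAGE_SIZE];
static struct page_desc page_table[RAM_PAGES + 1];

static uint32_t timer_read(uint32_t addr)
{
    (void)addr;
    return 0x12345678;     /* pretend device register */
}

/* Counterpart of ldl_phys() in this model: aligned 32-bit physical read. */
static uint32_t toy_ldl_phys(uint32_t addr)
{
    struct page_desc *p = &page_table[addr >> PAGE_BITS];
    if (p->is_ram) {
        uint32_t val;
        memcpy(&val, ram + p->ram_offset + (addr & (PAGE_SIZE - 1)), 4);
        return val;        /* RAM case: read host memory directly */
    }
    return p->read(addr);  /* I/O case: route to the device handler */
}

int main(void)
{
    /* Pages 0..3 are RAM, page 4 is a device. */
    for (int i = 0; i < RAM_PAGES; i++) {
        page_table[i].is_ram = 1;
        page_table[i].ram_offset = i * PAGE_SIZE;
    }
    page_table[RAM_PAGES].read = timer_read;

    uint32_t word = 0xdeadbeef;
    memcpy(ram + 0x100, &word, 4);

    printf("RAM  read: 0x%08x\n", toy_ldl_phys(0x100));
    printf("MMIO read: 0x%08x\n", toy_ldl_phys(RAM_PAGES * PAGE_SIZE));
    return 0;
}
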
diff --git a/exec.c b/exec.c
index 0dd14f9e24..48cbe85749 100644
--- a/exec.c
+++ b/exec.c
@@ -2032,6 +2032,21 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
         addr += l;
     }
 }
+
+/* never used */
+uint32_t ldl_phys(target_phys_addr_t addr)
+{
+    return 0;
+}
+
+void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
+{
+}
+
+void stl_phys(target_phys_addr_t addr, uint32_t val)
+{
+}
+
 #else
 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                             int len, int is_write)
@@ -2118,6 +2133,96 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
         addr += l;
     }
 }
+
+/* warning: addr must be aligned */
+uint32_t ldl_phys(target_phys_addr_t addr)
+{
+    int io_index;
+    uint8_t *ptr;
+    uint32_t val;
+    unsigned long pd;
+    PhysPageDesc *p;
+
+    p = phys_page_find(addr >> TARGET_PAGE_BITS);
+    if (!p) {
+        pd = IO_MEM_UNASSIGNED;
+    } else {
+        pd = p->phys_offset;
+    }
+
+    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
+        (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
+        /* I/O case */
+        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+    } else {
+        /* RAM case */
+        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+            (addr & ~TARGET_PAGE_MASK);
+        val = ldl_p(ptr);
+    }
+    return val;
+}
+
+/* warning: addr must be aligned. The ram page is not masked as dirty
+   and the code inside is not invalidated. It is useful if the dirty
+   bits are used to track modified PTEs */
+void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
+{
+    int io_index;
+    uint8_t *ptr;
+    unsigned long pd;
+    PhysPageDesc *p;
+
+    p = phys_page_find(addr >> TARGET_PAGE_BITS);
+    if (!p) {
+        pd = IO_MEM_UNASSIGNED;
+    } else {
+        pd = p->phys_offset;
+    }
+
+    if ((pd & ~TARGET_PAGE_MASK) != 0) {
+        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+    } else {
+        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+            (addr & ~TARGET_PAGE_MASK);
+        stl_p(ptr, val);
+    }
+}
+
+/* warning: addr must be aligned */
+/* XXX: optimize code invalidation test */
+void stl_phys(target_phys_addr_t addr, uint32_t val)
+{
+    int io_index;
+    uint8_t *ptr;
+    unsigned long pd;
+    PhysPageDesc *p;
+
+    p = phys_page_find(addr >> TARGET_PAGE_BITS);
+    if (!p) {
+        pd = IO_MEM_UNASSIGNED;
+    } else {
+        pd = p->phys_offset;
+    }
+
+    if ((pd & ~TARGET_PAGE_MASK) != 0) {
+        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+    } else {
+        unsigned long addr1;
+        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+        /* RAM case */
+        ptr = phys_ram_base + addr1;
+        stl_p(ptr, val);
+        /* invalidate code */
+        tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
+        /* set dirty bit */
+        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
+    }
+}
+
 #endif
 
 /* virtual memory access for debug */
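
As a closing note on the two store variants added above: the comment on stl_phys_notdirty() says the RAM page is not marked dirty and translated code is not invalidated, which is useful when the dirty bits are used to track modified PTEs. In other words, the emulator can rewrite guest page-table entries without that bookkeeping write showing up as an ordinary dirty-page event. Below is a minimal, self-contained sketch of that split; the names (toy_stl_phys, dirty[], ...) are invented and this is not the QEMU implementation. The real stl_phys() additionally calls tb_invalidate_phys_page_range() on the written word, which is what the /* invalidate code */ line in the diff does.

/* Toy illustration (invented names, not QEMU code) of why the commit adds two
 * store paths: one that records the page as dirty, one that deliberately
 * skips that bookkeeping for internal PTE updates. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_BITS 12
#define NPAGES    4

static uint8_t ram[NPAGES << PAGE_BITS];
static uint8_t dirty[NPAGES];              /* one dirty flag per RAM page */

static void toy_stl_phys(uint32_t addr, uint32_t val)
{
    memcpy(ram + addr, &val, 4);
    /* Normal guest store: the page now differs from any cached view of it,
     * so flag it dirty (the real stl_phys also invalidates translated code). */
    dirty[addr >> PAGE_BITS] = 1;
}

static void toy_stl_phys_notdirty(uint32_t addr, uint32_t val)
{
    memcpy(ram + addr, &val, 4);
    /* Bookkeeping store (e.g. setting accessed/dirty bits in a guest PTE):
     * leave the page's dirty flag untouched on purpose. */
}

int main(void)
{
    toy_stl_phys(0x0010, 0xcafebabe);          /* ordinary data write */
    toy_stl_phys_notdirty(0x1020, 0x00000027); /* emulator updating a "PTE" */

    printf("page 0 dirty: %d\n", dirty[0]);    /* prints 1 */
    printf("page 1 dirty: %d\n", dirty[1]);    /* prints 0 */
    return 0;
}
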