author     aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>  2009-01-22 16:59:16 +0000
committer  aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>  2009-01-22 16:59:16 +0000
commit     ba223c29da480b40b38678c66636ee9910973a47 (patch)
tree       f9c9ce3927ed8ed1e16bc1f21c627db1a9fcab14
parent     6d16c2f88f2a866bec27c4d170ddd97ee8e41a0e (diff)
Add map client retry notification (Avi Kivity)
The target memory mapping API may fail if the bounce buffer resources are
exhausted.  Add a notification mechanism to allow clients to retry the
mapping operation when resources become available again.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6395 c046a42c-6fe2-441c-8c8c-71466251a162
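As an illustration of the intended call pattern (this sketch is not part of the
commit; the DMARequest structure and the dma_* helper names are hypothetical),
a caller that gets NULL back from cpu_physical_memory_map() registers itself
and retries from the callback.  It assumes the declarations added to cpu-all.h
below are in scope.

    /* Hypothetical caller-side sketch; DMARequest and the dma_* names are
     * illustrative only.  Assumes the cpu_* declarations from cpu-all.h. */
    typedef struct DMARequest {
        target_phys_addr_t addr;
        target_phys_addr_t len;
        int is_write;
        void *map_client;           /* cookie from cpu_register_map_client() */
    } DMARequest;

    static void dma_try_map(void *opaque);

    /* Called from cpu_notify_map_clients() once bounce buffer space is freed.
     * The notifier unlinks the registration itself after the callback returns,
     * so the callback must not call cpu_unregister_map_client() here. */
    static void dma_retry_cb(void *opaque)
    {
        DMARequest *req = opaque;

        req->map_client = NULL;
        dma_try_map(req);
    }

    static void dma_try_map(void *opaque)
    {
        DMARequest *req = opaque;
        target_phys_addr_t plen = req->len;
        void *mem = cpu_physical_memory_map(req->addr, &plen, req->is_write);

        if (!mem) {
            /* Bounce buffer exhausted: ask to be notified and retry later. */
            req->map_client = cpu_register_map_client(req, dma_retry_cb);
            return;
        }
        /* ... access up to plen bytes through mem, then release the mapping. */
        cpu_physical_memory_unmap(mem, plen, req->is_write, plen);
    }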
-rw-r--r--  cpu-all.h |  2
-rw-r--r--  exec.c    | 40
2 files changed, 42 insertions, 0 deletions
diff --git a/cpu-all.h b/cpu-all.h
index 22ffaa7b29..e71bd0601c 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -928,6 +928,8 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
                               int is_write);
 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                                 int is_write, target_phys_addr_t access_len);
+void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+void cpu_unregister_map_client(void *cookie);
 
 uint32_t ldub_phys(target_phys_addr_t addr);
 uint32_t lduw_phys(target_phys_addr_t addr);
diff --git a/exec.c b/exec.c
index 6dd88fce45..56e5e48869 100644
--- a/exec.c
+++ b/exec.c
@@ -3053,10 +3053,49 @@ typedef struct {
 
 static BounceBuffer bounce;
 
+typedef struct MapClient {
+    void *opaque;
+    void (*callback)(void *opaque);
+    LIST_ENTRY(MapClient) link;
+} MapClient;
+
+static LIST_HEAD(map_client_list, MapClient) map_client_list
+    = LIST_HEAD_INITIALIZER(map_client_list);
+
+void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+{
+    MapClient *client = qemu_malloc(sizeof(*client));
+
+    client->opaque = opaque;
+    client->callback = callback;
+    LIST_INSERT_HEAD(&map_client_list, client, link);
+    return client;
+}
+
+void cpu_unregister_map_client(void *_client)
+{
+    MapClient *client = (MapClient *)_client;
+
+    LIST_REMOVE(client, link);
+}
+
+static void cpu_notify_map_clients(void)
+{
+    MapClient *client;
+
+    while (!LIST_EMPTY(&map_client_list)) {
+        client = LIST_FIRST(&map_client_list);
+        client->callback(client->opaque);
+        LIST_REMOVE(client, link);
+    }
+}
+
 /* Map a physical memory region into a host virtual address.
  * May map a subset of the requested range, given by and returned in *plen.
  * May return NULL if resources needed to perform the mapping are exhausted.
  * Use only for reads OR writes - not for read-modify-write operations.
+ * Use cpu_register_map_client() to know when retrying the map operation is
+ * likely to succeed.
  */
 void *cpu_physical_memory_map(target_phys_addr_t addr,
                               target_phys_addr_t *plen,
@@ -3146,6 +3185,7 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
     }
     qemu_free(bounce.buffer);
     bounce.buffer = NULL;
+    cpu_notify_map_clients();
 }
 
 /* warning: addr must be aligned */
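
The cookie returned by cpu_register_map_client() also lets a waiting caller
withdraw before the notification ever fires, for example when the pending
request is aborted.  Continuing the hypothetical DMARequest sketch above:

    /* Hypothetical cancel path, continuing the DMARequest sketch above. */
    static void dma_cancel(DMARequest *req)
    {
        if (req->map_client) {
            /* Still waiting for bounce buffer space: drop the registration
             * so cpu_notify_map_clients() no longer calls back into us. */
            cpu_unregister_map_client(req->map_client);
            req->map_client = NULL;
        }
    }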