From: Liu Ping Fan
Subject: [Qemu-devel] [patch v4 09/16] memory: introduce mmio request pending to anti nested DMA
Date: Mon, 22 Oct 2012 17:23:52 +0800

Reject nested MMIO requests that do not target RAM, so that we avoid
the potential deadlock caused by acquiring two devices' local locks in
an arbitrary order.
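
The sketch below (not part of the patch) shows the idea in isolation: a
per-thread nesting counter lets the dispatcher notice that an MMIO
handler is already running on the current thread and drop a nested MMIO
access instead of taking a second device's lock.  All names here
(dispatch_mmio, device_a_write, device_b_write) are hypothetical and do
not exist in QEMU; the counter is a plain static int for brevity,
whereas the patch keeps it in the per-thread QemuThread structure.

    #include <stdio.h>

    /* Nesting depth of MMIO dispatch on this thread (per-thread in the
     * real patch; a single global is enough for this single-threaded
     * example). */
    static int mmio_depth;

    static void device_b_write(void);

    /* Dispatch an MMIO access to a device handler, rejecting it if we
     * are already inside another handler on this thread. */
    static void dispatch_mmio(void (*handler)(void))
    {
        if (mmio_depth > 0) {
            printf("nested MMIO rejected (depth %d)\n", mmio_depth);
            return;
        }
        mmio_depth++;
        handler();      /* would run under the device's local lock */
        mmio_depth--;
    }

    /* Device A's write handler performs "DMA" that happens to land on
     * device B's MMIO region. */
    static void device_a_write(void)
    {
        dispatch_mmio(device_b_write);
    }

    static void device_b_write(void)
    {
        printf("device B MMIO handled\n");
    }

    int main(void)
    {
        dispatch_mmio(device_a_write);  /* top-level MMIO from the guest */
        return 0;
    }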

Signed-off-by: Liu Ping Fan <address@hidden>
---
 cpus.c              |   14 ++++++++++++++
 exec.c              |   50 ++++++++++++++++++++++++++++++++++++--------------
 hw/hw.h             |    1 +
 kvm-all.c           |    2 ++
 qemu-thread-posix.h |    3 +++
 qemu-thread.h       |    2 ++
 6 files changed, 58 insertions(+), 14 deletions(-)

diff --git a/cpus.c b/cpus.c
index 4cd7f85..365a512 100644
--- a/cpus.c
+++ b/cpus.c
@@ -729,6 +729,18 @@ static void qemu_kvm_wait_io_event(CPUArchState *env)
     qemu_wait_io_event_common(env);
 }
 
+int get_context_type(void)
+{
+    QemuThread *t = pthread_getspecific(qemu_thread_key);
+    return t->context_type;
+}
+
+void set_context_type(int type)
+{
+    QemuThread *t = pthread_getspecific(qemu_thread_key);
+    t->context_type = type;
+}
+
 static void *qemu_kvm_cpu_thread_fn(void *arg)
 {
     CPUArchState *env = arg;
@@ -736,6 +748,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
     int r;
 
     pthread_setspecific(qemu_thread_key, cpu->thread);
+    set_context_type(0);
+
     qemu_mutex_lock(&qemu_global_mutex);
     qemu_thread_get_self(cpu->thread);
     env->thread_id = qemu_get_thread_id();
diff --git a/exec.c b/exec.c
index 91b859b..a0327a1 100644
--- a/exec.c
+++ b/exec.c
@@ -3490,7 +3490,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
     uint32_t val;
     target_phys_addr_t page;
     MemoryRegionSection *section, obj_mrs;
-    int safe_ref;
+    int safe_ref, nested_dma = 0;
+    QemuThread *thread = pthread_getspecific(qemu_thread_key);
+    int context = thread->context_type;
 
     while (len > 0) {
         page = addr & TARGET_PAGE_MASK;
@@ -3500,7 +3502,8 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
         qemu_mutex_lock(&mem_map_lock);
         safe_ref = phys_page_lookup(page, &obj_mrs);
         qemu_mutex_unlock(&mem_map_lock);
-        if (safe_ref == 0) {
+
+        if (safe_ref == 0 && context == 1) {
             qemu_mutex_lock_iothread();
             qemu_mutex_lock(&mem_map_lock);
             /* At the 2nd try, mem map can change, so need to judge it again */
@@ -3511,7 +3514,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
             }
         }
         section = &obj_mrs;
-
+        if (context == 1) {
+            nested_dma = thread->mmio_request_pending++ > 0 ? 1 : 0;
+        }
         if (is_write) {
             if (!memory_region_is_ram(section->mr)) {
                 target_phys_addr_t addr1;
@@ -3521,17 +3526,23 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                 if (l >= 4 && ((addr1 & 3) == 0)) {
                     /* 32 bit write access */
                     val = ldl_p(buf);
-                    io_mem_write(section->mr, addr1, val, 4);
+                    if (!nested_dma) {
+                        io_mem_write(section->mr, addr1, val, 4);
+                    }
                     l = 4;
                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
                     /* 16 bit write access */
                     val = lduw_p(buf);
-                    io_mem_write(section->mr, addr1, val, 2);
+                    if (!nested_dma) {
+                        io_mem_write(section->mr, addr1, val, 2);
+                    }
                     l = 2;
                 } else {
                     /* 8 bit write access */
                     val = ldub_p(buf);
-                    io_mem_write(section->mr, addr1, val, 1);
+                    if (!nested_dma) {
+                        io_mem_write(section->mr, addr1, val, 1);
+                    }
                     l = 1;
                 }
             } else if (!section->readonly) {
@@ -3552,24 +3563,31 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
             }
         } else {
             if (!(memory_region_is_ram(section->mr) ||
-                  memory_region_is_romd(section->mr))) {
+                  memory_region_is_romd(section->mr)) &&
+                    !nested_dma) {
                 target_phys_addr_t addr1;
                 /* I/O case */
                 addr1 = memory_region_section_addr(section, addr);
                 if (l >= 4 && ((addr1 & 3) == 0)) {
                     /* 32 bit read access */
-                    val = io_mem_read(section->mr, addr1, 4);
-                    stl_p(buf, val);
+                    if (!nested_dma) {
+                        val = io_mem_read(section->mr, addr1, 4);
+                        stl_p(buf, val);
+                    }
                     l = 4;
                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
                     /* 16 bit read access */
-                    val = io_mem_read(section->mr, addr1, 2);
-                    stw_p(buf, val);
+                    if (!nested_dma) {
+                        val = io_mem_read(section->mr, addr1, 2);
+                        stw_p(buf, val);
+                    }
                     l = 2;
                 } else {
                     /* 8 bit read access */
-                    val = io_mem_read(section->mr, addr1, 1);
-                    stb_p(buf, val);
+                    if (!nested_dma) {
+                        val = io_mem_read(section->mr, addr1, 1);
+                        stb_p(buf, val);
+                    }
                     l = 1;
                 }
             } else {
@@ -3586,7 +3604,11 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
         len -= l;
         buf += l;
         addr += l;
-        if (safe_ref == 0) {
+
+        if (context == 1) {
+            thread->mmio_request_pending--;
+        }
+        if (safe_ref == 0 && context == 1) {
             qemu_mutex_unlock_iothread();
         }
     }
diff --git a/hw/hw.h b/hw/hw.h
index e5cb9bf..935b045 100644
--- a/hw/hw.h
+++ b/hw/hw.h
@@ -12,6 +12,7 @@
 #include "irq.h"
 #include "qemu-file.h"
 #include "vmstate.h"
+#include "qemu-thread.h"
 
 #ifdef NEED_CPU_H
 #if TARGET_LONG_BITS == 64
diff --git a/kvm-all.c b/kvm-all.c
index 34b02c1..b3fa597 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -1562,10 +1562,12 @@ int kvm_cpu_exec(CPUArchState *env)
             break;
         case KVM_EXIT_MMIO:
             DPRINTF("handle_mmio\n");
+            set_context_type(1);
             cpu_physical_memory_rw(run->mmio.phys_addr,
                                    run->mmio.data,
                                    run->mmio.len,
                                    run->mmio.is_write);
+            set_context_type(0);
             ret = 0;
             break;
         case KVM_EXIT_IRQ_WINDOW_OPEN:
diff --git a/qemu-thread-posix.h b/qemu-thread-posix.h
index 2607b1c..9fcc6f8 100644
--- a/qemu-thread-posix.h
+++ b/qemu-thread-posix.h
@@ -12,6 +12,9 @@ struct QemuCond {
 
 struct QemuThread {
     pthread_t thread;
+    /* 0 clean; 1 mmio; 2 io */
+    int context_type;
+    int mmio_request_pending;
 };
 
 extern pthread_key_t qemu_thread_key;
diff --git a/qemu-thread.h b/qemu-thread.h
index 4a6427d..88eaf94 100644
--- a/qemu-thread.h
+++ b/qemu-thread.h
@@ -45,6 +45,8 @@ void *qemu_thread_join(QemuThread *thread);
 void qemu_thread_get_self(QemuThread *thread);
 bool qemu_thread_is_self(QemuThread *thread);
 void qemu_thread_exit(void *retval);
+int get_context_type(void);
+void set_context_type(int type);
 
 void qemu_thread_key_create(void);
 #endif
-- 
1.7.4.4



