[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [RFC v7 13/16] softmmu: Add history of excl accesses
From: |
Alvise Rigo |
Subject: |
[Qemu-devel] [RFC v7 13/16] softmmu: Add history of excl accesses |
Date: |
Fri, 29 Jan 2016 10:32:42 +0100 |
Add a circular buffer to store the hw addresses used in the last
EXCLUSIVE_HISTORY_LEN exclusive accesses.
When an address is popped from the buffer, its page will be set as not
exclusive. In this way, we avoid:
- frequent set/unset of a page (causing frequent flushes as well)
- the possibility of forgetting that the EXCL bit is set.
Suggested-by: Jani Kokkonen <address@hidden>
Suggested-by: Claudio Fontana <address@hidden>
Signed-off-by: Alvise Rigo <address@hidden>
---
cputlb.c | 29 +++++++++++++++++++----------
exec.c | 19 +++++++++++++++++++
include/qom/cpu.h | 8 ++++++++
softmmu_llsc_template.h | 1 +
vl.c | 3 +++
5 files changed, 50 insertions(+), 10 deletions(-)
diff --git a/cputlb.c b/cputlb.c
index 06ce2da..f3c4d97 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -395,16 +395,6 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong
vaddr,
env->tlb_v_table[mmu_idx][vidx] = *te;
env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
- if (unlikely(!(te->addr_write & TLB_MMIO) && (te->addr_write & TLB_EXCL)))
{
- /* We are removing an exclusive entry, set the page to dirty. This
- * is not be necessary if the vCPU has performed both SC and LL. */
- hwaddr hw_addr = (env->iotlb[mmu_idx][index].addr & TARGET_PAGE_MASK) +
- (te->addr_write & TARGET_PAGE_MASK);
- if (!cpu->ll_sc_context) {
- cpu_physical_memory_unset_excl(hw_addr);
- }
- }
-
/* refill the tlb */
env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
env->iotlb[mmu_idx][index].attrs = attrs;
@@ -517,6 +507,25 @@ static inline bool lookup_and_reset_cpus_ll_addr(hwaddr
addr, hwaddr size)
return ret;
}
+extern CPUExclusiveHistory excl_history;
+static inline void excl_history_put_addr(hwaddr addr)
+{
+ hwaddr last;
+
+ /* Calculate the index of the next exclusive address */
+ excl_history.last_idx = (excl_history.last_idx + 1) % excl_history.length;
+
+ last = excl_history.c_array[excl_history.last_idx];
+
+ /* Unset EXCL bit of the oldest entry */
+ if (last != EXCLUSIVE_RESET_ADDR) {
+ cpu_physical_memory_unset_excl(last);
+ }
+
+ /* Add a new address, overwriting the oldest one */
+ excl_history.c_array[excl_history.last_idx] = addr & TARGET_PAGE_MASK;
+}
+
#define MMUSUFFIX _mmu
/* Generates LoadLink/StoreConditional helpers in softmmu_template.h */
diff --git a/exec.c b/exec.c
index 51f366d..2e123f1 100644
--- a/exec.c
+++ b/exec.c
@@ -177,6 +177,25 @@ struct CPUAddressSpace {
MemoryListener tcg_as_listener;
};
+/* Exclusive memory support */
+CPUExclusiveHistory excl_history;
+void cpu_exclusive_history_init(void)
+{
+ /* Initialize exclusive history for atomic instruction handling. */
+ if (tcg_enabled()) {
+ g_assert(EXCLUSIVE_HISTORY_CPU_LEN * max_cpus <= UINT16_MAX);
+ excl_history.length = EXCLUSIVE_HISTORY_CPU_LEN * max_cpus;
+ excl_history.c_array = g_malloc(excl_history.length * sizeof(hwaddr));
+ memset(excl_history.c_array, -1, excl_history.length * sizeof(hwaddr));
+ }
+}
+
+void cpu_exclusive_history_free(void)
+{
+ if (tcg_enabled()) {
+ g_free(excl_history.c_array);
+ }
+}
#endif
#if !defined(CONFIG_USER_ONLY)
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 6f6c1c0..0452fd0 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -227,7 +227,15 @@ struct kvm_run;
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
/* Atomic insn translation TLB support. */
+typedef struct CPUExclusiveHistory {
+ uint16_t last_idx; /* index of last insertion */
+ uint16_t length; /* history's length, it depends on smp_cpus */
+ hwaddr *c_array; /* history's circular array */
+} CPUExclusiveHistory;
#define EXCLUSIVE_RESET_ADDR ULLONG_MAX
+#define EXCLUSIVE_HISTORY_CPU_LEN 256
+void cpu_exclusive_history_init(void);
+void cpu_exclusive_history_free(void);
/**
* CPUState:
diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
index b4712ba..b4e7f9d 100644
--- a/softmmu_llsc_template.h
+++ b/softmmu_llsc_template.h
@@ -75,6 +75,7 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong
addr,
* to request any flush. */
if (!cpu_physical_memory_is_excl(hw_addr)) {
cpu_physical_memory_set_excl(hw_addr);
+ excl_history_put_addr(hw_addr);
CPU_FOREACH(cpu) {
if (current_cpu != cpu) {
tlb_flush(cpu, 1);
diff --git a/vl.c b/vl.c
index f043009..b22d99b 100644
--- a/vl.c
+++ b/vl.c
@@ -547,6 +547,7 @@ static void res_free(void)
{
g_free(boot_splash_filedata);
boot_splash_filedata = NULL;
+ cpu_exclusive_history_free();
}
static int default_driver_check(void *opaque, QemuOpts *opts, Error **errp)
@@ -4322,6 +4323,8 @@ int main(int argc, char **argv, char **envp)
configure_accelerator(current_machine);
+ cpu_exclusive_history_init();
+
if (qtest_chrdev) {
qtest_init(qtest_chrdev, qtest_log, &error_fatal);
}
--
2.7.0
- [Qemu-devel] [RFC v7 08/16] softmmu: Honor the new exclusive bitmap, (continued)
- [Qemu-devel] [RFC v7 08/16] softmmu: Honor the new exclusive bitmap, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 01/16] exec.c: Add new exclusive bitmap to ram_list, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 04/16] softmmu: Simplify helper_*_st_name, wrap RAM code, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 09/16] softmmu: Include MMIO/invalid exclusive accesses, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 15/16] target-arm: cpu64: use custom set_excl hook, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 10/16] softmmu: Protect MMIO exclusive range, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 14/16] target-arm: translate: Use ld/st excl for atomic insns, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 16/16] target-arm: aarch64: add atomic instructions, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 07/16] softmmu: Add helpers for a new slowpath, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 12/16] configure: Use slow-path for atomic only when the softmmu is enabled, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 13/16] softmmu: Add history of excl accesses,
Alvise Rigo <=
- [Qemu-devel] [RFC v7 11/16] tcg: Create new runtime helpers for excl accesses, Alvise Rigo, 2016/01/29