From: Alvise Rigo
Subject: [Qemu-devel] [RFC v8 09/14] softmmu: Honor the new exclusive bitmap
Date: Tue, 19 Apr 2016 15:39:26 +0200
The pages set as exclusive (clean) in the DIRTY_MEMORY_EXCLUSIVE bitmap
must have their TLB entries flagged with TLB_EXCL. Accesses to pages
with the TLB_EXCL flag set have to be handled specially, since they can
invalidate an open LL/SC transaction.

Modify the TLB entry generation to honor the new bitmap and extend
softmmu_template to handle accesses made to guest pages marked as
exclusive. The TLB_EXCL flag is used only for normal RAM memory;
exclusive accesses to MMIO memory are still not supported, but they
will be with the next patch.
Suggested-by: Jani Kokkonen <address@hidden>
Suggested-by: Claudio Fontana <address@hidden>
Signed-off-by: Alvise Rigo <address@hidden>
---
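
Note for reviewers (illustration, not part of the patch): setting
TLB_EXCL in te->addr_write is enough to divert every store to the page
through the slow path, because the store helpers treat any flag bit in
the low bits of a TLB address as "not a plain RAM page". A minimal
sketch of that dispatch; the flag values are made up for the example
and merely stand in for the real definitions in cpu-all.h:

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK (~(((uint64_t)1 << TARGET_PAGE_BITS) - 1))
    #define TLB_NOTDIRTY     (1 << 0)   /* illustrative values only */
    #define TLB_MMIO         (1 << 1)
    #define TLB_EXCL         (1 << 2)

    /* Mirrors the dispatch at the top of helper_{le,be}_st_name below:
     * any flag bit set in addr_write forces the slow path, and
     * TLB_EXCL is checked before falling back to the MMIO case. */
    static const char *classify_store(uint64_t tlb_addr)
    {
        if (tlb_addr & ~TARGET_PAGE_MASK) {     /* some flag bit set */
            if (tlb_addr & TLB_EXCL) {
                return "slow path: do_excl_store";
            }
            return "slow path: do_mmio_store";
        }
        return "fast path: plain RAM store";
    }

    int main(void)
    {
        uint64_t page = (uint64_t)0x40000 & TARGET_PAGE_MASK;
        printf("%s\n", classify_store(page | TLB_EXCL));
        printf("%s\n", classify_store(page));
        return 0;
    }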
cputlb.c | 36 ++++++++++++++++++++++++++----
softmmu_template.h | 65 +++++++++++++++++++++++++++++++++++++++++++++++-------
2 files changed, 89 insertions(+), 12 deletions(-)
diff --git a/cputlb.c b/cputlb.c
index 02b0d14..e5df3a5 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -416,11 +416,20 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
|| memory_region_is_romd(section->mr)) {
/* Write access calls the I/O callback. */
te->addr_write = address | TLB_MMIO;
- } else if (memory_region_is_ram(section->mr)
- && cpu_physical_memory_is_clean(section->mr->ram_addr
- + xlat)) {
- te->addr_write = address | TLB_NOTDIRTY;
} else {
+ if (memory_region_is_ram(section->mr)
+ && cpu_physical_memory_is_clean(section->mr->ram_addr
+ + xlat)) {
+ address |= TLB_NOTDIRTY;
+ }
+ /* Only normal RAM accesses need the TLB_EXCL flag to handle
+ * exclusive store operations. */
+ if (!(address & TLB_MMIO) &&
+ cpu_physical_memory_is_excl(section->mr->ram_addr + xlat)) {
+ /* There is at least one vCPU that has flagged the address as
+ * exclusive. */
+ address |= TLB_EXCL;
+ }
te->addr_write = address;
}
} else {
@@ -496,6 +505,25 @@ static inline void excl_history_put_addr(hwaddr addr)
excl_history.c_array[excl_history.last_idx] = addr & TARGET_PAGE_MASK;
}
+/* For every vCPU, compare its exclusive protected range against the given
+ * range and reset it in case of overlap. Since only one vCPU is running at
+ * once, no lock has to be held to guard this operation. */
+static inline void reset_other_cpus_colliding_ll_addr(hwaddr addr, hwaddr size)
+{
+ CPUState *cpu;
+
+ CPU_FOREACH(cpu) {
+ if (current_cpu != cpu &&
+ cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
+ ranges_overlap(cpu->excl_protected_range.begin,
+ cpu->excl_protected_range.end -
+ cpu->excl_protected_range.begin,
+ addr, size)) {
+ cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
+ }
+ }
+}
+
#define MMUSUFFIX _mmu
/* Generates LoadLink/StoreConditional helpers in softmmu_template.h */
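
Note (illustration, not part of the patch): the collision check above
relies on QEMU's ranges_overlap() from include/qemu/range.h, which
tests whether two address ranges intersect. A standalone sketch with
equivalent semantics for half-open ranges, plus a small usage example:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True iff [first1, first1 + len1) and [first2, first2 + len2)
     * intersect; this is how the colliding-LL reset decides whether a
     * store falls inside another vCPU's protected range. */
    static bool overlap(uint64_t first1, uint64_t len1,
                        uint64_t first2, uint64_t len2)
    {
        return first1 < first2 + len2 && first2 < first1 + len1;
    }

    int main(void)
    {
        /* An LL-protected range [0x1000, 0x1008) is invalidated by a
         * 4-byte store at 0x1004, but not by one at 0x1008. */
        printf("%d\n", overlap(0x1000, 8, 0x1004, 4));  /* prints 1 */
        printf("%d\n", overlap(0x1000, 8, 0x1008, 4));  /* prints 0 */
        return 0;
    }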
diff --git a/softmmu_template.h b/softmmu_template.h
index ede1240..2934a0c 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -469,6 +469,43 @@ static inline void smmu_helper(do_ram_store)(CPUArchState *env,
#endif
}
+static inline void smmu_helper(do_excl_store)(CPUArchState *env,
+ bool little_endian,
+ DATA_TYPE val, target_ulong addr,
+ TCGMemOpIdx oi, int index,
+ uintptr_t retaddr)
+{
+ CPUIOTLBEntry *iotlbentry = &env->iotlb[get_mmuidx(oi)][index];
+ CPUState *cpu = ENV_GET_CPU(env);
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ /* The slow-path has been forced since we are writing to
+ * exclusive-protected memory. */
+ hwaddr hw_addr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+
+ /* The function reset_other_cpus_colliding_ll_addr could have reset
+ * the exclusive address. Fail the SC in this case.
+ * N.B.: here excl_succeeded == true means that the caller is
+ * helper_stcond_name in softmmu_llsc_template.
+ * On the contrary, excl_succeeded == false occurs when a VCPU is
+ * writing through a normal store to a page with the TLB_EXCL bit set. */
+ if (cpu->excl_succeeded) {
+ if (!cc->cpu_valid_excl_access(cpu, hw_addr, DATA_SIZE)) {
+ /* The vCPU is SC-ing to an unprotected address. */
+ cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
+ cpu->excl_succeeded = false;
+
+ return;
+ }
+ }
+
+ smmu_helper(do_ram_store)(env, little_endian, val, addr, oi,
+ get_mmuidx(oi), index, retaddr);
+
+ reset_other_cpus_colliding_ll_addr(hw_addr, DATA_SIZE);
+
+ return;
+}
+
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
@@ -493,11 +530,17 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
}
- /* Handle an IO access. */
+ /* Handle an IO access or exclusive access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- smmu_helper(do_mmio_store)(env, true, val, addr, oi, mmu_idx, index,
- retaddr);
- return;
+ if (tlb_addr & TLB_EXCL) {
+ smmu_helper(do_excl_store)(env, true, val, addr, oi, index,
+ retaddr);
+ return;
+ } else {
+ smmu_helper(do_mmio_store)(env, true, val, addr, oi, mmu_idx,
+ index, retaddr);
+ return;
+ }
}
smmu_helper(do_ram_store)(env, true, val, addr, oi, mmu_idx, index,
@@ -529,11 +572,17 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
}
- /* Handle an IO access. */
+ /* Handle an IO access or exclusive access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- smmu_helper(do_mmio_store)(env, false, val, addr, oi, mmu_idx, index,
- retaddr);
- return;
+ if (tlb_addr & TLB_EXCL) {
+ smmu_helper(do_excl_store)(env, false, val, addr, oi, index,
+ retaddr);
+ return;
+ } else {
+ smmu_helper(do_mmio_store)(env, false, val, addr, oi, mmu_idx,
+ index, retaddr);
+ return;
+ }
}
smmu_helper(do_ram_store)(env, false, val, addr, oi, mmu_idx, index,
--
2.8.0
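
Appendix (illustration, not code from this series): how the
excl_succeeded flag ties the SC helper to do_excl_store. The names
below (VCPU, load_link, excl_store, store_conditional) are made up for
the example; only the control flow mirrors the comments in the patch.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXCLUSIVE_RESET_ADDR UINT64_MAX

    typedef struct {
        bool excl_succeeded;    /* true only while an SC is in flight */
        uint64_t begin, end;    /* LL-protected range */
    } VCPU;

    /* Plays the role of the LL helper: record the protected range. */
    static void load_link(VCPU *cpu, uint64_t addr, uint64_t size)
    {
        cpu->begin = addr;
        cpu->end = addr + size;
    }

    /* Plays the role of do_excl_store(): a plain store to a TLB_EXCL
     * page (excl_succeeded == false) just writes; an SC first checks
     * that it still owns the protected range. */
    static void excl_store(VCPU *cpu, uint64_t addr, uint64_t size)
    {
        if (cpu->excl_succeeded &&
            (cpu->begin == EXCLUSIVE_RESET_ADDR ||
             addr < cpu->begin || addr + size > cpu->end)) {
            cpu->begin = EXCLUSIVE_RESET_ADDR;
            cpu->excl_succeeded = false;        /* the SC fails */
            return;
        }
        /* ...perform the RAM store here, then reset colliding LL
         * addresses on the other vCPUs... */
    }

    /* Plays the role of helper_stcond_name: returns 0 on success and
     * 1 on failure, the usual LL/SC convention. */
    static int store_conditional(VCPU *cpu, uint64_t addr, uint64_t size)
    {
        int failed;

        cpu->excl_succeeded = true;
        excl_store(cpu, addr, size);
        failed = !cpu->excl_succeeded;
        cpu->excl_succeeded = false;
        return failed;
    }

    int main(void)
    {
        VCPU cpu = { .begin = EXCLUSIVE_RESET_ADDR };

        load_link(&cpu, 0x1000, 4);
        printf("SC, range intact: %d\n",
               store_conditional(&cpu, 0x1000, 4));    /* 0: success */

        cpu.begin = EXCLUSIVE_RESET_ADDR;   /* another vCPU stored here */
        printf("SC, range reset:  %d\n",
               store_conditional(&cpu, 0x1000, 4));    /* 1: failure */
        return 0;
    }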