[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 11/11] target/i386: clear C-bit when walking SEV guest page table
From: |
Ashish Kalra |
Subject: |
[PATCH 11/11] target/i386: clear C-bit when walking SEV guest page table |
Date: |
Mon, 16 Nov 2020 18:53:02 +0000 |
From: Brijesh Singh <brijesh.singh@amd.com>
In an SEV-enabled guest, the PTE entries will have the C-bit set; we need
to clear the C-bit when walking the page table.
This ensures that the proper page address translation occurs and, with the
C-bit cleared, the true physical address is obtained.
The pte_mask to be used during guest page table walk is added as a
vendor specific assist/hook as part of the new MemoryDebugOps and
available via the new debug API interface cpu_physical_memory_pte_mask_debug().
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
---
include/exec/cpu-common.h | 3 ++
include/exec/memory.h | 1 +
softmmu/physmem.c | 13 +++++++-
target/i386/monitor.c | 70 +++++++++++++++++++++++++--------------
target/i386/sev.c | 3 +-
5 files changed, 63 insertions(+), 27 deletions(-)
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index d2089e6873..3374573d39 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -96,6 +96,9 @@ static inline void cpu_physical_memory_write(hwaddr addr,
{
cpu_physical_memory_rw(addr, (void *)buf, len, true);
}
+
+uint64_t cpu_physical_memory_pte_mask_debug(void);
+
void *cpu_physical_memory_map(hwaddr addr,
hwaddr *plen,
bool is_write);
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 74f2dcec00..ebe8ffc1eb 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -2428,6 +2428,7 @@ typedef struct MemoryDebugOps {
MemTxResult (*write)(AddressSpace *as, hwaddr phys_addr,
MemTxAttrs attrs, const void *buf,
hwaddr len);
+ uint64_t (*pte_mask)(void);
} MemoryDebugOps;
void address_space_set_debug_ops(const MemoryDebugOps *ops);
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 6945bd5efe..fc6b5588fc 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -166,9 +166,15 @@ struct DirtyBitmapSnapshot {
unsigned long dirty[];
};
+static uint64_t address_space_pte_mask(void)
+{
+ return ~0;
+}
+
static const MemoryDebugOps default_debug_ops = {
.read = address_space_read,
- .write = address_space_write_rom
+ .write = address_space_write_rom,
+ .pte_mask = address_space_pte_mask
};
static const MemoryDebugOps *debug_ops = &default_debug_ops;
@@ -3401,6 +3407,11 @@ void cpu_physical_memory_rw_debug(hwaddr addr, uint8_t
*buf,
}
+uint64_t cpu_physical_memory_pte_mask_debug(void)
+{
+ return debug_ops->pte_mask();
+}
+
int64_t address_space_cache_init(MemoryRegionCache *cache,
AddressSpace *as,
hwaddr addr,
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
index 9ca9c677a5..c73cac04cb 100644
--- a/target/i386/monitor.c
+++ b/target/i386/monitor.c
@@ -106,16 +106,20 @@ static void tlb_info_pae32(Monitor *mon, CPUArchState
*env)
unsigned int l1, l2, l3;
uint64_t pdpe, pde, pte;
uint64_t pdp_addr, pd_addr, pt_addr;
+ uint64_t me_mask;
+
+ me_mask = cpu_physical_memory_pte_mask_debug();
pdp_addr = env->cr[3] & ~0x1f;
+ pdp_addr &= me_mask;
for (l1 = 0; l1 < 4; l1++) {
cpu_physical_memory_read_debug(pdp_addr + l1 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = le64_to_cpu(pdpe & me_mask);
if (pdpe & PG_PRESENT_MASK) {
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
cpu_physical_memory_read_debug(pd_addr + l2 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = le64_to_cpu(pde & me_mask);
if (pde & PG_PRESENT_MASK) {
if (pde & PG_PSE_MASK) {
/* 2M pages with PAE, CR4.PSE is ignored */
@@ -126,7 +130,7 @@ static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
for (l3 = 0; l3 < 512; l3++) {
cpu_physical_memory_read_debug(pt_addr + l3 * 8,
&pte, 8);
- pte = le64_to_cpu(pte);
+ pte = le64_to_cpu(pte & me_mask);
if (pte & PG_PRESENT_MASK) {
print_pte(mon, env, (l1 << 30) + (l2 << 21)
+ (l3 << 12),
@@ -148,10 +152,13 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env,
uint64_t l1, l2, l3, l4;
uint64_t pml4e, pdpe, pde, pte;
uint64_t pdp_addr, pd_addr, pt_addr;
+ uint64_t me_mask;
+
+ me_mask = cpu_physical_memory_pte_mask_debug();
for (l1 = 0; l1 < 512; l1++) {
cpu_physical_memory_read_debug(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
+ pml4e = le64_to_cpu(pml4e & me_mask);
if (!(pml4e & PG_PRESENT_MASK)) {
continue;
}
@@ -159,7 +166,7 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env,
pdp_addr = pml4e & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
cpu_physical_memory_read_debug(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = le64_to_cpu(pdpe & me_mask);
if (!(pdpe & PG_PRESENT_MASK)) {
continue;
}
@@ -174,7 +181,7 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env,
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
cpu_physical_memory_read_debug(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = le64_to_cpu(pde & me_mask);
if (!(pde & PG_PRESENT_MASK)) {
continue;
}
@@ -191,7 +198,7 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env,
cpu_physical_memory_read_debug(pt_addr
+ l4 * 8,
&pte, 8);
- pte = le64_to_cpu(pte);
+ pte = le64_to_cpu(pte & me_mask);
if (pte & PG_PRESENT_MASK) {
print_pte(mon, env, (l0 << 48) + (l1 << 39) +
(l2 << 30) + (l3 << 21) + (l4 << 12),
@@ -208,13 +215,17 @@ static void tlb_info_la57(Monitor *mon, CPUArchState *env)
uint64_t l0;
uint64_t pml5e;
uint64_t pml5_addr;
+ uint64_t me_mask;
- pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
+ me_mask = cpu_physical_memory_pte_mask_debug();
+
+ pml5_addr = env->cr[3] & 0x3fffffffff000ULL & me_mask;
for (l0 = 0; l0 < 512; l0++) {
cpu_physical_memory_read_debug(pml5_addr + l0 * 8, &pml5e, 8);
- pml5e = le64_to_cpu(pml5e);
+ pml5e = le64_to_cpu(pml5e & me_mask);
if (pml5e & PG_PRESENT_MASK) {
- tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
+ tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL &
+ cpu_physical_memory_pte_mask_debug());
}
}
}
@@ -326,19 +337,22 @@ static void mem_info_pae32(Monitor *mon, CPUArchState
*env)
uint64_t pdpe, pde, pte;
uint64_t pdp_addr, pd_addr, pt_addr;
hwaddr start, end;
+ uint64_t me_mask;
- pdp_addr = env->cr[3] & ~0x1f;
+ me_mask = cpu_physical_memory_pte_mask_debug();
+
+ pdp_addr = env->cr[3] & ~0x1f & me_mask;
last_prot = 0;
start = -1;
for (l1 = 0; l1 < 4; l1++) {
cpu_physical_memory_read_debug(pdp_addr + l1 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = le64_to_cpu(pdpe & me_mask);
end = l1 << 30;
if (pdpe & PG_PRESENT_MASK) {
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
cpu_physical_memory_read_debug(pd_addr + l2 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = le64_to_cpu(pde & me_mask);
end = (l1 << 30) + (l2 << 21);
if (pde & PG_PRESENT_MASK) {
if (pde & PG_PSE_MASK) {
@@ -350,7 +364,7 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env)
for (l3 = 0; l3 < 512; l3++) {
cpu_physical_memory_read_debug(pt_addr + l3 * 8,
&pte, 8);
- pte = le64_to_cpu(pte);
+ pte = le64_to_cpu(pte & me_mask);
end = (l1 << 30) + (l2 << 21) + (l3 << 12);
if (pte & PG_PRESENT_MASK) {
prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
@@ -383,19 +397,22 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env)
uint64_t l1, l2, l3, l4;
uint64_t pml4e, pdpe, pde, pte;
uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;
+ uint64_t me_mask;
+
+ me_mask = cpu_physical_memory_pte_mask_debug();
- pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
+ pml4_addr = env->cr[3] & 0x3fffffffff000ULL & me_mask;
last_prot = 0;
start = -1;
for (l1 = 0; l1 < 512; l1++) {
cpu_physical_memory_read_debug(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
+ pml4e = le64_to_cpu(pml4e & me_mask);
end = l1 << 39;
if (pml4e & PG_PRESENT_MASK) {
pdp_addr = pml4e & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
cpu_physical_memory_read_debug(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = le64_to_cpu(pdpe & me_mask);
end = (l1 << 39) + (l2 << 30);
if (pdpe & PG_PRESENT_MASK) {
if (pdpe & PG_PSE_MASK) {
@@ -408,7 +425,7 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env)
for (l3 = 0; l3 < 512; l3++) {
cpu_physical_memory_read_debug(pd_addr + l3 * 8,
&pde, 8);
- pde = le64_to_cpu(pde);
+ pde = le64_to_cpu(pde & me_mask);
end = (l1 << 39) + (l2 << 30) + (l3 << 21);
if (pde & PG_PRESENT_MASK) {
if (pde & PG_PSE_MASK) {
@@ -423,7 +440,7 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env)
cpu_physical_memory_read_debug(pt_addr
+ l4 * 8,
&pte, 8);
- pte = le64_to_cpu(pte);
+ pte = le64_to_cpu(pte & me_mask);
end = (l1 << 39) + (l2 << 30) +
(l3 << 21) + (l4 << 12);
if (pte & PG_PRESENT_MASK) {
@@ -464,13 +481,16 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
uint64_t l0, l1, l2, l3, l4;
uint64_t pml5e, pml4e, pdpe, pde, pte;
uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;
+ uint64_t me_mask;
+
+ me_mask = cpu_physical_memory_pte_mask_debug();
- pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
+ pml5_addr = env->cr[3] & 0x3fffffffff000ULL & me_mask;
last_prot = 0;
start = -1;
for (l0 = 0; l0 < 512; l0++) {
cpu_physical_memory_read_debug(pml5_addr + l0 * 8, &pml5e, 8);
- pml5e = le64_to_cpu(pml5e);
+ pml5e = le64_to_cpu(pml5e & me_mask);
end = l0 << 48;
if (!(pml5e & PG_PRESENT_MASK)) {
prot = 0;
@@ -481,7 +501,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
pml4_addr = pml5e & 0x3fffffffff000ULL;
for (l1 = 0; l1 < 512; l1++) {
cpu_physical_memory_read_debug(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
+ pml4e = le64_to_cpu(pml4e & me_mask);
end = (l0 << 48) + (l1 << 39);
if (!(pml4e & PG_PRESENT_MASK)) {
prot = 0;
@@ -492,7 +512,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
pdp_addr = pml4e & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
cpu_physical_memory_read_debug(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = le64_to_cpu(pdpe & me_mask);
end = (l0 << 48) + (l1 << 39) + (l2 << 30);
if (pdpe & PG_PRESENT_MASK) {
prot = 0;
@@ -511,7 +531,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
cpu_physical_memory_read_debug(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = le64_to_cpu(pde & me_mask);
end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
if (pde & PG_PRESENT_MASK) {
prot = 0;
@@ -531,7 +551,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
for (l4 = 0; l4 < 512; l4++) {
cpu_physical_memory_read_debug(pt_addr + l4 * 8,
&pte, 8);
- pte = le64_to_cpu(pte);
+ pte = le64_to_cpu(pte & me_mask);
end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
(l3 << 21) + (l4 << 12);
if (pte & PG_PRESENT_MASK) {
diff --git a/target/i386/sev.c b/target/i386/sev.c
index b942593bc8..97726a639f 100644
--- a/target/i386/sev.c
+++ b/target/i386/sev.c
@@ -1003,7 +1003,8 @@ MemTxResult sev_address_space_read_debug(AddressSpace
*as, hwaddr addr,
static const MemoryDebugOps sev_debug_ops = {
.read = sev_address_space_read_debug,
- .write = sev_address_space_write_rom_debug
+ .write = sev_address_space_write_rom_debug,
+ .pte_mask = sev_get_me_mask,
};
void
--
2.17.1
- [PATCH 01/11] memattrs: add debug attribute, (continued)
- [PATCH 01/11] memattrs: add debug attribute, Ashish Kalra, 2020/11/16
- [PATCH 02/11] exec: Add new MemoryDebugOps., Ashish Kalra, 2020/11/16
- [PATCH 03/11] exec: add ram_debug_ops support, Ashish Kalra, 2020/11/16
- [PATCH 04/11] exec: Add address_space_read and address_space_write debug helpers., Ashish Kalra, 2020/11/16
- [PATCH 05/11] exec: add debug version of physical memory read and write API, Ashish Kalra, 2020/11/16
- [PATCH 06/11] monitor/i386: use debug APIs when accessing guest memory, Ashish Kalra, 2020/11/16
- [PATCH 07/11] kvm: introduce debug memory encryption API, Ashish Kalra, 2020/11/16
- [PATCH 08/11] sev/i386: add debug encrypt and decrypt commands, Ashish Kalra, 2020/11/16
- [PATCH 09/11] hw/i386: set ram_debug_ops when memory encryption is enabled, Ashish Kalra, 2020/11/16
- [PATCH 10/11] sev/i386: add SEV specific MemoryDebugOps., Ashish Kalra, 2020/11/16
- [PATCH 11/11] target/i386: clear C-bit when walking SEV guest page table,
Ashish Kalra <=