[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[RFC PATCH 08/12] exec/memory_ldst_cached: Check address alignment if requested
From: |
Philippe Mathieu-Daudé |
Subject: |
[RFC PATCH 08/12] exec/memory_ldst_cached: Check address alignment if requested |
Date: |
Thu, 20 May 2021 13:09:15 +0200 |
If the caller requires strict alignment, check that the address
satisfies it before doing the transaction. Otherwise return
MEMTX_UNALIGNED_ERROR.
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
include/exec/memory_ldst_cached.h.inc | 42 +++++++++++++++++++++++++++
1 file changed, 42 insertions(+)
diff --git a/include/exec/memory_ldst_cached.h.inc
b/include/exec/memory_ldst_cached.h.inc
index 515beb48f47..311a9759a22 100644
--- a/include/exec/memory_ldst_cached.h.inc
+++ b/include/exec/memory_ldst_cached.h.inc
@@ -31,6 +31,13 @@ static inline uint16_t
ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
assert(addr < cache->len && 2 <= cache->len - addr);
fuzz_dma_read_cb(cache->xlat + addr, 2, cache->mrs.mr);
if (likely(cache->ptr)) {
+ if (attrs.aligned && unlikely(!QEMU_PTR_IS_ALIGNED(cache->ptr,
+ sizeof(uint16_t))))
{
+ if (result) {
+ *result = MEMTX_UNALIGNED_ERROR;
+ }
+ return (uint16_t)-1; /* XXX */
+ }
if (result) {
*result = MEMTX_OK;
}
@@ -47,6 +54,13 @@ static inline uint32_t
ADDRESS_SPACE_LD_CACHED(l)(MemoryRegionCache *cache,
assert(addr < cache->len && 4 <= cache->len - addr);
fuzz_dma_read_cb(cache->xlat + addr, 4, cache->mrs.mr);
if (likely(cache->ptr)) {
+ if (attrs.aligned && unlikely(!QEMU_PTR_IS_ALIGNED(cache->ptr,
+ sizeof(uint32_t))))
{
+ if (result) {
+ *result = MEMTX_UNALIGNED_ERROR;
+ }
+ return (uint32_t)-1; /* XXX */
+ }
if (result) {
*result = MEMTX_OK;
}
@@ -63,6 +77,13 @@ static inline uint64_t
ADDRESS_SPACE_LD_CACHED(q)(MemoryRegionCache *cache,
assert(addr < cache->len && 8 <= cache->len - addr);
fuzz_dma_read_cb(cache->xlat + addr, 8, cache->mrs.mr);
if (likely(cache->ptr)) {
+ if (attrs.aligned && unlikely(!QEMU_PTR_IS_ALIGNED(cache->ptr,
+ sizeof(uint64_t))))
{
+ if (result) {
+ *result = MEMTX_UNALIGNED_ERROR;
+ }
+ return (uint64_t)-1; /* XXX */
+ }
if (result) {
*result = MEMTX_OK;
}
@@ -89,6 +110,13 @@ static inline void
ADDRESS_SPACE_ST_CACHED(w)(MemoryRegionCache *cache,
{
assert(addr < cache->len && 2 <= cache->len - addr);
if (likely(cache->ptr)) {
+ if (attrs.aligned && unlikely(!QEMU_PTR_IS_ALIGNED(cache->ptr,
+ sizeof(uint16_t))))
{
+ if (result) {
+ *result = MEMTX_UNALIGNED_ERROR;
+ }
+ return;
+ }
if (result) {
*result = MEMTX_OK;
}
@@ -104,6 +132,13 @@ static inline void
ADDRESS_SPACE_ST_CACHED(l)(MemoryRegionCache *cache,
{
assert(addr < cache->len && 4 <= cache->len - addr);
if (likely(cache->ptr)) {
+ if (attrs.aligned && unlikely(!QEMU_PTR_IS_ALIGNED(cache->ptr,
+ sizeof(uint32_t))))
{
+ if (result) {
+ *result = MEMTX_UNALIGNED_ERROR;
+ }
+ return;
+ }
if (result) {
*result = MEMTX_OK;
}
@@ -119,6 +154,13 @@ static inline void
ADDRESS_SPACE_ST_CACHED(q)(MemoryRegionCache *cache,
{
assert(addr < cache->len && 8 <= cache->len - addr);
if (likely(cache->ptr)) {
+ if (attrs.aligned && unlikely(!QEMU_PTR_IS_ALIGNED(cache->ptr,
+ sizeof(uint64_t))))
{
+ if (result) {
+ *result = MEMTX_UNALIGNED_ERROR;
+ }
+ return;
+ }
if (result) {
*result = MEMTX_OK;
}
--
2.26.3
- [RFC PATCH 00/12] exec/memory: Experimental API to catch unaligned accesses, Philippe Mathieu-Daudé, 2021/05/20
- [RFC PATCH 01/12] exec/memory_ldst: Use correct type sizes, Philippe Mathieu-Daudé, 2021/05/20
- [RFC PATCH 02/12] exec/memattrs: Add attribute/error for address alignment, Philippe Mathieu-Daudé, 2021/05/20
- [RFC PATCH 03/12] exec/memory_ldst: Return MEMTX_UNALIGNED_ERROR for unaligned addresses, Philippe Mathieu-Daudé, 2021/05/20
- [RFC PATCH 04/12] exec/memory_ldst_cached: Sort declarations, Philippe Mathieu-Daudé, 2021/05/20
- [RFC PATCH 05/12] exec/memory_ldst_cached: Use correct type size, Philippe Mathieu-Daudé, 2021/05/20
- [RFC PATCH 06/12] exec/memory_ldst_cached: Set MemTxResult on success, Philippe Mathieu-Daudé, 2021/05/20
- [RFC PATCH 07/12] exec/memory_ldst_cached: Document aligned addresses are expected, Philippe Mathieu-Daudé, 2021/05/20
- [RFC PATCH 08/12] exec/memory_ldst_cached: Check address alignment if requested,
Philippe Mathieu-Daudé <=
- [RFC PATCH 09/12] hw/virtio: Use correct type sizes, Philippe Mathieu-Daudé, 2021/05/20
- [RFC PATCH 10/12] hw/virtio: Extract virtio_lduw_phys_cached_with_attrs(), Philippe Mathieu-Daudé, 2021/05/20
- [RFC PATCH 12/12] hw/virtio: Display error if vring flag field is not aligned, Philippe Mathieu-Daudé, 2021/05/20
- [RFC PATCH 11/12] hw/virtio: Have vring_avail_flags() return a boolean value, Philippe Mathieu-Daudé, 2021/05/20
- Re: [RFC PATCH 00/12] exec/memory: Experimental API to catch unaligned accesses, Philippe Mathieu-Daudé, 2021/05/31