[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 10/15] x86_64: expand and shrink messages in copy{in, out}msg rou
From: |
Luca Dariz |
Subject: |
[PATCH 10/15] x86_64: expand and shrink messages in copy{in, out}msg routines |
Date: |
Tue, 28 Jun 2022 12:10:49 +0200 |
* i386/i386/copy_user.h: new file to handle 32/64 bit differences
(currently only msg_usize())
* include/mach/message.h: add mach_msg_user_header_t using port names
instead of ports
* ipc/ipc_kmsg.c:
- use mach_msg_user_header_t
* ipc/ipc_mqueue.c: use msg_usize() to check if we can actually
receive the message
* ipc/mach_msg.c: Likewise for continuations in receive path
* x86_64/Makefrag.am: add x86_64/copy_user.c
* x86_64/copy_user.c: new file to handle message expansion and
shrinking during copyinmsg/copyoutmsg for 64 bit kernels.
- port names -> port pointers on all 64-bit builds
- 32-bit pointer -> 64 bit pointer when using 32-bit userspace
* x86_64/locore.S: remove copyinmsg() and copyoutmsg()
Note that this depends on this change in mig for the correct size in msgh:
* fill msg size in the header for user stubs
---
i386/i386/copy_user.h | 22 ++++
include/mach/message.h | 11 ++
ipc/ipc_kmsg.c | 30 ++++-
ipc/ipc_mqueue.c | 5 +-
ipc/mach_msg.c | 19 ++-
x86_64/Makefrag.am | 1 +
x86_64/copy_user.c | 280 +++++++++++++++++++++++++++++++++++++++++
x86_64/locore.S | 79 ------------
8 files changed, 351 insertions(+), 96 deletions(-)
create mode 100644 i386/i386/copy_user.h
create mode 100644 x86_64/copy_user.c
diff --git a/i386/i386/copy_user.h b/i386/i386/copy_user.h
new file mode 100644
index 00000000..ab932401
--- /dev/null
+++ b/i386/i386/copy_user.h
@@ -0,0 +1,22 @@
+
+#ifndef COPY_USER_H
+#define COPY_USER_H
+
+#include <sys/types.h>
+
+#include <machine/locore.h>
+#include <mach/message.h>
+
+// XXX we could add another field to kmsg to store the user-side size, but then we
+// should check if we can obtain it for rpc and notifications originating from
+// the kernel
+#ifndef __x86_64__
+static inline size_t msg_usize(const mach_msg_header_t *kmsg)
+{
+ return kmsg->msgh_size;
+}
+#else /* __x86_64__ */
+size_t msg_usize(const mach_msg_header_t *kmsg);
+#endif /* __x86_64__ */
+
+#endif /* COPY_USER_H */
diff --git a/include/mach/message.h b/include/mach/message.h
index 0a7297e1..e1a8d663 100644
--- a/include/mach/message.h
+++ b/include/mach/message.h
@@ -132,6 +132,7 @@ typedef unsigned int mach_msg_size_t;
typedef natural_t mach_msg_seqno_t;
typedef integer_t mach_msg_id_t;
+/* full header structure, may have different size in user/kernel spaces */
typedef struct {
mach_msg_bits_t msgh_bits;
mach_msg_size_t msgh_size;
@@ -144,6 +145,16 @@ typedef struct {
mach_msg_id_t msgh_id;
} mach_msg_header_t;
+/* user-side header format, needed in the kernel */
+typedef struct {
+ mach_msg_bits_t msgh_bits;
+ mach_msg_size_t msgh_size;
+ mach_port_name_t msgh_remote_port;
+ mach_port_name_t msgh_local_port;
+ mach_port_seqno_t msgh_seqno;
+ mach_msg_id_t msgh_id;
+} mach_msg_user_header_t;
+
/*
* There is no fixed upper bound to the size of Mach messages.
*/
diff --git a/ipc/ipc_kmsg.c b/ipc/ipc_kmsg.c
index 09801924..8f7045ee 100644
--- a/ipc/ipc_kmsg.c
+++ b/ipc/ipc_kmsg.c
@@ -42,6 +42,7 @@
#include <mach/message.h>
#include <mach/port.h>
#include <machine/locore.h>
+#include <machine/copy_user.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <vm/vm_map.h>
@@ -503,7 +504,7 @@ ipc_kmsg_get(
{
ipc_kmsg_t kmsg;
- if ((size < sizeof(mach_msg_header_t)) || (size & 3))
+ if ((size < sizeof(mach_msg_user_header_t)) || (size & 3))
return MACH_SEND_MSG_TOO_SMALL;
if (size <= IKM_SAVED_MSG_SIZE) {
@@ -529,7 +530,6 @@ ipc_kmsg_get(
return MACH_SEND_INVALID_DATA;
}
- kmsg->ikm_header.msgh_size = size;
*kmsgp = kmsg;
return MACH_MSG_SUCCESS;
}
@@ -1393,7 +1393,19 @@ ipc_kmsg_copyin_body(
if (data == 0)
goto invalid_memory;
- if (copyinmap(map, (char *) addr,
+ if (sizeof(mach_port_name_t) !=
sizeof(mach_port_t))
+ {
+ mach_port_name_t *src =
(mach_port_name_t*)addr;
+ mach_port_t *dst = (mach_port_t*)data;
+ for (int i=0; i<number; i++)
+ *(dst + i) = *(src + i);
+ if (dealloc &&
+ (vm_deallocate(map, addr, length) !=
+ KERN_SUCCESS)) {
+ kfree(data, length);
+ goto invalid_memory;
+ }
+ } else if (copyinmap(map, (char *) addr,
(char *) data, length) ||
(dealloc &&
(vm_deallocate(map, addr, length) !=
@@ -2434,8 +2446,16 @@ ipc_kmsg_copyout_body(
} else if (is_port) {
/* copyout to memory allocated above */
- (void) copyoutmap(map, (char *) data,
- (char *) addr, length);
+ if (sizeof(mach_port_name_t) !=
sizeof(mach_port_t))
+ {
+ mach_port_t *src = (mach_port_t*)data;
+ mach_port_name_t *dst =
(mach_port_name_t*)addr;
+ for (int i=0; i<number; i++)
+ *(dst + i) = *(src + i);
+ } else {
+ (void) copyoutmap(map, (char *) data,
+ (char *) addr,
length);
+ }
kfree(data, length);
} else {
vm_map_copy_t copy = (vm_map_copy_t) data;
diff --git a/ipc/ipc_mqueue.c b/ipc/ipc_mqueue.c
index 9138aec4..50cbff93 100644
--- a/ipc/ipc_mqueue.c
+++ b/ipc/ipc_mqueue.c
@@ -36,6 +36,7 @@
#include <mach/port.h>
#include <mach/message.h>
+#include <machine/copy_user.h>
#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/debug.h>
@@ -540,7 +541,7 @@ ipc_mqueue_receive(
if (kmsg != IKM_NULL) {
/* check space requirements */
- if (kmsg->ikm_header.msgh_size > max_size) {
+ if (msg_usize(&kmsg->ikm_header) > max_size) {
* (mach_msg_size_t *) kmsgp =
kmsg->ikm_header.msgh_size;
imq_unlock(mqueue);
@@ -649,7 +650,7 @@ ipc_mqueue_receive(
/* we have a kmsg; unlock the msg queue */
imq_unlock(mqueue);
- assert(kmsg->ikm_header.msgh_size <= max_size);
+ assert(msg_usize(&kmsg->ikm_header) <= max_size);
}
{
diff --git a/ipc/mach_msg.c b/ipc/mach_msg.c
index 0ae8fe0c..1664e29b 100644
--- a/ipc/mach_msg.c
+++ b/ipc/mach_msg.c
@@ -39,6 +39,7 @@
#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/message.h>
+#include <machine/copy_user.h>
#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/debug.h>
@@ -241,7 +242,7 @@ mach_msg_receive(
return mr;
kmsg->ikm_header.msgh_seqno = seqno;
- if (kmsg->ikm_header.msgh_size > rcv_size) {
+ if (msg_usize(&kmsg->ikm_header) > rcv_size) {
ipc_kmsg_copyout_dest(kmsg, space);
(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
return MACH_RCV_TOO_LARGE;
@@ -321,7 +322,7 @@ mach_msg_receive_continue(void)
}
kmsg->ikm_header.msgh_seqno = seqno;
- assert(kmsg->ikm_header.msgh_size <= rcv_size);
+ assert(msg_usize(&kmsg->ikm_header) <= rcv_size);
} else {
mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
MACH_MSG_SIZE_MAX, time_out,
@@ -335,7 +336,7 @@ mach_msg_receive_continue(void)
}
kmsg->ikm_header.msgh_seqno = seqno;
- if (kmsg->ikm_header.msgh_size > rcv_size) {
+ if (msg_usize(&kmsg->ikm_header) > rcv_size) {
ipc_kmsg_copyout_dest(kmsg, space);
(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
thread_syscall_return(MACH_RCV_TOO_LARGE);
@@ -450,7 +451,7 @@ mach_msg_trap(
*/
if ((send_size > IKM_SAVED_MSG_SIZE) ||
- (send_size < sizeof(mach_msg_header_t)) ||
+ (send_size < sizeof(mach_msg_user_header_t)) ||
(send_size & 3) ||
((kmsg = ikm_cache()) == IKM_NULL))
goto slow_get;
@@ -464,8 +465,6 @@ mach_msg_trap(
goto slow_get;
}
- kmsg->ikm_header.msgh_size = send_size;
-
fast_copyin:
/*
* optimized ipc_kmsg_copyin/ipc_mqueue_copyin
@@ -942,7 +941,7 @@ mach_msg_trap(
== dest_port);
reply_size = kmsg->ikm_header.msgh_size;
- if (rcv_size < reply_size)
+ if (rcv_size < msg_usize(&kmsg->ikm_header))
goto slow_copyout;
/* optimized ipc_kmsg_copyout/ipc_kmsg_copyout_header */
@@ -1452,7 +1451,7 @@ mach_msg_trap(
*/
reply_size = kmsg->ikm_header.msgh_size;
- if (rcv_size < reply_size) {
+ if (rcv_size < msg_usize(&kmsg->ikm_header)) {
ipc_kmsg_copyout_dest(kmsg, space);
(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
thread_syscall_return(MACH_RCV_TOO_LARGE);
@@ -1546,7 +1545,7 @@ mach_msg_trap(
return mr;
kmsg->ikm_header.msgh_seqno = seqno;
- if (rcv_size < kmsg->ikm_header.msgh_size) {
+ if (rcv_size < msg_usize(&kmsg->ikm_header)) {
ipc_kmsg_copyout_dest(kmsg, space);
(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
return MACH_RCV_TOO_LARGE;
@@ -1630,7 +1629,7 @@ mach_msg_continue(void)
}
kmsg->ikm_header.msgh_seqno = seqno;
- if (kmsg->ikm_header.msgh_size > rcv_size) {
+ if (msg_usize(&kmsg->ikm_header) > rcv_size) {
ipc_kmsg_copyout_dest(kmsg, space);
(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
thread_syscall_return(MACH_RCV_TOO_LARGE);
diff --git a/x86_64/Makefrag.am b/x86_64/Makefrag.am
index 0139940a..1986f2a3 100644
--- a/x86_64/Makefrag.am
+++ b/x86_64/Makefrag.am
@@ -92,6 +92,7 @@ libkernel_a_SOURCES += \
i386/i386/cpu.h \
i386/i386/cpu_number.h \
x86_64/cswitch.S \
+ x86_64/copy_user.c \
i386/i386/db_disasm.c \
i386/i386/db_interface.c \
i386/i386/db_interface.h \
diff --git a/x86_64/copy_user.c b/x86_64/copy_user.c
new file mode 100644
index 00000000..b9c94d76
--- /dev/null
+++ b/x86_64/copy_user.c
@@ -0,0 +1,280 @@
+
+#include <string.h>
+
+#include <kern/debug.h>
+#include <mach/boolean.h>
+
+#include <copy_user.h>
+
+static inline vm_size_t unpack_msg_type(vm_offset_t addr,
+ mach_msg_type_name_t *name,
+ mach_msg_type_size_t *size,
+ mach_msg_type_number_t *number,
+ boolean_t *is_inline)
+{
+ mach_msg_type_t* kmt = (mach_msg_type_t*)addr;
+ *is_inline = kmt->msgt_inline;
+ if (kmt->msgt_longform)
+ {
+ mach_msg_type_long_t* kmtl = (mach_msg_type_long_t*)addr;
+ *name = kmtl->msgtl_name;
+ *size = kmtl->msgtl_size;
+ *number = kmtl->msgtl_number;
+ return sizeof(mach_msg_type_long_t);
+ }
+ else
+ {
+ *name = kmt->msgt_name;
+ *size = kmt->msgt_size;
+ *number = kmt->msgt_number;
+ return sizeof(mach_msg_type_t);
+ }
+}
+
+static inline void adjust_msg_type_size(vm_offset_t addr, int amount)
+{
+ mach_msg_type_t* kmt = (mach_msg_type_t*)addr;
+ if (kmt->msgt_longform)
+ {
+ mach_msg_type_long_t* kmtl = (mach_msg_type_long_t*)addr;
+ kmtl->msgtl_size += amount*8;
+ }
+ else
+ {
+ kmt->msgt_size += amount*8;
+ }
+}
+
+static uint32_t align(uint32_t val, size_t aln)
+{
+ // we should check aln is a power of 2
+ aln--;
+ return (val + aln) & (~aln);
+}
+
+#define align_inline(val, n) { val = align(val, n); }
+
+/*
+ * Compute the user-space size of a message still in the kernel.
+ * The message may be originating from userspace (in which case we could
+ * optimize this by keeping the usize around) or from kernel space (we could
+ * optimize if the message structure is fixed and known in advance).
+ * For now just handle the most general case, iterating over the msg body.
+ */
+size_t msg_usize(const mach_msg_header_t *kmsg)
+{
+ size_t ksize = kmsg->msgh_size;
+ size_t usize = sizeof(mach_msg_user_header_t);
+ if (ksize > sizeof(mach_msg_header_t))
+ {
+ // iterate over body compute the user-space message size
+ vm_offset_t saddr, eaddr;
+ saddr = (vm_offset_t)(kmsg + 1);
+ eaddr = saddr + ksize - sizeof(mach_msg_header_t);
+ while (saddr < (eaddr - sizeof(mach_msg_type_t)))
+ {
+ vm_size_t amount;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline;
+ amount = unpack_msg_type(saddr, &name, &size, &number, &is_inline);
+ saddr += amount;
+ usize += amount;
+
+ if (is_inline)
+ {
+ if (MACH_MSG_TYPE_PORT_ANY(name))
+ {
+ saddr += 8 * number;
+ usize += 4 * number;
+ }
+ else
+ {
+ size_t n = size / 8;
+ saddr += n*number;
+ usize += n*number;
+ align_inline(saddr, 4);
+ align_inline(usize, 4);
+ }
+ }
+ else
+ {
+ // advance one pointer
+ saddr += 8;
+ usize += 4;
+ }
+ }
+ }
+ return usize;
+}
+
+/*
+ expand the msg header and, if required the msg body (ports, pointers)
+*/
+int copyinmsg (const void *userbuf, void *kernelbuf, const size_t usize)
+{
+ const mach_msg_user_header_t *umsg = userbuf;
+ mach_msg_header_t *kmsg = kernelbuf;
+ kmsg->msgh_bits = umsg->msgh_bits;
+ /* kmsg->msgh_size is filled in later */
+ /* umsg->msgh_size is not filled in by mig, use usize */
+ kmsg->msgh_remote_port = umsg->msgh_remote_port;
+ kmsg->msgh_local_port = umsg->msgh_local_port;
+ kmsg->msgh_seqno = umsg->msgh_seqno;
+ kmsg->msgh_id = umsg->msgh_id;
+
+ vm_offset_t usaddr, ueaddr, ksaddr;
+ ksaddr = (vm_offset_t)(kmsg + 1);
+ usaddr = (vm_offset_t)(umsg + 1);
+ ueaddr = (vm_offset_t)umsg + usize;
+ if (usize > sizeof(mach_msg_user_header_t))
+ {
+    /* check we have at least space for an empty descriptor */
+ while (usaddr < (ueaddr - sizeof(mach_msg_type_t)))
+ {
+ vm_size_t amount;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline;
+ align_inline(usaddr, 4);
+ align_inline(ksaddr, 4);
+ amount = unpack_msg_type(usaddr, &name, &size, &number, &is_inline);
+ vm_offset_t ktaddr=ksaddr;
+ memcpy((char*)ksaddr, (char*)usaddr, amount);
+ usaddr += amount;
+ ksaddr += amount;
+
+ if (is_inline)
+ {
+ if (MACH_MSG_TYPE_PORT_ANY(name))
+ {
+ if ((usaddr + sizeof(mach_port_name_t)*number) > ueaddr)
+ return 1;
+ adjust_msg_type_size(ktaddr, 4);
+ for (int i=0; i<number; i++)
+ {
+ *(mach_port_t*)ksaddr = *(mach_port_name_t*)usaddr;
+ ksaddr += 8;
+ usaddr += 4;
+ }
+ }
+ else
+ {
+ // type that doesn't need change
+ size_t n = size / 8;
+ if ((usaddr + n*number) > ueaddr)
+ return 1;
+ memcpy((char*)ksaddr, (char*)usaddr, n*number);
+ usaddr += n*number;
+ ksaddr += n*number;
+ align_inline(usaddr, 4);
+ align_inline(ksaddr, 4);
+ }
+ }
+ else
+ {
+ if ((usaddr + 4) > ueaddr)
+ return 1;
+
+ // out-of-line port arrays are expanded in ipc_kmsg_copyin_body()
+ if (MACH_MSG_TYPE_PORT_ANY(name))
+ adjust_msg_type_size(ktaddr, 4);
+
+#ifdef USER32
+ *(unsigned long long*)ksaddr = *(unsigned int*)usaddr;
+ // advance one pointer
+ ksaddr += 8;
+ usaddr += 4;
+#else
+#error "fixme 64 bit userspace"
+#endif
+ }
+ }
+ }
+
+ kmsg->msgh_size = sizeof(mach_msg_header_t) + ksaddr - (vm_offset_t)(kmsg +
1);
+ align_inline(kmsg->msgh_size, 4);
+ return 0;
+}
+
+int copyoutmsg (const void *kernelbuf, void *userbuf, const size_t ksize)
+{
+ const mach_msg_header_t *kmsg = kernelbuf;
+ mach_msg_user_header_t *umsg = userbuf;
+ umsg->msgh_bits = kmsg->msgh_bits;
+ /* umsg->msgh_size is filled in later; */
+ umsg->msgh_remote_port = kmsg->msgh_remote_port;
+ umsg->msgh_local_port = kmsg->msgh_local_port;
+ umsg->msgh_seqno = kmsg->msgh_seqno;
+ umsg->msgh_id = kmsg->msgh_id;
+
+ vm_offset_t ksaddr, keaddr, usaddr;
+ ksaddr = (vm_offset_t)(kmsg + 1);
+ usaddr = (vm_offset_t)(umsg + 1);
+ keaddr = ksaddr + ksize - sizeof(mach_msg_header_t);
+
+ if (ksize > sizeof(mach_msg_user_header_t))
+ {
+ while (ksaddr < keaddr)
+ {
+ vm_size_t amount;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline;
+ align_inline(usaddr, 4);
+ align_inline(ksaddr, 4);
+ amount = unpack_msg_type(ksaddr, &name, &size, &number, &is_inline);
+ vm_offset_t utaddr=usaddr;
+ memcpy((char*)usaddr, (char*)ksaddr, amount);
+ usaddr += amount;
+ ksaddr += amount;
+
+ if (is_inline)
+ {
+ if (MACH_MSG_TYPE_PORT_ANY(name))
+ {
+ adjust_msg_type_size(utaddr, -4);
+ for (int i=0; i<number; i++)
+ {
+ *(mach_port_name_t*)usaddr = *(mach_port_t*)ksaddr;
+ ksaddr += 8;
+ usaddr += 4;
+ }
+ }
+ else
+ {
+ // type that doesn't need change
+ size_t n = size / 8;
+ memcpy((char*)usaddr, (char*)ksaddr, n*number);
+ usaddr += n*number;
+ ksaddr += n*number;
+ align_inline(usaddr, 4);
+ align_inline(ksaddr, 4);
+ }
+ }
+ else
+ {
+          // out-of-line port arrays are shrunk in ipc_kmsg_copyout_body()
+ if (MACH_MSG_TYPE_PORT_ANY(name))
+ adjust_msg_type_size(utaddr, -4);
+#ifdef USER32
+ *(unsigned int*)usaddr = *(unsigned long long*)ksaddr;
+ // advance one pointer
+ ksaddr += 8;
+ usaddr += 4;
+#else
+#error "fixme 64 bit userspace"
+#endif
+ }
+
+ }
+ }
+
+ umsg->msgh_size = sizeof(mach_msg_user_header_t) + usaddr -
(vm_offset_t)(umsg + 1);
+ align_inline(umsg->msgh_size, 4);
+ return 0;
+
+}
diff --git a/x86_64/locore.S b/x86_64/locore.S
index 612fc493..198ac40a 100644
--- a/x86_64/locore.S
+++ b/x86_64/locore.S
@@ -1310,46 +1310,6 @@ copyin_fail:
movq $1,%rax /* return 1 for failure */
jmp copyin_ret /* pop frame and return */
-/*
- * Copy from user address space - version for copying messages.
- * arg0: user address
- * arg1: kernel address
- * arg2: byte count
- */
-ENTRY(copyinmsg)
- xchgq %rsi,%rdi /* Get user source and kernel
destination */
-
-/* 32 on 64 conversion */
- subq $32,%rdx
- js bogus
-
- /* Copy msgh_bits */
- RECOVER(copyin_fail)
- movsl
-
- /* Copy msgh_size */
- RECOVER(copyin_fail)
- lodsl
- addl $8,%eax
- stosl
-
- xorq %rax,%rax
- /* Copy msgh_remote_port */
- RECOVER(copyin_fail)
- lodsl
- stosq
-
- /* Copy msgh_local_port */
- RECOVER(copyin_fail)
- lodsl
- stosq
-
- /* Copy msgh_seqno and msgh_id */
- RECOVER(copyin_fail)
- movsq
-
- jmp copyin_remainder
-
bogus:
ud2
@@ -1378,45 +1338,6 @@ copyout_fail:
movq $1,%rax /* return 1 for failure */
jmp copyout_ret /* pop frame and return */
-/*
- * Copy to user address space.
- * arg0: kernel address
- * arg1: user address
- * arg2: byte count
- */
-ENTRY(copyoutmsg)
- xchgq %rsi,%rdi /* Get user source and kernel
destination */
-
-/* 32 on 64 conversion */
- subq $32,%rdx
- js bogus
-
- /* Copy msgh_bits */
- RECOVER(copyout_fail)
- movsl
-
- /* Copy msgh_size */
- lodsl
- subl $8,%eax
- RECOVER(copyout_fail)
- stosl
-
- /* Copy msgh_remote_port */
- lodsq
- RECOVER(copyout_fail)
- stosl
-
- /* Copy msgh_local_port */
- lodsq
- RECOVER(copyout_fail)
- stosl
-
- /* Copy msgh_seqno and msgh_id */
- RECOVER(copyout_fail)
- movsq
-
- jmp copyin_remainder
-
/*
* int inst_fetch(int eip, int cs);
*
--
2.30.2
- [PATCH 00/15] Add preliminary support for 32-bit userspace on a x86_64 kernel, Luca Dariz, 2022/06/28
- [PATCH 01/15] fix rpc types for KERNEL_USER stubs, Luca Dariz, 2022/06/28
- [PATCH 03/15] fix argument passing to bootstrap modules, Luca Dariz, 2022/06/28
- [PATCH 06/15] kmsg: fix msg body alignment, Luca Dariz, 2022/06/28
- [PATCH 09/15] x86_64: fix exception stack alignment, Luca Dariz, 2022/06/28
- [PATCH 04/15] compute mach port size from the corresponding type, Luca Dariz, 2022/06/28
- [PATCH 05/15] sign-extend mask in vm_map() with 32-bit userspace, Luca Dariz, 2022/06/28
- [PATCH 08/15] use port name type in mach_port_names(), Luca Dariz, 2022/06/28
- [PATCH 10/15] x86_64: expand and shrink messages in copy{in, out}msg routines,
Luca Dariz <=
- [PATCH 02/15] simplify ipc_kmsg_copyout_body() usage, Luca Dariz, 2022/06/28
- [PATCH 07/15] fix host_info structure definition, Luca Dariz, 2022/06/28
- [PATCH 11/15] update syscall signature with rpc_vm_* and mach_port_name_t, Luca Dariz, 2022/06/28
- [PATCH 15/15] enable syscalls on x86_64, Luca Dariz, 2022/06/28
- [PATCH 14/15] hack vm memory object proxy creation for vm arrays, Luca Dariz, 2022/06/28
- [PATCH 13/15] cleanup headers in printf.c, Luca Dariz, 2022/06/28
- [PATCH 12/15] fix warnings for 32 bit builds, Luca Dariz, 2022/06/28