[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [XEN][RFC PATCH 05/15] hvm: Modify hvm_op
From: |
Julien Grall |
Subject: |
[Qemu-devel] [XEN][RFC PATCH 05/15] hvm: Modify hvm_op |
Date: |
Thu, 22 Mar 2012 15:59:26 +0000 |
This patch removes HVM params made useless by the structure modifications
and binds the new hypercalls that handle ioreq servers and PCI.
Signed-off-by: Julien Grall <address@hidden>
---
xen/arch/x86/hvm/hvm.c | 127 ++++++++++++++++++++++++++++++------------------
1 files changed, 80 insertions(+), 47 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 1b38762..3117ae1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4009,7 +4009,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void)
arg)
case HVMOP_get_param:
{
struct xen_hvm_param a;
- struct hvm_ioreq_page *iorp;
struct domain *d;
struct vcpu *v;
@@ -4037,21 +4036,14 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void)
arg)
switch ( a.index )
{
- case HVM_PARAM_IOREQ_PFN:
- iorp = &d->arch.hvm_domain.ioreq;
- if ( (rc = hvm_set_ioreq_page(d, iorp, a.value)) != 0 )
- break;
- spin_lock(&iorp->lock);
- if ( iorp->va != NULL )
- /* Initialise evtchn port info if VCPUs already created. */
- for_each_vcpu ( d, v )
- get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
- spin_unlock(&iorp->lock);
- break;
- case HVM_PARAM_BUFIOREQ_PFN:
- iorp = &d->arch.hvm_domain.buf_ioreq;
- rc = hvm_set_ioreq_page(d, iorp, a.value);
+ case HVM_PARAM_IO_PFN_FIRST:
+ rc = hvm_set_ioreq_page(d, &d->arch.hvm_domain.ioreq, a.value);
+ gdprintk(XENLOG_DEBUG, "Pfn first = 0x%lx\n", a.value);
+ gdprintk(XENLOG_DEBUG, "va = %p\n",
d->arch.hvm_domain.ioreq.va);
break;
+ case HVM_PARAM_IO_PFN_LAST:
+ if ( (d->arch.hvm_domain.params[HVM_PARAM_IO_PFN_LAST]) )
+ rc = -EINVAL;
case HVM_PARAM_CALLBACK_IRQ:
hvm_set_callback_via(d, a.value);
hvm_latch_shinfo_size(d);
@@ -4096,38 +4088,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void)
arg)
domctl_lock_release();
break;
- case HVM_PARAM_DM_DOMAIN:
- /* Not reflexive, as we must domain_pause(). */
- rc = -EPERM;
- if ( curr_d == d )
- break;
-
- if ( a.value == DOMID_SELF )
- a.value = curr_d->domain_id;
-
- rc = 0;
- domain_pause(d); /* safe to change per-vcpu xen_port */
- iorp = &d->arch.hvm_domain.ioreq;
- for_each_vcpu ( d, v )
- {
- int old_port, new_port;
- new_port = alloc_unbound_xen_event_channel(
- v, a.value, NULL);
- if ( new_port < 0 )
- {
- rc = new_port;
- break;
- }
- /* xchg() ensures that only we free_xen_event_channel() */
- old_port = xchg(&v->arch.hvm_vcpu.xen_port, new_port);
- free_xen_event_channel(v, old_port);
- spin_lock(&iorp->lock);
- if ( iorp->va != NULL )
- get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
- spin_unlock(&iorp->lock);
- }
- domain_unpause(d);
- break;
case HVM_PARAM_ACPI_S_STATE:
/* Not reflexive, as we must domain_pause(). */
rc = -EPERM;
@@ -4650,6 +4610,79 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void)
arg)
break;
}
+ case HVMOP_register_ioreq_server:
+ {
+ struct xen_hvm_register_ioreq_server a;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = hvmop_register_ioreq_server(&a);
+ if ( rc != 0 )
+ return rc;
+
+ rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+ break;
+ }
+
+ case HVMOP_get_ioreq_server_buf_channel:
+ {
+ struct xen_hvm_get_ioreq_server_buf_channel a;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = hvmop_get_ioreq_server_buf_channel(&a);
+ if ( rc != 0 )
+ return rc;
+
+ rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+
+ break;
+ }
+
+ case HVMOP_map_io_range_to_ioreq_server:
+ {
+ struct xen_hvm_map_io_range_to_ioreq_server a;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = hvmop_map_io_range_to_ioreq_server(&a);
+ if ( rc != 0 )
+ return rc;
+
+ break;
+ }
+
+ case HVMOP_unmap_io_range_from_ioreq_server:
+ {
+ struct xen_hvm_unmap_io_range_from_ioreq_server a;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = hvmop_unmap_io_range_from_ioreq_server(&a);
+ if ( rc != 0 )
+ return rc;
+
+ break;
+ }
+
+ case HVMOP_register_pcidev:
+ {
+ struct xen_hvm_register_pcidev a;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = hvm_register_pcidev(a.domid, a.id, a.bdf);
+ if ( rc != 0 )
+ return rc;
+
+ break;
+ }
+
default:
{
gdprintk(XENLOG_DEBUG, "Bad HVM op %ld.\n", op);
--
Julien Grall
- [Qemu-devel] [XEN][RFC PATCH 00/15] QEMU disaggregation, Julien Grall, 2012/03/22
- [Qemu-devel] [XEN][RFC PATCH 03/15] hvm-pci: Handle PCI config space in Xen, Julien Grall, 2012/03/22
- [Qemu-devel] [XEN][RFC PATCH 01/15] hvm: Modify interface to support multiple ioreq server, Julien Grall, 2012/03/22
- [Qemu-devel] [XEN][RFC PATCH 05/15] hvm: Modify hvm_op,
Julien Grall <=
- [Qemu-devel] [XEN][RFC PATCH 11/15] xc: Fix python build, Julien Grall, 2012/03/22
- [Qemu-devel] [XEN][RFC PATCH 07/15] hvm-io: send invalidate map cache to each registered servers, Julien Grall, 2012/03/22
- [Qemu-devel] [XEN][RFC PATCH 13/15] xl-qmp: add device model id to qmp function, Julien Grall, 2012/03/22
- [Qemu-devel] [XEN][RFC PATCH 09/15] xc: Add the hypercall for multiple servers, Julien Grall, 2012/03/22
- [Qemu-devel] [XEN][RFC PATCH 02/15] hvm: Add functions to handle ioreq servers, Julien Grall, 2012/03/22
- [Qemu-devel] [XEN][RFC PATCH 04/15] hvm: Change initialization/destruction of an hvm, Julien Grall, 2012/03/22
- [Qemu-devel] [XEN][RFC PATCH 06/15] hvm-io: IO refactoring with ioreq server, Julien Grall, 2012/03/22
- [Qemu-devel] [XEN][RFC PATCH 08/15] hvm-io: Handle server in buffered IO, Julien Grall, 2012/03/22