

From: Markus Armbruster
Subject: [Qemu-devel] [PATCH v2.1 03/11] monitor: Make MonitorQMP a child class of Monitor
Date: Wed, 12 Jun 2019 10:05:18 +0200

From: Kevin Wolf <address@hidden>

Currently, struct Monitor mixes state that is only relevant for HMP,
state that is only relevant for QMP, and some actually shared state.
In particular, a MonitorQMP field is present in the state of any
monitor, even if it's not a QMP monitor and therefore doesn't use the
state.

As a first step towards a clean separation between QMP and HMP, let
MonitorQMP extend Monitor and create a MonitorQMP object only when the
monitor is actually a QMP monitor.

Signed-off-by: Kevin Wolf <address@hidden>
Reviewed-by: Dr. David Alan Gilbert <address@hidden>
Signed-off-by: Markus Armbruster <address@hidden>
---
 monitor.c | 221 ++++++++++++++++++++++++++++--------------------------
 1 file changed, 113 insertions(+), 108 deletions(-)
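For context, the conversion relies on QEMU's usual struct-embedding idiom: the child struct embeds the parent as its first member, and container_of() recovers the child pointer from a pointer to the embedded parent. Below is a minimal, self-contained sketch of that idiom with hypothetical struct members and a local container_of definition; it is an illustration only, not the actual monitor.c code.

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in for QEMU's container_of() from osdep.h. */
    #define container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    typedef struct Monitor {
        int flags;                  /* state shared by HMP and QMP monitors */
    } Monitor;

    typedef struct {
        Monitor common;             /* embedded parent object */
        int qmp_only_state;         /* state only a QMP monitor needs */
    } MonitorQMP;

    /* A caller that only holds a Monitor * can recover the QMP child. */
    static void print_qmp_state(Monitor *mon)
    {
        MonitorQMP *qmp = container_of(mon, MonitorQMP, common);

        printf("flags=%d qmp_only_state=%d\n", mon->flags, qmp->qmp_only_state);
    }

    int main(void)
    {
        MonitorQMP mon = { .common = { .flags = 1 }, .qmp_only_state = 42 };

        print_qmp_state(&mon.common);   /* pass the embedded parent */
        return 0;
    }

Because common is the first member, the conversion is effectively a pointer cast, but going through container_of() stays correct even if the member's position ever changes.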

diff --git a/monitor.c b/monitor.c
index 0e145959d7..62a3c06aeb 100644
--- a/monitor.c
+++ b/monitor.c
@@ -166,26 +166,6 @@ struct MonFdset {
     QLIST_ENTRY(MonFdset) next;
 };
 
-typedef struct {
-    JSONMessageParser parser;
-    /*
-     * When a client connects, we're in capabilities negotiation mode.
-     * @commands is &qmp_cap_negotiation_commands then.  When command
-     * qmp_capabilities succeeds, we go into command mode, and
-     * @command becomes &qmp_commands.
-     */
-    QmpCommandList *commands;
-    bool capab_offered[QMP_CAPABILITY__MAX]; /* capabilities offered */
-    bool capab[QMP_CAPABILITY__MAX];         /* offered and accepted */
-    /*
-     * Protects qmp request/response queue.
-     * Take monitor_lock first when you need both.
-     */
-    QemuMutex qmp_queue_lock;
-    /* Input queue that holds all the parsed QMP requests */
-    GQueue *qmp_requests;
-} MonitorQMP;
-
 /*
  * To prevent flooding clients, events can be throttled. The
  * throttling is calculated globally, rather than per-Monitor
@@ -218,7 +198,6 @@ struct Monitor {
      */
     ReadLineState *rs;
 
-    MonitorQMP qmp;
     gchar *mon_cpu_path;
     mon_cmd_t *cmd_table;
     QTAILQ_ENTRY(Monitor) entry;
@@ -239,6 +218,27 @@ struct Monitor {
     int mux_out;
 };
 
+typedef struct {
+    Monitor common;
+    JSONMessageParser parser;
+    /*
+     * When a client connects, we're in capabilities negotiation mode.
+     * @commands is &qmp_cap_negotiation_commands then.  When command
+     * qmp_capabilities succeeds, we go into command mode, and
+     * @command becomes &qmp_commands.
+     */
+    QmpCommandList *commands;
+    bool capab_offered[QMP_CAPABILITY__MAX]; /* capabilities offered */
+    bool capab[QMP_CAPABILITY__MAX];         /* offered and accepted */
+    /*
+     * Protects qmp request/response queue.
+     * Take monitor_lock first when you need both.
+     */
+    QemuMutex qmp_queue_lock;
+    /* Input queue that holds all the parsed QMP requests */
+    GQueue *qmp_requests;
+} MonitorQMP;
+
 /* Shared monitor I/O thread */
 IOThread *mon_iothread;
 
@@ -247,7 +247,7 @@ QEMUBH *qmp_dispatcher_bh;
 
 struct QMPRequest {
     /* Owner of the request */
-    Monitor *mon;
+    MonitorQMP *mon;
     /*
      * Request object to be handled or Error to be reported
      * (exactly one of them is non-null)
@@ -355,22 +355,18 @@ static void qmp_request_free(QMPRequest *req)
 }
 
 /* Caller must hold mon->qmp.qmp_queue_lock */
-static void monitor_qmp_cleanup_req_queue_locked(Monitor *mon)
+static void monitor_qmp_cleanup_req_queue_locked(MonitorQMP *mon)
 {
-    assert(monitor_is_qmp(mon));
-
-    while (!g_queue_is_empty(mon->qmp.qmp_requests)) {
-        qmp_request_free(g_queue_pop_head(mon->qmp.qmp_requests));
+    while (!g_queue_is_empty(mon->qmp_requests)) {
+        qmp_request_free(g_queue_pop_head(mon->qmp_requests));
     }
 }
 
-static void monitor_qmp_cleanup_queues(Monitor *mon)
+static void monitor_qmp_cleanup_queues(MonitorQMP *mon)
 {
-    assert(monitor_is_qmp(mon));
-
-    qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
+    qemu_mutex_lock(&mon->qmp_queue_lock);
     monitor_qmp_cleanup_req_queue_locked(mon);
-    qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+    qemu_mutex_unlock(&mon->qmp_queue_lock);
 }
 
 
@@ -482,17 +478,17 @@ int monitor_printf(Monitor *mon, const char *fmt, ...)
     return ret;
 }
 
-static void qmp_send_response(Monitor *mon, const QDict *rsp)
+static void qmp_send_response(MonitorQMP *mon, const QDict *rsp)
 {
     const QObject *data = QOBJECT(rsp);
     QString *json;
 
-    json = mon->flags & MONITOR_USE_PRETTY ? qobject_to_json_pretty(data) :
-                                             qobject_to_json(data);
+    json = mon->common.flags & MONITOR_USE_PRETTY ?
+           qobject_to_json_pretty(data) : qobject_to_json(data);
     assert(json != NULL);
 
     qstring_append_chr(json, '\n');
-    monitor_puts(mon, qstring_get_str(json));
+    monitor_puts(&mon->common, qstring_get_str(json));
 
     qobject_unref(json);
 }
@@ -515,12 +511,17 @@ static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
 static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
 {
     Monitor *mon;
+    MonitorQMP *qmp_mon;
 
     trace_monitor_protocol_event_emit(event, qdict);
     QTAILQ_FOREACH(mon, &mon_list, entry) {
-        if (monitor_is_qmp(mon)
-            && mon->qmp.commands != &qmp_cap_negotiation_commands) {
-            qmp_send_response(mon, qdict);
+        if (!monitor_is_qmp(mon)) {
+            continue;
+        }
+
+        qmp_mon = container_of(mon, MonitorQMP, common);
+        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
+            qmp_send_response(qmp_mon, qdict);
         }
     }
 }
@@ -722,12 +723,12 @@ static void monitor_data_init(Monitor *mon, int flags, bool skip_flush,
     mon->flags = flags;
 }
 
-static void monitor_data_destroy_qmp(Monitor *mon)
+static void monitor_data_destroy_qmp(MonitorQMP *mon)
 {
-    json_message_parser_destroy(&mon->qmp.parser);
-    qemu_mutex_destroy(&mon->qmp.qmp_queue_lock);
+    json_message_parser_destroy(&mon->parser);
+    qemu_mutex_destroy(&mon->qmp_queue_lock);
     monitor_qmp_cleanup_req_queue_locked(mon);
-    g_queue_free(mon->qmp.qmp_requests);
+    g_queue_free(mon->qmp_requests);
 }
 
 static void monitor_data_destroy(Monitor *mon)
@@ -735,7 +736,7 @@ static void monitor_data_destroy(Monitor *mon)
     g_free(mon->mon_cpu_path);
     qemu_chr_fe_deinit(&mon->chr, false);
     if (monitor_is_qmp(mon)) {
-        monitor_data_destroy_qmp(mon);
+        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
     }
     readline_free(mon->rs);
     qobject_unref(mon->outbuf);
@@ -1092,9 +1093,12 @@ static void query_commands_cb(QmpCommand *cmd, void *opaque)
 CommandInfoList *qmp_query_commands(Error **errp)
 {
     CommandInfoList *list = NULL;
+    MonitorQMP *mon;
 
     assert(monitor_is_qmp(cur_mon));
-    qmp_for_each_command(cur_mon->qmp.commands, query_commands_cb, &list);
+    mon = container_of(cur_mon, MonitorQMP, common);
+
+    qmp_for_each_command(mon->commands, query_commands_cb, &list);
 
     return list;
 }
@@ -1161,18 +1165,16 @@ static void monitor_init_qmp_commands(void)
                          qmp_marshal_qmp_capabilities, QCO_ALLOW_PRECONFIG);
 }
 
-static bool qmp_oob_enabled(Monitor *mon)
+static bool qmp_oob_enabled(MonitorQMP *mon)
 {
-    assert(monitor_is_qmp(mon));
-    return mon->qmp.capab[QMP_CAPABILITY_OOB];
+    return mon->capab[QMP_CAPABILITY_OOB];
 }
 
-static void monitor_qmp_caps_reset(Monitor *mon)
+static void monitor_qmp_caps_reset(MonitorQMP *mon)
 {
-    assert(monitor_is_qmp(mon));
-    memset(mon->qmp.capab_offered, 0, sizeof(mon->qmp.capab_offered));
-    memset(mon->qmp.capab, 0, sizeof(mon->qmp.capab));
-    mon->qmp.capab_offered[QMP_CAPABILITY_OOB] = mon->use_io_thread;
+    memset(mon->capab_offered, 0, sizeof(mon->capab_offered));
+    memset(mon->capab, 0, sizeof(mon->capab));
+    mon->capab_offered[QMP_CAPABILITY_OOB] = mon->common.use_io_thread;
 }
 
 /*
@@ -1180,17 +1182,16 @@ static void monitor_qmp_caps_reset(Monitor *mon)
  * On success, set mon->qmp.capab[], and return true.
  * On error, set @errp, and return false.
  */
-static bool qmp_caps_accept(Monitor *mon, QMPCapabilityList *list,
+static bool qmp_caps_accept(MonitorQMP *mon, QMPCapabilityList *list,
                             Error **errp)
 {
     GString *unavailable = NULL;
     bool capab[QMP_CAPABILITY__MAX];
 
-    assert(monitor_is_qmp(mon));
     memset(capab, 0, sizeof(capab));
 
     for (; list; list = list->next) {
-        if (!mon->qmp.capab_offered[list->value]) {
+        if (!mon->capab_offered[list->value]) {
             if (!unavailable) {
                 unavailable = g_string_new(QMPCapability_str(list->value));
             } else {
@@ -1207,26 +1208,30 @@ static bool qmp_caps_accept(Monitor *mon, QMPCapabilityList *list,
         return false;
     }
 
-    memcpy(mon->qmp.capab, capab, sizeof(capab));
+    memcpy(mon->capab, capab, sizeof(capab));
     return true;
 }
 
 void qmp_qmp_capabilities(bool has_enable, QMPCapabilityList *enable,
                           Error **errp)
 {
+    MonitorQMP *mon;
+
     assert(monitor_is_qmp(cur_mon));
-    if (cur_mon->qmp.commands == &qmp_commands) {
+    mon = container_of(cur_mon, MonitorQMP, common);
+
+    if (mon->commands == &qmp_commands) {
         error_set(errp, ERROR_CLASS_COMMAND_NOT_FOUND,
                   "Capabilities negotiation is already complete, command "
                   "ignored");
         return;
     }
 
-    if (!qmp_caps_accept(cur_mon, enable, errp)) {
+    if (!qmp_caps_accept(mon, enable, errp)) {
         return;
     }
 
-    cur_mon->qmp.commands = &qmp_commands;
+    mon->commands = &qmp_commands;
 }
 
 /* Set the current CPU defined by the user. Callers must hold BQL. */
@@ -4133,28 +4138,27 @@ static int monitor_can_read(void *opaque)
  * Null @rsp can only happen for commands with QCO_NO_SUCCESS_RESP.
  * Nothing is emitted then.
  */
-static void monitor_qmp_respond(Monitor *mon, QDict *rsp)
+static void monitor_qmp_respond(MonitorQMP *mon, QDict *rsp)
 {
     if (rsp) {
         qmp_send_response(mon, rsp);
     }
 }
 
-static void monitor_qmp_dispatch(Monitor *mon, QObject *req)
+static void monitor_qmp_dispatch(MonitorQMP *mon, QObject *req)
 {
     Monitor *old_mon;
     QDict *rsp;
     QDict *error;
 
-    assert(monitor_is_qmp(mon));
     old_mon = cur_mon;
-    cur_mon = mon;
+    cur_mon = &mon->common;
 
-    rsp = qmp_dispatch(mon->qmp.commands, req, qmp_oob_enabled(mon));
+    rsp = qmp_dispatch(mon->commands, req, qmp_oob_enabled(mon));
 
     cur_mon = old_mon;
 
-    if (mon->qmp.commands == &qmp_cap_negotiation_commands) {
+    if (mon->commands == &qmp_cap_negotiation_commands) {
         error = qdict_get_qdict(rsp, "error");
         if (error
             && !g_strcmp0(qdict_get_try_str(error, "class"),
@@ -4179,13 +4183,14 @@ static void monitor_qmp_dispatch(Monitor *mon, QObject *req)
  * monitor to the end of mon_list queue.
  *
  * Note: if the function returned with non-NULL, then the caller will
- * be with mon->qmp.qmp_queue_lock held, and the caller is responsible
+ * be with qmp_mon->qmp_queue_lock held, and the caller is responsible
  * to release it.
  */
 static QMPRequest *monitor_qmp_requests_pop_any_with_lock(void)
 {
     QMPRequest *req_obj = NULL;
     Monitor *mon;
+    MonitorQMP *qmp_mon;
 
     qemu_mutex_lock(&monitor_lock);
 
@@ -4193,13 +4198,15 @@ static QMPRequest *monitor_qmp_requests_pop_any_with_lock(void)
         if (!monitor_is_qmp(mon)) {
             continue;
         }
-        qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
-        req_obj = g_queue_pop_head(mon->qmp.qmp_requests);
+
+        qmp_mon = container_of(mon, MonitorQMP, common);
+        qemu_mutex_lock(&qmp_mon->qmp_queue_lock);
+        req_obj = g_queue_pop_head(qmp_mon->qmp_requests);
         if (req_obj) {
             /* With the lock of corresponding queue held */
             break;
         }
-        qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+        qemu_mutex_unlock(&qmp_mon->qmp_queue_lock);
     }
 
     if (req_obj) {
@@ -4221,18 +4228,17 @@ static void monitor_qmp_bh_dispatcher(void *data)
     QMPRequest *req_obj = monitor_qmp_requests_pop_any_with_lock();
     QDict *rsp;
     bool need_resume;
-    Monitor *mon;
+    MonitorQMP *mon;
 
     if (!req_obj) {
         return;
     }
 
     mon = req_obj->mon;
-    assert(monitor_is_qmp(mon));
     /*  qmp_oob_enabled() might change after "qmp_capabilities" */
     need_resume = !qmp_oob_enabled(mon) ||
-        mon->qmp.qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1;
-    qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+        mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1;
+    qemu_mutex_unlock(&mon->qmp_queue_lock);
     if (req_obj->req) {
         QDict *qdict = qobject_to(QDict, req_obj->req);
         QObject *id = qdict ? qdict_get(qdict, "id") : NULL;
@@ -4248,7 +4254,7 @@ static void monitor_qmp_bh_dispatcher(void *data)
 
     if (need_resume) {
         /* Pairs with the monitor_suspend() in handle_qmp_command() */
-        monitor_resume(mon);
+        monitor_resume(&mon->common);
     }
     qmp_request_free(req_obj);
 
@@ -4258,7 +4264,7 @@ static void monitor_qmp_bh_dispatcher(void *data)
 
 static void handle_qmp_command(void *opaque, QObject *req, Error *err)
 {
-    Monitor *mon = opaque;
+    MonitorQMP *mon = opaque;
     QObject *id = NULL;
     QDict *qdict;
     QMPRequest *req_obj;
@@ -4290,7 +4296,7 @@ static void handle_qmp_command(void *opaque, QObject *req, Error *err)
     req_obj->err = err;
 
     /* Protect qmp_requests and fetching its length. */
-    qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
+    qemu_mutex_lock(&mon->qmp_queue_lock);
 
     /*
      * Suspend the monitor when we can't queue more requests after
@@ -4299,8 +4305,8 @@ static void handle_qmp_command(void *opaque, QObject *req, Error *err)
      * command, for backward compatibility.
      */
     if (!qmp_oob_enabled(mon) ||
-        mon->qmp.qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1) {
-        monitor_suspend(mon);
+        mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1) {
+        monitor_suspend(&mon->common);
     }
 
     /*
@@ -4308,9 +4314,9 @@ static void handle_qmp_command(void *opaque, QObject *req, Error *err)
      * handled in time order.  Ownership for req_obj, req,
      * etc. will be delivered to the handler side.
      */
-    assert(mon->qmp.qmp_requests->length < QMP_REQ_QUEUE_LEN_MAX);
-    g_queue_push_tail(mon->qmp.qmp_requests, req_obj);
-    qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+    assert(mon->qmp_requests->length < QMP_REQ_QUEUE_LEN_MAX);
+    g_queue_push_tail(mon->qmp_requests, req_obj);
+    qemu_mutex_unlock(&mon->qmp_queue_lock);
 
     /* Kick the dispatcher routine */
     qemu_bh_schedule(qmp_dispatcher_bh);
@@ -4318,9 +4324,9 @@ static void handle_qmp_command(void *opaque, QObject *req, Error *err)
 
 static void monitor_qmp_read(void *opaque, const uint8_t *buf, int size)
 {
-    Monitor *mon = opaque;
+    MonitorQMP *mon = opaque;
 
-    json_message_parser_feed(&mon->qmp.parser, (const char *) buf, size);
+    json_message_parser_feed(&mon->parser, (const char *) buf, size);
 }
 
 static void monitor_read(void *opaque, const uint8_t *buf, int size)
@@ -4406,18 +4412,16 @@ void monitor_resume(Monitor *mon)
     trace_monitor_suspend(mon, -1);
 }
 
-static QDict *qmp_greeting(Monitor *mon)
+static QDict *qmp_greeting(MonitorQMP *mon)
 {
     QList *cap_list = qlist_new();
     QObject *ver = NULL;
     QMPCapability cap;
 
-    assert(monitor_is_qmp(mon));
-
     qmp_marshal_query_version(NULL, &ver, NULL);
 
     for (cap = 0; cap < QMP_CAPABILITY__MAX; cap++) {
-        if (mon->qmp.capab_offered[cap]) {
+        if (mon->capab_offered[cap]) {
             qlist_append_str(cap_list, QMPCapability_str(cap));
         }
     }
@@ -4430,11 +4434,11 @@ static QDict *qmp_greeting(Monitor *mon)
 static void monitor_qmp_event(void *opaque, int event)
 {
     QDict *data;
-    Monitor *mon = opaque;
+    MonitorQMP *mon = opaque;
 
     switch (event) {
     case CHR_EVENT_OPENED:
-        mon->qmp.commands = &qmp_cap_negotiation_commands;
+        mon->commands = &qmp_cap_negotiation_commands;
         monitor_qmp_caps_reset(mon);
         data = qmp_greeting(mon);
         qmp_send_response(mon, data);
@@ -4449,8 +4453,8 @@ static void monitor_qmp_event(void *opaque, int event)
          * is closed.
          */
         monitor_qmp_cleanup_queues(mon);
-        json_message_parser_destroy(&mon->qmp.parser);
-        json_message_parser_init(&mon->qmp.parser, handle_qmp_command,
+        json_message_parser_destroy(&mon->parser);
+        json_message_parser_init(&mon->parser, handle_qmp_command,
                                  mon, NULL);
         mon_refcount--;
         monitor_fdsets_cleanup();
@@ -4612,33 +4616,34 @@ static void monitor_list_append(Monitor *mon)
 
 static void monitor_qmp_setup_handlers_bh(void *opaque)
 {
-    Monitor *mon = opaque;
+    MonitorQMP *mon = opaque;
     GMainContext *context;
 
-    assert(mon->use_io_thread);
+    assert(mon->common.use_io_thread);
     context = iothread_get_g_main_context(mon_iothread);
     assert(context);
-    qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read, monitor_qmp_read,
-                             monitor_qmp_event, NULL, mon, context, true);
-    monitor_list_append(mon);
+    qemu_chr_fe_set_handlers(&mon->common.chr, monitor_can_read,
+                             monitor_qmp_read, monitor_qmp_event,
+                             NULL, &mon->common, context, true);
+    monitor_list_append(&mon->common);
 }
 
 static void monitor_init_qmp(Chardev *chr, int flags)
 {
-    Monitor *mon = g_malloc(sizeof(*mon));
+    MonitorQMP *mon = g_malloc0(sizeof(*mon));
 
     /* Note: we run QMP monitor in I/O thread when @chr supports that */
-    monitor_data_init(mon, flags, false,
+    monitor_data_init(&mon->common, flags, false,
                       qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_GCONTEXT));
 
-    qemu_mutex_init(&mon->qmp.qmp_queue_lock);
-    mon->qmp.qmp_requests = g_queue_new();
+    qemu_mutex_init(&mon->qmp_queue_lock);
+    mon->qmp_requests = g_queue_new();
 
-    qemu_chr_fe_init(&mon->chr, chr, &error_abort);
-    qemu_chr_fe_set_echo(&mon->chr, true);
+    qemu_chr_fe_init(&mon->common.chr, chr, &error_abort);
+    qemu_chr_fe_set_echo(&mon->common.chr, true);
 
-    json_message_parser_init(&mon->qmp.parser, handle_qmp_command, mon, NULL);
-    if (mon->use_io_thread) {
+    json_message_parser_init(&mon->parser, handle_qmp_command, mon, NULL);
+    if (mon->common.use_io_thread) {
         /*
          * Make sure the old iowatch is gone.  It's possible when
          * e.g. the chardev is in client mode, with wait=on.
@@ -4653,10 +4658,10 @@ static void monitor_init_qmp(Chardev *chr, int flags)
                                 monitor_qmp_setup_handlers_bh, mon);
         /* The bottom half will add @mon to @mon_list */
     } else {
-        qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read,
+        qemu_chr_fe_set_handlers(&mon->common.chr, monitor_can_read,
                                  monitor_qmp_read, monitor_qmp_event,
-                                 NULL, mon, NULL, true);
-        monitor_list_append(mon);
+                                 NULL, &mon->common, NULL, true);
+        monitor_list_append(&mon->common);
     }
 }
 
-- 
2.21.0
