[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH 04/13] Add ThreadletQueue.
From: |
Arun R Bharadwaj |
Subject: |
[Qemu-devel] [PATCH 04/13] Add ThreadletQueue. |
Date: |
Tue, 04 Jan 2011 10:57:27 +0530 |
User-agent: |
StGit/0.15 |
This patch adds a global queue of type ThreadletQueue and
removes the earlier use of the request_list queue.
We want to create the worker threads on the first submit. Hence we
need to track whether the global queue has been initialized.
Signed-off-by: Arun R Bharadwaj <address@hidden>
---
posix-aio-compat.c | 161 +++++++++++++++++++++++++---------------------------
1 files changed, 77 insertions(+), 84 deletions(-)
diff --git a/posix-aio-compat.c b/posix-aio-compat.c
index 0a4d82b..7b41235 100644
--- a/posix-aio-compat.c
+++ b/posix-aio-compat.c
@@ -31,8 +31,23 @@
#include "block/raw-posix-aio.h"
+#define MAX_GLOBAL_THREADS 64
+#define MIN_GLOBAL_THREADS 8
+
static QemuMutex aiocb_mutex;
static QemuCond aiocb_completion;
+
+typedef struct ThreadletQueue
+{
+ QemuMutex lock;
+ QemuCond cond;
+ int max_threads;
+ int min_threads;
+ int cur_threads;
+ int idle_threads;
+ QTAILQ_HEAD(, ThreadletWork) request_list;
+} ThreadletQueue;
+
typedef struct ThreadletWork
{
QTAILQ_ENTRY(ThreadletWork) node;
@@ -66,15 +81,10 @@ typedef struct PosixAioState {
struct qemu_paiocb *first_aio;
} PosixAioState;
-
-static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
-static pthread_t thread_id;
+/* Default ThreadletQueue */
+static ThreadletQueue globalqueue;
+static int globalqueue_init;
static pthread_attr_t attr;
-static int max_threads = 64;
-static int cur_threads = 0;
-static int idle_threads = 0;
-static QTAILQ_HEAD(, ThreadletWork) request_list;
#ifdef CONFIG_PREADV
static int preadv_present = 1;
@@ -93,32 +103,6 @@ static void die(const char *what)
die2(errno, what);
}
-static void mutex_lock(pthread_mutex_t *mutex)
-{
- int ret = pthread_mutex_lock(mutex);
- if (ret) die2(ret, "pthread_mutex_lock");
-}
-
-static void mutex_unlock(pthread_mutex_t *mutex)
-{
- int ret = pthread_mutex_unlock(mutex);
- if (ret) die2(ret, "pthread_mutex_unlock");
-}
-
-static int cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
- struct timespec *ts)
-{
- int ret = pthread_cond_timedwait(cond, mutex, ts);
- if (ret && ret != ETIMEDOUT) die2(ret, "pthread_cond_timedwait");
- return ret;
-}
-
-static void cond_signal(pthread_cond_t *cond)
-{
- int ret = pthread_cond_signal(cond);
- if (ret) die2(ret, "pthread_cond_signal");
-}
-
static void thread_create(pthread_t *thread, pthread_attr_t *attr,
void *(*start_routine)(void*), void *arg)
{
@@ -311,42 +295,45 @@ static ssize_t handle_aiocb_rw(struct qemu_paiocb *aiocb)
static void *threadlet_worker(void *data)
{
+ ThreadletQueue *queue = data;
+ qemu_mutex_lock(&queue->lock);
while (1) {
- ssize_t ret = 0;
- qemu_timeval tv;
- struct timespec ts;
ThreadletWork *work;
+ int ret = 0;
- qemu_gettimeofday(&tv);
- ts.tv_sec = tv.tv_sec + 10;
- ts.tv_nsec = 0;
-
- mutex_lock(&lock);
-
- while (QTAILQ_EMPTY(&request_list) &&
- !(ret == ETIMEDOUT)) {
- ret = cond_timedwait(&cond, &lock, &ts);
+ while (QTAILQ_EMPTY(&queue->request_list) &&
+ (ret != ETIMEDOUT)) {
+ /* wait for cond to be signalled or broadcast, for up to 10
+  * seconds (10 * 1000 ms) — matches the 10s timeout of the
+  * cond_timedwait this replaces; 10*100000 would be 1000s */
+ ret = qemu_cond_timedwait((&queue->cond),
+ &(queue->lock), 10*1000);
}
- if (QTAILQ_EMPTY(&request_list)) {
- idle_threads--;
- cur_threads--;
- mutex_unlock(&lock);
- break;
- }
- work = QTAILQ_FIRST(&request_list);
- QTAILQ_REMOVE(&request_list, work, node);
- idle_threads--;
- mutex_unlock(&lock);
+ assert(queue->idle_threads != 0);
+ if (QTAILQ_EMPTY(&queue->request_list)) {
+ if (queue->cur_threads > queue->min_threads) {
+ /* We retain the minimum number of threads */
+ break;
+ }
+ } else {
+ work = QTAILQ_FIRST(&queue->request_list);
+ QTAILQ_REMOVE(&queue->request_list, work, node);
- work->func(work);
- mutex_lock(&lock);
- idle_threads++;
- mutex_unlock(&lock);
+ queue->idle_threads--;
+ qemu_mutex_unlock(&queue->lock);
+ /* execute the work function */
+ work->func(work);
+
+ qemu_mutex_lock(&queue->lock);
+ queue->idle_threads++;
+ }
}
+ queue->idle_threads--;
+ queue->cur_threads--;
+ qemu_mutex_unlock(&queue->lock);
+
return NULL;
}
@@ -388,22 +375,24 @@ static void aio_thread(ThreadletWork *work)
return;
}
-static void spawn_thread(void)
+static void spawn_threadlet(ThreadletQueue *queue)
{
+ pthread_t thread_id;
sigset_t set, oldset;
- cur_threads++;
- idle_threads++;
+ queue->cur_threads++;
+ queue->idle_threads++;
/* block all signals */
if (sigfillset(&set)) die("sigfillset");
if (sigprocmask(SIG_SETMASK, &set, &oldset)) die("sigprocmask");
- thread_create(&thread_id, &attr, threadlet_worker, NULL);
+ thread_create(&thread_id, &attr, threadlet_worker, queue);
if (sigprocmask(SIG_SETMASK, &oldset, NULL)) die("sigprocmask restore");
}
+
static void qemu_paio_submit(struct qemu_paiocb *aiocb)
{
qemu_mutex_lock(&aiocb_mutex);
@@ -411,14 +400,29 @@ static void qemu_paio_submit(struct qemu_paiocb *aiocb)
aiocb->active = 0;
qemu_mutex_unlock(&aiocb_mutex);
- mutex_lock(&lock);
- if (idle_threads == 0 && cur_threads < max_threads)
- spawn_thread();
+ /* Initialize the queue — including its mutex — BEFORE taking the
+  * lock: locking an uninitialized QemuMutex is undefined behaviour.
+  * NOTE(review): this assumes first submission is not raced by
+  * another submitter — confirm against the caller's locking. */
+ if (!globalqueue_init) {
+ globalqueue.cur_threads = 0;
+ globalqueue.idle_threads = 0;
+ globalqueue.max_threads = MAX_GLOBAL_THREADS;
+ globalqueue.min_threads = MIN_GLOBAL_THREADS;
+ QTAILQ_INIT(&globalqueue.request_list);
+ qemu_mutex_init(&globalqueue.lock);
+ qemu_cond_init(&globalqueue.cond);
+
+ globalqueue_init = 1;
+ }
+
+ qemu_mutex_lock(&globalqueue.lock);
+
+ if (globalqueue.idle_threads == 0 &&
+ globalqueue.cur_threads < globalqueue.max_threads)
+ spawn_threadlet(&globalqueue);
aiocb->work.func = aio_thread;
- QTAILQ_INSERT_TAIL(&request_list, &aiocb->work, node);
- mutex_unlock(&lock);
- cond_signal(&cond);
+
+ QTAILQ_INSERT_TAIL(&globalqueue.request_list, &aiocb->work, node);
+ qemu_cond_signal(&globalqueue.cond);
+ qemu_mutex_unlock(&globalqueue.lock);
}
static ssize_t qemu_paio_return(struct qemu_paiocb *aiocb)
@@ -564,14 +568,14 @@ static void paio_cancel(BlockDriverAIOCB *blockacb)
struct qemu_paiocb *acb = (struct qemu_paiocb *)blockacb;
int active = 0;
- mutex_lock(&lock);
+ qemu_mutex_lock(&globalqueue.lock);
if (!acb->active) {
- QTAILQ_REMOVE(&request_list, &acb->work, node);
+ QTAILQ_REMOVE(&globalqueue.request_list, &acb->work, node);
acb->ret = -ECANCELED;
} else if (acb->ret == -EINPROGRESS) {
active = 1;
}
- mutex_unlock(&lock);
+ qemu_mutex_unlock(&globalqueue.lock);
qemu_mutex_lock(&aiocb_mutex);
if (!active) {
@@ -652,7 +656,6 @@ int paio_init(void)
struct sigaction act;
PosixAioState *s;
int fds[2];
- int ret;
if (posix_aio_state)
return 0;
@@ -682,16 +685,6 @@ int paio_init(void)
qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush,
posix_aio_process_queue, s);
- ret = pthread_attr_init(&attr);
- if (ret)
- die2(ret, "pthread_attr_init");
-
- ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
- if (ret)
- die2(ret, "pthread_attr_setdetachstate");
-
- QTAILQ_INIT(&request_list);
-
posix_aio_state = s;
return 0;
}
- [Qemu-devel] [PATCH 00/13] Threadlets infrastructure., Arun R Bharadwaj, 2011/01/04
- [Qemu-devel] [PATCH 01/13] Add aiocb_mutex and aiocb_completion., Arun R Bharadwaj, 2011/01/04
- [Qemu-devel] [PATCH 02/13] Introduce work concept in posix-aio-compat.c, Arun R Bharadwaj, 2011/01/04
- [Qemu-devel] [PATCH 03/13] Add callback function to ThreadletWork structure., Arun R Bharadwaj, 2011/01/04
- [Qemu-devel] [PATCH 04/13] Add ThreadletQueue.,
Arun R Bharadwaj <=
- [Qemu-devel] [PATCH 05/13] Threadlet: Add submit_work threadlet API., Arun R Bharadwaj, 2011/01/04
- [Qemu-devel] [PATCH 06/13] Threadlet: Add dequeue_work threadlet API, Arun R Bharadwaj, 2011/01/04
- [Qemu-devel] [PATCH 07/13] Remove active field in qemu_aiocb structure., Arun R Bharadwaj, 2011/01/04
- [Qemu-devel] [PATCH 08/13] Remove thread_create routine., Arun R Bharadwaj, 2011/01/04