From fa7aac1bc9c0a47fbdbd9459424f08fa61b71ce2 Mon Sep 17 00:00:00 2001 From: Thomas Munro Date: Fri, 11 Apr 2025 21:17:26 +1200 Subject: [PATCH v2 1/2] aio: Try repeatedly to give batched IOs to workers. Previously, when the submission queue was full we'd run all remaining IOs in a batched submission synchronously. Andres rightly pointed out that we should really try again between synchronous IOs, since the workers might have made progress in draining the queue. Suggested-by: Andres Freund Discussion: https://postgr.es/m/CA%2BhUKG%2Bm4xV0LMoH2c%3DoRAdEXuCnh%2BtGBTWa7uFeFMGgTLAw%2BQ%40mail.gmail.com --- src/backend/storage/aio/method_worker.c | 30 ++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/src/backend/storage/aio/method_worker.c b/src/backend/storage/aio/method_worker.c index bf8f77e6ff6..9a82d5f847d 100644 --- a/src/backend/storage/aio/method_worker.c +++ b/src/backend/storage/aio/method_worker.c @@ -282,12 +282,36 @@ pgaio_worker_submit_internal(int num_staged_ios, PgAioHandle **staged_ios) SetLatch(wakeup); /* Run whatever is left synchronously. */ - if (nsync > 0) + for (int i = 0; i < nsync; ++i) { - for (int i = 0; i < nsync; ++i) + wakeup = NULL; + + /* + * Between synchronous IO operations, try again to enqueue as many as + * we can. + */ + if (i > 0) { - pgaio_io_perform_synchronously(synchronous_ios[i]); + wakeup = NULL; + + LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE); + while (i < nsync && + pgaio_worker_submission_queue_insert(synchronous_ios[i])) + { + if (wakeup == NULL && (worker = pgaio_worker_choose_idle()) >= 0) + wakeup = io_worker_control->workers[worker].latch; + i++; + } + LWLockRelease(AioWorkerSubmissionQueueLock); + + if (wakeup) + SetLatch(wakeup); + + if (i == nsync) + break; } + + pgaio_io_perform_synchronously(synchronous_ios[i]); } } -- 2.47.2