From 557a63fb212e89f0448b08025fb5c88c46ea198e Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio
Date: Tue, 11 Feb 2025 19:15:36 +0100
Subject: [PATCH v8 2/3] Bump postmaster soft open file limit (RLIMIT_NOFILE)
 when necessary

The default open file limit of 1024 on Linux is extremely low. The
reason it hasn't been changed is that doing so would break legacy
programs that use the select(2) system call in hard-to-debug ways. So
instead, programs that want to opt in to a higher open file limit are
expected to bump their soft limit to their hard limit on startup.
Details on this are very well explained in a blog post by the systemd
author[1]. A similar change was also done by the Go language[2].

This makes the postmaster bump its soft open file limit when we realize
that we'll run into the soft limit with the requested
max_files_per_process GUC. We do so by slightly changing the meaning of
the max_files_per_process GUC. The actual (not publicly exposed) limit
is max_safe_fds; previously this would be set to:

    max_files_per_process - already_open_files - NUM_RESERVED_FDS

After this change we now try to set max_safe_fds to
max_files_per_process if the system allows that. This is deemed more
natural for users to understand, because now the limit on the number of
files that they can open is actually what they configured in
max_files_per_process.

Adding this infrastructure to change RLIMIT_NOFILE when needed is
especially useful for the AIO work that Andres is doing, because
io_uring consumes a lot of file descriptors. Even without looking at
AIO, there are many reports from people who had to change their soft
file limit before starting Postgres, sometimes falling back to lowering
max_files_per_process when they failed to do so[3-8]. It's also not all
that strange to fail at setting the soft open file limit, because there
are multiple places where one can configure such limits and usually
only one of them is effective (which one depends on how Postgres is
started). In cloud environments it's also often not possible for the
user to change the soft limit, because they don't control the way that
Postgres is started.

One thing to note is that we temporarily restore the original soft
limit when shelling out to other executables. This is done as a
precaution in case those executables are using select(2).

[1]: https://0pointer.net/blog/file-descriptor-limits.html
[2]: https://github.com/golang/go/issues/46279
[3]: https://serverfault.com/questions/785330/getting-too-many-open-files-error-for-postgres
[4]: https://serverfault.com/questions/716982/how-to-raise-max-no-of-file-descriptors-for-daemons-running-on-debian-jessie
[5]: https://www.postgresql.org/message-id/flat/CAKtc8vXh7NvP_qWj8EqqorPY97bvxSaX3h5u7a9PptRFHW5x7g%40mail.gmail.com
[6]: https://www.postgresql.org/message-id/flat/113ce31b0908120955w77029099i7ececc053084095a%40mail.gmail.com
[7]: https://github.com/abiosoft/colima/discussions/836
[8]: https://www.postgresql.org/message-id/flat/29663.1007738957%40sss.pgh.pa.us#2079ec9e2d8b251593812a3711bfe9e9
---
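For illustration, here is a minimal standalone C sketch of the pattern
described above: bump the soft RLIMIT_NOFILE towards the hard limit at
startup, and restore the original soft limit before spawning children
that might use select(2). It is not part of the patch;
bump_open_file_limit and restore_open_file_limit are made-up names for
the example, loosely mirroring what the SaveOriginalOpenFileLimit /
IncreaseOpenFileLimit / UseOriginalOpenFileLimit functions below do in
fd.c:

    #include <stdio.h>
    #include <sys/resource.h>

    static struct rlimit original_limit;

    /* Raise the soft RLIMIT_NOFILE towards the hard limit. */
    static int
    bump_open_file_limit(rlim_t wanted)
    {
        struct rlimit rlim;

        if (getrlimit(RLIMIT_NOFILE, &original_limit) != 0)
            return -1;

        rlim = original_limit;
        /* Only the soft limit may be raised, and never past the hard limit. */
        rlim.rlim_cur = (wanted > rlim.rlim_max) ? rlim.rlim_max : wanted;
        return setrlimit(RLIMIT_NOFILE, &rlim);
    }

    /* Restore the original soft limit, e.g. before calling system(). */
    static int
    restore_open_file_limit(void)
    {
        return setrlimit(RLIMIT_NOFILE, &original_limit);
    }

    int
    main(void)
    {
        struct rlimit rlim;

        bump_open_file_limit(65536);
        getrlimit(RLIMIT_NOFILE, &rlim);
        printf("soft limit now: %ld (hard limit: %ld)\n",
               (long) rlim.rlim_cur, (long) rlim.rlim_max);
        restore_open_file_limit();
        return 0;
    }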
 src/backend/storage/file/fd.c | 199 +++++++++++++++++++++++++++++++---
 1 file changed, 184 insertions(+), 15 deletions(-)

diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 9f3b58c3767..37de12fbb7e 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -158,6 +158,13 @@ int	max_files_per_process = 1000;
  */
 int			max_safe_fds = FD_MINFREE;	/* default if not changed */
 
+#ifdef HAVE_GETRLIMIT
+static bool saved_original_max_open_files;
+static struct rlimit original_max_open_files;
+static struct rlimit custom_max_open_files;
+#endif
+
+
 /* Whether it is safe to continue running after fsync() fails. */
 bool		data_sync_retry = false;
 
@@ -943,6 +950,152 @@ InitTemporaryFileAccess(void)
 #endif
 }
 
+/*
+ * Returns true if the passed in highestfd is the last one that we're allowed
+ * to open based on our custom open file limit. Returns false if no such
+ * limit was saved, or on platforms without getrlimit support.
+ */
+static bool
+IsOpenFileLimit(int highestfd)
+{
+#ifdef HAVE_GETRLIMIT
+	if (!saved_original_max_open_files)
+	{
+		return false;
+	}
+
+	return highestfd >= custom_max_open_files.rlim_cur - 1;
+#else
+	return false;
+#endif
+}
+
+/*
+ * Increases the open file limit (RLIMIT_NOFILE) by the requested amount.
+ * Returns true if successful, false otherwise.
+ */
+static bool
+IncreaseOpenFileLimit(int extra_files)
+{
+#ifdef HAVE_GETRLIMIT
+	struct rlimit rlim;
+
+	if (!saved_original_max_open_files)
+	{
+		return false;
+	}
+
+	rlim = custom_max_open_files;
+
+	/* If we're already at the hard limit, we've reached our ceiling */
+	if (rlim.rlim_cur == original_max_open_files.rlim_max)
+		return false;
+
+	/* Otherwise try to increase the soft limit to what we need */
+	rlim.rlim_cur = Min(rlim.rlim_cur + extra_files, rlim.rlim_max);
+
+	if (setrlimit(RLIMIT_NOFILE, &rlim) != 0)
+	{
+		/* We made sure not to exceed the hard limit, so this shouldn't fail */
+		ereport(WARNING, (errmsg("setrlimit failed: %m")));
+		return false;
+	}
+
+	custom_max_open_files = rlim;
+
+	elog(LOG, "increased open file limit to %ld", (long) rlim.rlim_cur);
+
+	return true;
+#else
+	return false;
+#endif
+}
+
+/*
+ * Saves the original open file limit (RLIMIT_NOFILE) the first time this is
+ * called. If called again it's a no-op.
+ */
+static void
+SaveOriginalOpenFileLimit(void)
+{
+#ifdef HAVE_GETRLIMIT
+	int			status;
+
+	if (saved_original_max_open_files)
+	{
+		/* Already saved, no need to do it again */
+		return;
+	}
+
+	status = getrlimit(RLIMIT_NOFILE, &original_max_open_files);
+	if (status != 0)
+	{
+		ereport(WARNING, (errmsg("getrlimit failed: %m")));
+		return;
+	}
+
+	custom_max_open_files = original_max_open_files;
+	saved_original_max_open_files = true;
+	return;
+#endif
+}
+
+/*
+ * UseOriginalOpenFileLimit --- Makes the process use the original open file
+ * limit that was present at postmaster start.
+ *
+ * This should be called before spawning subprocesses that might use
+ * select(2), which can only handle file descriptors up to 1024.
+ */
+static void
+UseOriginalOpenFileLimit(void)
+{
+#ifdef HAVE_GETRLIMIT
+	if (!saved_original_max_open_files)
+	{
+		return;
+	}
+
+	if (custom_max_open_files.rlim_cur == original_max_open_files.rlim_cur)
+	{
+		/* Not changed, so no need to call setrlimit at all */
+		return;
+	}
+
+	if (setrlimit(RLIMIT_NOFILE, &original_max_open_files) != 0)
+	{
+		ereport(WARNING, (errmsg("setrlimit failed: %m")));
+	}
+#endif
+}
+
+/*
+ * UseCustomOpenFileLimit --- Makes the process use the custom open file
+ * limit that we configured based on the max_files_per_process GUC.
+ */
+static void
+UseCustomOpenFileLimit(void)
+{
+#ifdef HAVE_GETRLIMIT
+	if (!saved_original_max_open_files)
+	{
+		return;
+	}
+
+	if (custom_max_open_files.rlim_cur == original_max_open_files.rlim_cur)
+	{
+		/* Not changed, so no need to call setrlimit at all */
+		return;
+	}
+
+	if (setrlimit(RLIMIT_NOFILE, &custom_max_open_files) != 0)
+	{
+		ereport(WARNING, (errmsg("setrlimit failed: %m")));
+	}
+#endif
+}
+
 /*
  * count_usable_fds --- count how many FDs the system will let us open,
  * and estimate how many are already open.
@@ -966,38 +1119,39 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
 	int			highestfd = 0;
 	int			j;
 
-#ifdef HAVE_GETRLIMIT
-	struct rlimit rlim;
-	int			getrlimit_status;
-#endif
-
 	size = 1024;
 	fd = (int *) palloc(size * sizeof(int));
 
-#ifdef HAVE_GETRLIMIT
-	getrlimit_status = getrlimit(RLIMIT_NOFILE, &rlim);
-	if (getrlimit_status != 0)
-		ereport(WARNING, (errmsg("getrlimit failed: %m")));
-#endif							/* HAVE_GETRLIMIT */
+	SaveOriginalOpenFileLimit();
 
 	/* dup until failure or probe limit reached */
 	for (;;)
 	{
 		int			thisfd;
 
-#ifdef HAVE_GETRLIMIT
-
 		/*
 		 * don't go beyond RLIMIT_NOFILE; causes irritating kernel logs on
 		 * some platforms
 		 */
-		if (getrlimit_status == 0 && highestfd >= rlim.rlim_cur - 1)
-			break;
-#endif
+		if (IsOpenFileLimit(highestfd))
+		{
+			if (!IncreaseOpenFileLimit(max_to_probe - used))
+				break;
+		}
 
 		thisfd = dup(2);
 		if (thisfd < 0)
 		{
+			/*
+			 * Even though we do the pre-check above, it's still possible
+			 * that the call to dup() fails with EMFILE. This can happen if
+			 * the last file descriptor was already assigned to an "already
+			 * open" file. One example of when this happens is if we're
+			 * already at the soft limit when we call count_usable_fds.
+			 */
+			if (errno == EMFILE && IncreaseOpenFileLimit(max_to_probe - used))
+				continue;
+
 			/* Expect EMFILE or ENFILE, else it's fishy */
 			if (errno != EMFILE && errno != ENFILE)
 				elog(WARNING, "duplicating stderr file descriptor failed after %d successes: %m", used);
@@ -2747,6 +2901,7 @@ pg_system(const char *command, uint32 wait_event_info)
 {
 	int			rc;
 
+	UseOriginalOpenFileLimit();
 	fflush(NULL);
 
 	pgstat_report_wait_start(wait_event_info);
@@ -2769,6 +2924,7 @@ pg_system(const char *command, uint32 wait_event_info)
 	PostRestoreCommand();
 	pgstat_report_wait_end();
 
+	UseCustomOpenFileLimit();
 	return rc;
 }
@@ -2802,6 +2958,19 @@ OpenPipeStream(const char *command, const char *mode)
 	ReleaseLruFiles();
 
 TryAgain:
+
+	/*
+	 * It would be great if we could call UseOriginalOpenFileLimit here, but
+	 * since popen() also opens a file in the current process (this side of
+	 * the pipe), we cannot do so safely: we might already have many more
+	 * files open than the original limit allows.
+	 *
+	 * The only way to address this would be to implement a custom popen()
+	 * that calls UseOriginalOpenFileLimit only around the actual fork call,
+	 * but that seems like too much effort to handle the corner case where
+	 * the external command both uses select() and tries to open more files
+	 * than select() allows for.
+	 */
 	fflush(NULL);
 	pqsignal(SIGPIPE, SIG_DFL);
 	errno = 0;
-- 
2.51.1