From b6f74632599b30ee5479b548e4a906839eb308e7 Mon Sep 17 00:00:00 2001 From: Georgios Kokolatos Date: Mon, 19 Dec 2022 15:16:45 +0000 Subject: [PATCH v18 2/3] Introduce Compressor API in pg_dump The purpose of this API is to allow for easier addition of new compression methods. CompressFileHandle is substituting the cfp* family of functions under a struct of function pointers for opening, writing, etc. The implementor of a new compression method is now able to "simply" just add those definitions. Custom compressed archives now need to store the compression algorithm in their header. This requires a bump in the version number. The level of compression is no longer stored in the dump as it is irrelevant. --- src/bin/pg_dump/Makefile | 1 + src/bin/pg_dump/compress_gzip.c | 398 +++++++++++ src/bin/pg_dump/compress_gzip.h | 22 + src/bin/pg_dump/compress_io.c | 918 +++++++------------------- src/bin/pg_dump/compress_io.h | 71 +- src/bin/pg_dump/meson.build | 1 + src/bin/pg_dump/pg_backup_archiver.c | 102 +-- src/bin/pg_dump/pg_backup_archiver.h | 5 +- src/bin/pg_dump/pg_backup_custom.c | 23 +- src/bin/pg_dump/pg_backup_directory.c | 94 +-- src/bin/pg_dump/t/002_pg_dump.pl | 10 +- src/tools/pginclude/cpluspluscheck | 1 + src/tools/pgindent/typedefs.list | 2 + 13 files changed, 846 insertions(+), 802 deletions(-) create mode 100644 src/bin/pg_dump/compress_gzip.c create mode 100644 src/bin/pg_dump/compress_gzip.h diff --git a/src/bin/pg_dump/Makefile b/src/bin/pg_dump/Makefile index 9dc5a784dd..29eab02d37 100644 --- a/src/bin/pg_dump/Makefile +++ b/src/bin/pg_dump/Makefile @@ -24,6 +24,7 @@ LDFLAGS_INTERNAL += -L$(top_builddir)/src/fe_utils -lpgfeutils $(libpq_pgport) OBJS = \ $(WIN32RES) \ + compress_gzip.o \ compress_io.o \ dumputils.o \ parallel.o \ diff --git a/src/bin/pg_dump/compress_gzip.c b/src/bin/pg_dump/compress_gzip.c new file mode 100644 index 0000000000..95e1d6c276 --- /dev/null +++ b/src/bin/pg_dump/compress_gzip.c @@ -0,0 +1,398 @@ 
+/*-------------------------------------------------------------------------
+ *
+ * compress_gzip.c
+ * Routines for archivers to write an uncompressed or compressed data
+ * stream.
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/bin/pg_dump/compress_gzip.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres_fe.h"
+#include <unistd.h>
+
+#include "compress_gzip.h"
+#include "pg_backup_utils.h"
+
+#ifdef HAVE_LIBZ
+#include "zlib.h"
+
+/*----------------------
+ * Compressor API
+ *----------------------
+ */
+typedef struct GzipCompressorState
+{
+ z_streamp zp;
+
+ void *outbuf;
+ size_t outsize;
+} GzipCompressorState;
+
+/* Private routines that support gzip compressed data I/O */
+static void
+DeflateCompressorGzip(ArchiveHandle *AH, CompressorState *cs, bool flush)
+{
+ GzipCompressorState *gzipcs = (GzipCompressorState *) cs->private_data;
+ z_streamp zp = gzipcs->zp;
+ void *out = gzipcs->outbuf;
+ int res = Z_OK;
+
+ while (gzipcs->zp->avail_in != 0 || flush)
+ {
+ res = deflate(zp, flush ? Z_FINISH : Z_NO_FLUSH);
+ if (res == Z_STREAM_ERROR)
+ pg_fatal("could not compress data: %s", zp->msg);
+ if ((flush && (zp->avail_out < gzipcs->outsize))
+ || (zp->avail_out == 0)
+ || (zp->avail_in != 0)
+ )
+ {
+ /*
+ * Extra paranoia: avoid zero-length chunks, since a zero length
+ * chunk is the EOF marker in the custom format. This should never
+ * happen but...
+ */
+ if (zp->avail_out < gzipcs->outsize)
+ {
+ /*
+ * Any write function should do its own error checking but to
+ * make sure we do a check here as well...
+ */ + size_t len = gzipcs->outsize - zp->avail_out; + + cs->writeF(AH, (char *) out, len); + } + zp->next_out = out; + zp->avail_out = gzipcs->outsize; + } + + if (res == Z_STREAM_END) + break; + } +} + +static void +EndCompressorGzip(ArchiveHandle *AH, CompressorState *cs) +{ + GzipCompressorState *gzipcs = (GzipCompressorState *) cs->private_data; + z_streamp zp; + + if (gzipcs->zp) + { + zp = gzipcs->zp; + zp->next_in = NULL; + zp->avail_in = 0; + + /* Flush any remaining data from zlib buffer */ + DeflateCompressorGzip(AH, cs, true); + + if (deflateEnd(zp) != Z_OK) + pg_fatal("could not close compression stream: %s", zp->msg); + + pg_free(gzipcs->outbuf); + pg_free(gzipcs->zp); + } + + pg_free(gzipcs); + cs->private_data = NULL; +} + +static void +WriteDataToArchiveGzip(ArchiveHandle *AH, CompressorState *cs, + const void *data, size_t dLen) +{ + GzipCompressorState *gzipcs = (GzipCompressorState *) cs->private_data; + z_streamp zp; + + if (!gzipcs->zp) + { + zp = gzipcs->zp = (z_streamp) pg_malloc(sizeof(z_stream)); + zp->zalloc = Z_NULL; + zp->zfree = Z_NULL; + zp->opaque = Z_NULL; + + /* + * outsize is the buffer size we tell zlib it can output to. We + * actually allocate one extra byte because some routines want to + * append a trailing zero byte to the zlib output. 
+ */ + gzipcs->outbuf = pg_malloc(ZLIB_OUT_SIZE + 1); + gzipcs->outsize = ZLIB_OUT_SIZE; + + if (deflateInit(zp, cs->compression_spec.level) != Z_OK) + pg_fatal("could not initialize compression library: %s", zp->msg); + + /* Just be paranoid - maybe End is called after Start, with no Write */ + zp->next_out = gzipcs->outbuf; + zp->avail_out = gzipcs->outsize; + } + + gzipcs->zp->next_in = (void *) unconstify(void *, data); + gzipcs->zp->avail_in = dLen; + DeflateCompressorGzip(AH, cs, false); +} + +static void +ReadDataFromArchiveGzip(ArchiveHandle *AH, CompressorState *cs) +{ + z_streamp zp; + char *out; + int res = Z_OK; + size_t cnt; + char *buf; + size_t buflen; + + zp = (z_streamp) pg_malloc(sizeof(z_stream)); + zp->zalloc = Z_NULL; + zp->zfree = Z_NULL; + zp->opaque = Z_NULL; + + buf = pg_malloc(ZLIB_IN_SIZE); + buflen = ZLIB_IN_SIZE; + + out = pg_malloc(ZLIB_OUT_SIZE + 1); + + if (inflateInit(zp) != Z_OK) + pg_fatal("could not initialize compression library: %s", + zp->msg); + + /* no minimal chunk size for zlib */ + while ((cnt = cs->readF(AH, &buf, &buflen))) + { + zp->next_in = (void *) buf; + zp->avail_in = cnt; + + while (zp->avail_in > 0) + { + zp->next_out = (void *) out; + zp->avail_out = ZLIB_OUT_SIZE; + + res = inflate(zp, 0); + if (res != Z_OK && res != Z_STREAM_END) + pg_fatal("could not uncompress data: %s", zp->msg); + + out[ZLIB_OUT_SIZE - zp->avail_out] = '\0'; + ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH); + } + } + + zp->next_in = NULL; + zp->avail_in = 0; + while (res != Z_STREAM_END) + { + zp->next_out = (void *) out; + zp->avail_out = ZLIB_OUT_SIZE; + res = inflate(zp, 0); + if (res != Z_OK && res != Z_STREAM_END) + pg_fatal("could not uncompress data: %s", zp->msg); + + out[ZLIB_OUT_SIZE - zp->avail_out] = '\0'; + ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH); + } + + if (inflateEnd(zp) != Z_OK) + pg_fatal("could not close compression library: %s", zp->msg); + + free(buf); + free(out); + free(zp); +} + +/* Public routines 
that support gzip compressed data I/O */ +void +InitCompressorGzip(CompressorState *cs, const pg_compress_specification compression_spec) +{ + GzipCompressorState *gzipcs; + + cs->readData = ReadDataFromArchiveGzip; + cs->writeData = WriteDataToArchiveGzip; + cs->end = EndCompressorGzip; + + cs->compression_spec = compression_spec; + + gzipcs = (GzipCompressorState *) pg_malloc0(sizeof(GzipCompressorState)); + + cs->private_data = gzipcs; +} + + +/*---------------------- + * Compress File API + *---------------------- + */ + +static size_t +Gzip_read(void *ptr, size_t size, CompressFileHandle *CFH) +{ + gzFile gzfp = (gzFile) CFH->private_data; + size_t ret; + + ret = gzread(gzfp, ptr, size); + if (ret != size && !gzeof(gzfp)) + { + int errnum; + const char *errmsg = gzerror(gzfp, &errnum); + + pg_fatal("could not read from input file: %s", + errnum == Z_ERRNO ? strerror(errno) : errmsg); + } + + return ret; +} + +static size_t +Gzip_write(const void *ptr, size_t size, CompressFileHandle *CFH) +{ + gzFile gzfp = (gzFile) CFH->private_data; + + return gzwrite(gzfp, ptr, size); +} + +static int +Gzip_getc(CompressFileHandle *CFH) +{ + gzFile gzfp = (gzFile) CFH->private_data; + int ret; + + errno = 0; + ret = gzgetc(gzfp); + if (ret == EOF) + { + if (!gzeof(gzfp)) + pg_fatal("could not read from input file: %s", strerror(errno)); + else + pg_fatal("could not read from input file: end of file"); + } + + return ret; +} + +static char * +Gzip_gets(char *ptr, int size, CompressFileHandle *CFH) +{ + gzFile gzfp = (gzFile) CFH->private_data; + + return gzgets(gzfp, ptr, size); +} + +static int +Gzip_close(CompressFileHandle *CFH) +{ + gzFile gzfp = (gzFile) CFH->private_data; + int save_errno; + int ret; + + CFH->private_data = NULL; + + ret = gzclose(gzfp); + + save_errno = errno; + errno = save_errno; + + return ret; +} + +static int +Gzip_eof(CompressFileHandle *CFH) +{ + gzFile gzfp = (gzFile) CFH->private_data; + + return gzeof(gzfp); +} + +static const char * 
+Gzip_get_error(CompressFileHandle *CFH) +{ + gzFile gzfp = (gzFile) CFH->private_data; + const char *errmsg; + int errnum; + + errmsg = gzerror(gzfp, &errnum); + if (errnum == Z_ERRNO) + errmsg = strerror(errno); + + return errmsg; +} + +static int +Gzip_open(const char *path, int fd, const char *mode, CompressFileHandle *CFH) +{ + gzFile gzfp; + char mode_compression[32]; + + if (CFH->compression_spec.level != Z_DEFAULT_COMPRESSION) + { + /* + * user has specified a compression level, so tell zlib to use it + */ + snprintf(mode_compression, sizeof(mode_compression), "%s%d", + mode, CFH->compression_spec.level); + } + else + strcpy(mode_compression, mode); + + if (fd >= 0) + gzfp = gzdopen(dup(fd), mode_compression); + else + gzfp = gzopen(path, mode_compression); + + if (gzfp == NULL) + return 1; + + CFH->private_data = gzfp; + + return 0; +} + +static int +Gzip_open_write(const char *path, const char *mode, CompressFileHandle *CFH) +{ + char *fname; + int ret; + int save_errno; + + fname = psprintf("%s.gz", path); + ret = CFH->open_func(fname, -1, mode, CFH); + + save_errno = errno; + pg_free(fname); + errno = save_errno; + + return ret; +} + +void +InitCompressGzip(CompressFileHandle *CFH, const pg_compress_specification compression_spec) +{ + CFH->open_func = Gzip_open; + CFH->open_write_func = Gzip_open_write; + CFH->read_func = Gzip_read; + CFH->write_func = Gzip_write; + CFH->gets_func = Gzip_gets; + CFH->getc_func = Gzip_getc; + CFH->close_func = Gzip_close; + CFH->eof_func = Gzip_eof; + CFH->get_error_func = Gzip_get_error; + + CFH->compression_spec = compression_spec; + + CFH->private_data = NULL; +} +#else /* HAVE_LIBZ */ +void +InitCompressorGzip(CompressorState *cs, const pg_compress_specification compression_spec) +{ + pg_fatal("this build does not support compression with %s", "gzip"); +} + +void +InitCompressGzip(CompressFileHandle *CFH, const pg_compress_specification compression_spec) +{ + pg_fatal("this build does not support compression with 
%s", "gzip"); +} +#endif /* HAVE_LIBZ */ diff --git a/src/bin/pg_dump/compress_gzip.h b/src/bin/pg_dump/compress_gzip.h new file mode 100644 index 0000000000..6dfd0eb04d --- /dev/null +++ b/src/bin/pg_dump/compress_gzip.h @@ -0,0 +1,22 @@ +/*------------------------------------------------------------------------- + * + * compress_gzip.h + * Interface to compress_io.c routines + * + * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/bin/pg_dump/compress_gzip.h + * + *------------------------------------------------------------------------- + */ +#ifndef _COMPRESS_GZIP_H_ +#define _COMPRESS_GZIP_H_ + +#include "compress_io.h" + +extern void InitCompressorGzip(CompressorState *cs, const pg_compress_specification compression_spec); +extern void InitCompressGzip(CompressFileHandle *CFH, const pg_compress_specification compression_spec); + +#endif /* _COMPRESS_GZIP_H_ */ diff --git a/src/bin/pg_dump/compress_io.c b/src/bin/pg_dump/compress_io.c index bbac154669..4fe5b262ad 100644 --- a/src/bin/pg_dump/compress_io.c +++ b/src/bin/pg_dump/compress_io.c @@ -9,42 +9,44 @@ * * This file includes two APIs for dealing with compressed data. The first * provides more flexibility, using callbacks to read/write data from the - * underlying stream. The second API is a wrapper around fopen/gzopen and + * underlying stream. The second API is a wrapper around fopen and * friends, providing an interface similar to those, but abstracts away - * the possible compression. Both APIs use libz for the compression, but - * the second API uses gzip headers, so the resulting files can be easily - * manipulated with the gzip utility. + * the possible compression. The second API is aimed for the resulting + * files can be easily manipulated with an external compression utility + * program. 
* * Compressor API * -------------- * * The interface for writing to an archive consists of three functions: - * AllocateCompressor, WriteDataToArchive and EndCompressor. First you call - * AllocateCompressor, then write all the data by calling WriteDataToArchive - * as many times as needed, and finally EndCompressor. WriteDataToArchive - * and EndCompressor will call the WriteFunc that was provided to - * AllocateCompressor for each chunk of compressed data. + * AllocateCompressor, writeData, and EndCompressor. First you call + * AllocateCompressor, then write all the data by calling writeData as many + * times as needed, and finally EndCompressor. writeData will call the + * WriteFunc that was provided to AllocateCompressor for each chunk of + * compressed data. * - * The interface for reading an archive consists of just one function: - * ReadDataFromArchive. ReadDataFromArchive reads the whole compressed input - * stream, by repeatedly calling the given ReadFunc. ReadFunc returns the - * compressed data chunk at a time, and ReadDataFromArchive decompresses it - * and passes the decompressed data to ahwrite(), until ReadFunc returns 0 - * to signal EOF. - * - * The interface is the same for compressed and uncompressed streams. + * The interface for reading an archive consists of the same three functions: + * AllocateCompressor, readData, and EndCompressor. First you call + * AllocateCompressor, then read all the data by calling readData to read the + * whole compressed stream which repeatedly calls the given ReadFunc. ReadFunc + * returns the compressed data chunk at a time, and readData decompresses it + * and passes the decompressed data to ahwrite(), until ReadFunc returns 0 to + * signal EOF. The interface is the same for compressed and uncompressed + * streams. * * Compressed stream API * ---------------------- * * The compressed stream API is a wrapper around the C standard fopen() and - * libz's gzopen() APIs. 
It allows you to use the same functions for - * compressed and uncompressed streams. cfopen_read() first tries to open - * the file with given name, and if it fails, it tries to open the same - * file with the .gz suffix. cfopen_write() opens a file for writing, an - * extra argument specifies if the file should be compressed, and adds the - * .gz suffix to the filename if so. This allows you to easily handle both - * compressed and uncompressed files. + * libz's gzopen() APIs and custom LZ4 calls which provide similar + * functionality. It allows you to use the same functions for compressed and + * uncompressed streams. cfopen_read() first tries to open the file with given + * name, and if it fails, it tries to open the same file with the .gz suffix, + * failing that it tries to open the same file with the .lz4 suffix. + * cfopen_write() opens a file for writing, an extra argument specifies the + * method to use should the file be compressed, and adds the appropriate + * suffix, .gz or .lz4, to the filename if so. This allows you to easily handle + * both compressed and uncompressed files. 
 *
 * IDENTIFICATION
 * src/bin/pg_dump/compress_io.c
@@ -53,7 +55,11 @@
 */
 #include "postgres_fe.h"
 
+#include <sys/stat.h>
+#include <unistd.h>
+
 #include "compress_io.h"
+#include "compress_gzip.h"
 #include "pg_backup_utils.h"
 
 #ifdef HAVE_LIBZ
@@ -65,85 +71,70 @@
 *----------------------
 */
 
-/* typedef appears in compress_io.h */
-struct CompressorState
+/* Private routines that support uncompressed data I/O */
+static void
+ReadDataFromArchiveNone(ArchiveHandle *AH, CompressorState *cs)
 {
- pg_compress_specification compression_spec;
- WriteFunc writeF;
+ size_t cnt;
+ char *buf;
+ size_t buflen;
 
-#ifdef HAVE_LIBZ
- z_streamp zp;
- char *zlibOut;
- size_t zlibOutSize;
-#endif
-};
+ buf = pg_malloc(ZLIB_OUT_SIZE);
+ buflen = ZLIB_OUT_SIZE;
+
+ while ((cnt = cs->readF(AH, &buf, &buflen)))
+ {
+ ahwrite(buf, 1, cnt, AH);
+ }
+
+ free(buf);
+}
 
-/* Routines that support zlib compressed data I/O */
-#ifdef HAVE_LIBZ
-static void InitCompressorZlib(CompressorState *cs, int level);
-static void DeflateCompressorZlib(ArchiveHandle *AH, CompressorState *cs,
- bool flush);
-static void ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF);
-static void WriteDataToArchiveZlib(ArchiveHandle *AH, CompressorState *cs,
- const char *data, size_t dLen);
-static void EndCompressorZlib(ArchiveHandle *AH, CompressorState *cs);
-#endif
-/* Routines that support uncompressed data I/O */
-static void ReadDataFromArchiveNone(ArchiveHandle *AH, ReadFunc readF);
-static void WriteDataToArchiveNone(ArchiveHandle *AH, CompressorState *cs,
- const char *data, size_t dLen);
+static void
+WriteDataToArchiveNone(ArchiveHandle *AH, CompressorState *cs,
+ const void *data, size_t dLen)
+{
+ cs->writeF(AH, data, dLen);
+}
+
+static void
+EndCompressorNone(ArchiveHandle *AH, CompressorState *cs)
+{
+ /* no op */
+}
+
+static void
+InitCompressorNone(CompressorState *cs,
+ const pg_compress_specification compression_spec)
+{
+ cs->readData = ReadDataFromArchiveNone;
+ cs->writeData = WriteDataToArchiveNone;
+ cs->end = 
EndCompressorNone; + + cs->compression_spec = compression_spec; +} /* Public interface routines */ /* Allocate a new compressor */ CompressorState * AllocateCompressor(const pg_compress_specification compression_spec, - WriteFunc writeF) + ReadFunc readF, WriteFunc writeF) { CompressorState *cs; -#ifndef HAVE_LIBZ - if (compression_spec.algorithm == PG_COMPRESSION_GZIP) - pg_fatal("this build does not support compression with %s", "gzip"); -#endif - cs = (CompressorState *) pg_malloc0(sizeof(CompressorState)); + cs->readF = readF; cs->writeF = writeF; - cs->compression_spec = compression_spec; - /* - * Perform compression algorithm specific initialization. - */ -#ifdef HAVE_LIBZ - if (cs->compression_spec.algorithm == PG_COMPRESSION_GZIP) - InitCompressorZlib(cs, cs->compression_spec.level); -#endif - - return cs; -} - -/* - * Read all compressed data from the input stream (via readF) and print it - * out with ahwrite(). - */ -void -ReadDataFromArchive(ArchiveHandle *AH, - const pg_compress_specification compression_spec, - ReadFunc readF) -{ switch (compression_spec.algorithm) { case PG_COMPRESSION_NONE: - ReadDataFromArchiveNone(AH, readF); + InitCompressorNone(cs, compression_spec); break; case PG_COMPRESSION_GZIP: -#ifdef HAVE_LIBZ - ReadDataFromArchiveZlib(AH, readF); -#else - pg_fatal("this build does not support compression with %s", - "gzip"); -#endif + InitCompressorGzip(cs, compression_spec); break; case PG_COMPRESSION_LZ4: pg_fatal("compression with %s is not yet supported", "LZ4"); @@ -152,35 +143,8 @@ ReadDataFromArchive(ArchiveHandle *AH, pg_fatal("compression with %s is not yet supported", "ZSTD"); break; } -} -/* - * Compress and write data to the output stream (via writeF). 
- */ -void -WriteDataToArchive(ArchiveHandle *AH, CompressorState *cs, - const void *data, size_t dLen) -{ - switch (cs->compression_spec.algorithm) - { - case PG_COMPRESSION_NONE: - WriteDataToArchiveNone(AH, cs, data, dLen); - break; - case PG_COMPRESSION_GZIP: -#ifdef HAVE_LIBZ - WriteDataToArchiveZlib(AH, cs, data, dLen); -#else - pg_fatal("this build does not support compression with %s", - "gzip"); -#endif - break; - case PG_COMPRESSION_LZ4: - pg_fatal("compression with %s is not yet supported", "LZ4"); - break; - case PG_COMPRESSION_ZSTD: - pg_fatal("compression with %s is not yet supported", "ZSTD"); - break; - } + return cs; } /* @@ -189,402 +153,178 @@ WriteDataToArchive(ArchiveHandle *AH, CompressorState *cs, void EndCompressor(ArchiveHandle *AH, CompressorState *cs) { - switch (cs->compression_spec.algorithm) - { - case PG_COMPRESSION_NONE: - break; - case PG_COMPRESSION_GZIP: -#ifdef HAVE_LIBZ - EndCompressorZlib(AH, cs); -#else - pg_fatal("this build does not support compression with %s", - "gzip"); -#endif - break; - case PG_COMPRESSION_LZ4: - pg_fatal("compression with %s is not yet supported", "LZ4"); - break; - case PG_COMPRESSION_ZSTD: - pg_fatal("compression with %s is not yet supported", "ZSTD"); - break; - } - - free(cs); + cs->end(AH, cs); + pg_free(cs); } -/* Private routines, specific to each compression method. */ - -#ifdef HAVE_LIBZ -/* - * Functions for zlib compressed output. +/*---------------------- + * Compressed stream API + *---------------------- */ -static void -InitCompressorZlib(CompressorState *cs, int level) +static int +hasSuffix(const char *filename, const char *suffix) { - z_streamp zp; - - zp = cs->zp = (z_streamp) pg_malloc(sizeof(z_stream)); - zp->zalloc = Z_NULL; - zp->zfree = Z_NULL; - zp->opaque = Z_NULL; - - /* - * zlibOutSize is the buffer size we tell zlib it can output to. We - * actually allocate one extra byte because some routines want to append a - * trailing zero byte to the zlib output. 
- */ - cs->zlibOut = (char *) pg_malloc(ZLIB_OUT_SIZE + 1); - cs->zlibOutSize = ZLIB_OUT_SIZE; - - if (deflateInit(zp, level) != Z_OK) - pg_fatal("could not initialize compression library: %s", - zp->msg); - - /* Just be paranoid - maybe End is called after Start, with no Write */ - zp->next_out = (void *) cs->zlibOut; - zp->avail_out = cs->zlibOutSize; + int filenamelen = strlen(filename); + int suffixlen = strlen(suffix); + + if (filenamelen < suffixlen) + return 0; + + return memcmp(&filename[filenamelen - suffixlen], + suffix, + suffixlen) == 0; } +/* free() without changing errno; useful in several places below */ static void -EndCompressorZlib(ArchiveHandle *AH, CompressorState *cs) +free_keep_errno(void *p) { - z_streamp zp = cs->zp; - - zp->next_in = NULL; - zp->avail_in = 0; - - /* Flush any remaining data from zlib buffer */ - DeflateCompressorZlib(AH, cs, true); - - if (deflateEnd(zp) != Z_OK) - pg_fatal("could not close compression stream: %s", zp->msg); + int save_errno = errno; - free(cs->zlibOut); - free(cs->zp); + free(p); + errno = save_errno; } -static void -DeflateCompressorZlib(ArchiveHandle *AH, CompressorState *cs, bool flush) +/* + * Compression None implementation + */ +static size_t +read_none(void *ptr, size_t size, CompressFileHandle *CFH) { - z_streamp zp = cs->zp; - char *out = cs->zlibOut; - int res = Z_OK; + FILE *fp = (FILE *) CFH->private_data; + size_t ret; - while (cs->zp->avail_in != 0 || flush) - { - res = deflate(zp, flush ? Z_FINISH : Z_NO_FLUSH); - if (res == Z_STREAM_ERROR) - pg_fatal("could not compress data: %s", zp->msg); - if ((flush && (zp->avail_out < cs->zlibOutSize)) - || (zp->avail_out == 0) - || (zp->avail_in != 0) - ) - { - /* - * Extra paranoia: avoid zero-length chunks, since a zero length - * chunk is the EOF marker in the custom format. This should never - * happen but... 
- */ - if (zp->avail_out < cs->zlibOutSize) - { - /* - * Any write function should do its own error checking but to - * make sure we do a check here as well... - */ - size_t len = cs->zlibOutSize - zp->avail_out; - - cs->writeF(AH, out, len); - } - zp->next_out = (void *) out; - zp->avail_out = cs->zlibOutSize; - } + if (size == 0) + return 0; - if (res == Z_STREAM_END) - break; - } + ret = fread(ptr, 1, size, fp); + if (ret != size && !feof(fp)) + pg_fatal("could not read from input file: %s", + strerror(errno)); + + return ret; } -static void -WriteDataToArchiveZlib(ArchiveHandle *AH, CompressorState *cs, - const char *data, size_t dLen) +static size_t +write_none(const void *ptr, size_t size, CompressFileHandle *CFH) { - cs->zp->next_in = (void *) unconstify(char *, data); - cs->zp->avail_in = dLen; - DeflateCompressorZlib(AH, cs, false); + return fwrite(ptr, 1, size, (FILE *) CFH->private_data); } -static void -ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF) +static const char * +get_error_none(CompressFileHandle *CFH) { - z_streamp zp; - char *out; - int res = Z_OK; - size_t cnt; - char *buf; - size_t buflen; - - zp = (z_streamp) pg_malloc(sizeof(z_stream)); - zp->zalloc = Z_NULL; - zp->zfree = Z_NULL; - zp->opaque = Z_NULL; - - buf = pg_malloc(ZLIB_IN_SIZE); - buflen = ZLIB_IN_SIZE; - - out = pg_malloc(ZLIB_OUT_SIZE + 1); - - if (inflateInit(zp) != Z_OK) - pg_fatal("could not initialize compression library: %s", - zp->msg); - - /* no minimal chunk size for zlib */ - while ((cnt = readF(AH, &buf, &buflen))) - { - zp->next_in = (void *) buf; - zp->avail_in = cnt; - - while (zp->avail_in > 0) - { - zp->next_out = (void *) out; - zp->avail_out = ZLIB_OUT_SIZE; + return strerror(errno); +} - res = inflate(zp, 0); - if (res != Z_OK && res != Z_STREAM_END) - pg_fatal("could not uncompress data: %s", zp->msg); +static char * +gets_none(char *ptr, int size, CompressFileHandle *CFH) +{ + return fgets(ptr, size, (FILE *) CFH->private_data); +} - 
out[ZLIB_OUT_SIZE - zp->avail_out] = '\0'; - ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH); - } - } +static int +getc_none(CompressFileHandle *CFH) +{ + FILE *fp = (FILE *) CFH->private_data; + int ret; - zp->next_in = NULL; - zp->avail_in = 0; - while (res != Z_STREAM_END) + ret = fgetc(fp); + if (ret == EOF) { - zp->next_out = (void *) out; - zp->avail_out = ZLIB_OUT_SIZE; - res = inflate(zp, 0); - if (res != Z_OK && res != Z_STREAM_END) - pg_fatal("could not uncompress data: %s", zp->msg); - - out[ZLIB_OUT_SIZE - zp->avail_out] = '\0'; - ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH); + if (!feof(fp)) + pg_fatal("could not read from input file: %s", strerror(errno)); + else + pg_fatal("could not read from input file: end of file"); } - if (inflateEnd(zp) != Z_OK) - pg_fatal("could not close compression library: %s", zp->msg); - - free(buf); - free(out); - free(zp); + return ret; } -#endif /* HAVE_LIBZ */ - -/* - * Functions for uncompressed output. - */ - -static void -ReadDataFromArchiveNone(ArchiveHandle *AH, ReadFunc readF) +static int +close_none(CompressFileHandle *CFH) { - size_t cnt; - char *buf; - size_t buflen; + FILE *fp = (FILE *) CFH->private_data; + int ret = 0; - buf = pg_malloc(ZLIB_OUT_SIZE); - buflen = ZLIB_OUT_SIZE; + CFH->private_data = NULL; - while ((cnt = readF(AH, &buf, &buflen))) - { - ahwrite(buf, 1, cnt, AH); - } + if (fp) + ret = fclose(fp); - free(buf); + return ret; } -static void -WriteDataToArchiveNone(ArchiveHandle *AH, CompressorState *cs, - const char *data, size_t dLen) + +static int +eof_none(CompressFileHandle *CFH) { - cs->writeF(AH, data, dLen); + return feof((FILE *) CFH->private_data); } - -/*---------------------- - * Compressed stream API - *---------------------- - */ - -/* - * cfp represents an open stream, wrapping the underlying FILE or gzFile - * pointer. This is opaque to the callers. 
- */ -struct cfp +static int +open_none(const char *path, int fd, const char *mode, CompressFileHandle *CFH) { - pg_compress_specification compression_spec; - void *fp; -}; + Assert(CFH->private_data == NULL); -#ifdef HAVE_LIBZ -static int hasSuffix(const char *filename, const char *suffix); -#endif + if (fd >= 0) + CFH->private_data = fdopen(dup(fd), mode); + else + CFH->private_data = fopen(path, mode); -/* free() without changing errno; useful in several places below */ -static void -free_keep_errno(void *p) -{ - int save_errno = errno; + if (CFH->private_data == NULL) + return 1; - free(p); - errno = save_errno; + return 0; } -/* - * Open a file for reading. 'path' is the file to open, and 'mode' should - * be either "r" or "rb". - * - * If the file at 'path' does not exist, we append the ".gz" suffix (if 'path' - * doesn't already have it) and try again. So if you pass "foo" as 'path', - * this will open either "foo" or "foo.gz". - * - * On failure, return NULL with an error code in errno. - */ -cfp * -cfopen_read(const char *path, const char *mode) +static int +open_write_none(const char *path, const char *mode, CompressFileHandle *CFH) { - cfp *fp; - - pg_compress_specification compression_spec = {0}; + Assert(CFH->private_data == NULL); -#ifdef HAVE_LIBZ - if (hasSuffix(path, ".gz")) - { - compression_spec.algorithm = PG_COMPRESSION_GZIP; - fp = cfopen(path, mode, compression_spec); - } - else -#endif - { - compression_spec.algorithm = PG_COMPRESSION_NONE; - fp = cfopen(path, mode, compression_spec); -#ifdef HAVE_LIBZ - if (fp == NULL) - { - char *fname; + CFH->private_data = fopen(path, mode); + if (CFH->private_data == NULL) + return 1; - fname = psprintf("%s.gz", path); - compression_spec.algorithm = PG_COMPRESSION_GZIP; - fp = cfopen(fname, mode, compression_spec); - free_keep_errno(fname); - } -#endif - } - return fp; + return 0; } -/* - * Open a file for writing. 
'path' indicates the path name, and 'mode' must - * be a filemode as accepted by fopen() and gzopen() that indicates writing - * ("w", "wb", "a", or "ab"). - * - * If 'compression_spec.algorithm' is GZIP, a gzip compressed stream is opened, - * and 'compression_spec.level' used. The ".gz" suffix is automatically added to - * 'path' in that case. - * - * On failure, return NULL with an error code in errno. - */ -cfp * -cfopen_write(const char *path, const char *mode, - const pg_compress_specification compression_spec) +static void +InitCompressNone(CompressFileHandle *CFH, + const pg_compress_specification compression_spec) { - cfp *fp; - - if (compression_spec.algorithm == PG_COMPRESSION_NONE) - fp = cfopen(path, mode, compression_spec); - else - { -#ifdef HAVE_LIBZ - char *fname; - - fname = psprintf("%s.gz", path); - fp = cfopen(fname, mode, compression_spec); - free_keep_errno(fname); -#else - pg_fatal("this build does not support compression with %s", "gzip"); - fp = NULL; /* keep compiler quiet */ -#endif - } - return fp; + CFH->open_func = open_none; + CFH->open_write_func = open_write_none; + CFH->read_func = read_none; + CFH->write_func = write_none; + CFH->gets_func = gets_none; + CFH->getc_func = getc_none; + CFH->close_func = close_none; + CFH->eof_func = eof_none; + CFH->get_error_func = get_error_none; + + CFH->private_data = NULL; } /* - * This is the workhorse for cfopen() or cfdopen(). It opens file 'path' or - * associates a stream 'fd', if 'fd' is a valid descriptor, in 'mode'. The - * descriptor is not dup'ed and it is the caller's responsibility to do so. - * The caller must verify that the 'compress_algorithm' is supported by the - * current build. - * - * On failure, return NULL with an error code in errno. 
+ * Public interface */ -static cfp * -cfopen_internal(const char *path, int fd, const char *mode, - pg_compress_specification compression_spec) +CompressFileHandle * +InitCompressFileHandle(const pg_compress_specification compression_spec) { - cfp *fp = pg_malloc(sizeof(cfp)); + CompressFileHandle *CFH; - fp->compression_spec = compression_spec; + CFH = pg_malloc0(sizeof(CompressFileHandle)); switch (compression_spec.algorithm) { case PG_COMPRESSION_NONE: - if (fd >= 0) - fp->fp = fdopen(fd, mode); - else - fp->fp = fopen(path, mode); - if (fp->fp == NULL) - { - free_keep_errno(fp); - fp = NULL; - } - + InitCompressNone(CFH, compression_spec); break; case PG_COMPRESSION_GZIP: -#ifdef HAVE_LIBZ - if (compression_spec.level != Z_DEFAULT_COMPRESSION) - { - /* - * user has specified a compression level, so tell zlib to use - * it - */ - char mode_compression[32]; - - snprintf(mode_compression, sizeof(mode_compression), "%s%d", - mode, compression_spec.level); - if (fd >= 0) - fp->fp = gzdopen(fd, mode_compression); - else - fp->fp = gzopen(path, mode_compression); - } - else - { - /* don't specify a level, just use the zlib default */ - if (fd >= 0) - fp->fp = gzdopen(fd, mode); - else - fp->fp = gzopen(path, mode); - } - - if (fp->fp == NULL) - { - free_keep_errno(fp); - fp = NULL; - } -#else - pg_fatal("this build does not support compression with %s", - "gzip"); -#endif + InitCompressGzip(CFH, compression_spec); break; case PG_COMPRESSION_LZ4: pg_fatal("compression with %s is not yet supported", "LZ4"); @@ -594,266 +334,88 @@ cfopen_internal(const char *path, int fd, const char *mode, break; } - return fp; + return CFH; } -cfp * -cfopen(const char *path, const char *mode, - const pg_compress_specification compression_spec) +/* + * Open a file for reading. 'path' is the file to open, and 'mode' should + * be either "r" or "rb". + * + * If the file at 'path' does not exist, we append the "{.gz,.lz4}" suffix (i + * 'path' doesn't already have it) and try again. 
So if you pass "foo" as + * 'path', this will open either "foo" or "foo.gz" or "foo.lz4", trying in that + * order. + * + * On failure, return NULL with an error code in errno. + */ +CompressFileHandle * +InitDiscoverCompressFileHandle(const char *path, const char *mode) { - return cfopen_internal(path, -1, mode, compression_spec); -} + CompressFileHandle *CFH = NULL; + struct stat st; + char *fname; + pg_compress_specification compression_spec = {0}; -cfp * -cfdopen(int fd, const char *mode, - const pg_compress_specification compression_spec) -{ - return cfopen_internal(NULL, fd, mode, compression_spec); -} + compression_spec.algorithm = PG_COMPRESSION_NONE; -int -cfread(void *ptr, int size, cfp *fp) -{ - int ret = 0; + Assert(strcmp(mode, "r") == 0 || strcmp(mode, "rb") == 0); - if (size == 0) - return 0; + fname = strdup(path); - switch (fp->compression_spec.algorithm) + if (hasSuffix(fname, ".gz")) + compression_spec.algorithm = PG_COMPRESSION_GZIP; + else { - case PG_COMPRESSION_NONE: - ret = fread(ptr, 1, size, (FILE *) fp->fp); - if (ret != size && !feof((FILE *) fp->fp)) - READ_ERROR_EXIT((FILE *) fp->fp); + bool exists; - break; - case PG_COMPRESSION_GZIP: + exists = (stat(path, &st) == 0); + /* avoid unused warning if it is not build with compression */ + if (exists) + compression_spec.algorithm = PG_COMPRESSION_NONE; #ifdef HAVE_LIBZ - ret = gzread((gzFile) fp->fp, ptr, size); - if (ret != size && !gzeof((gzFile) fp->fp)) - { - int errnum; - const char *errmsg = gzerror((gzFile) fp->fp, &errnum); - - pg_fatal("could not read from input file: %s", - errnum == Z_ERRNO ? 
strerror(errno) : errmsg); - } -#else - pg_fatal("this build does not support compression with %s", - "gzip"); -#endif - break; - case PG_COMPRESSION_LZ4: - pg_fatal("compression with %s is not yet supported", "LZ4"); - break; - case PG_COMPRESSION_ZSTD: - pg_fatal("compression with %s is not yet supported", "ZSTD"); - break; - } - - return ret; -} - -int -cfwrite(const void *ptr, int size, cfp *fp) -{ - int ret = 0; - - switch (fp->compression_spec.algorithm) - { - case PG_COMPRESSION_NONE: - ret = fwrite(ptr, 1, size, (FILE *) fp->fp); - break; - case PG_COMPRESSION_GZIP: -#ifdef HAVE_LIBZ - ret = gzwrite((gzFile) fp->fp, ptr, size); -#else - pg_fatal("this build does not support compression with %s", - "gzip"); -#endif - break; - case PG_COMPRESSION_LZ4: - pg_fatal("compression with %s is not yet supported", "LZ4"); - break; - case PG_COMPRESSION_ZSTD: - pg_fatal("compression with %s is not yet supported", "ZSTD"); - break; - } - - return ret; -} - -int -cfgetc(cfp *fp) -{ - int ret = 0; - - switch (fp->compression_spec.algorithm) - { - case PG_COMPRESSION_NONE: - ret = fgetc((FILE *) fp->fp); - if (ret == EOF) - READ_ERROR_EXIT((FILE *) fp->fp); + if (!exists) + { + free_keep_errno(fname); + fname = psprintf("%s.gz", path); + exists = (stat(fname, &st) == 0); - break; - case PG_COMPRESSION_GZIP: -#ifdef HAVE_LIBZ - ret = gzgetc((gzFile) fp->fp); - if (ret == EOF) - { - if (!gzeof((gzFile) fp->fp)) - pg_fatal("could not read from input file: %s", strerror(errno)); - else - pg_fatal("could not read from input file: end of file"); - } -#else - pg_fatal("this build does not support compression with %s", - "gzip"); + if (exists) + compression_spec.algorithm = PG_COMPRESSION_GZIP; + } #endif - break; - case PG_COMPRESSION_LZ4: - pg_fatal("compression with %s is not yet supported", "LZ4"); - break; - case PG_COMPRESSION_ZSTD: - pg_fatal("compression with %s is not yet supported", "ZSTD"); - break; - } - - return ret; -} - -char * -cfgets(cfp *fp, char *buf, int len) 
-{ - char *ret = NULL; - - switch (fp->compression_spec.algorithm) - { - case PG_COMPRESSION_NONE: - ret = fgets(buf, len, (FILE *) fp->fp); +#ifdef USE_LZ4 + if (!exists) + { + free_keep_errno(fname); + fname = psprintf("%s.lz4", path); + exists = (stat(fname, &st) == 0); - break; - case PG_COMPRESSION_GZIP: -#ifdef HAVE_LIBZ - ret = gzgets((gzFile) fp->fp, buf, len); -#else - pg_fatal("this build does not support compression with %s", - "gzip"); + if (exists) + compression_spec.algorithm = PG_COMPRESSION_LZ4; + } #endif - break; - case PG_COMPRESSION_LZ4: - pg_fatal("compression with %s is not yet supported", "LZ4"); - break; - case PG_COMPRESSION_ZSTD: - pg_fatal("compression with %s is not yet supported", "ZSTD"); - break; - } - - return ret; -} - -int -cfclose(cfp *fp) -{ - int ret = 0; - - if (fp == NULL) - { - errno = EBADF; - return EOF; } - switch (fp->compression_spec.algorithm) + CFH = InitCompressFileHandle(compression_spec); + if (CFH->open_func(fname, -1, mode, CFH)) { - case PG_COMPRESSION_NONE: - ret = fclose((FILE *) fp->fp); - fp->fp = NULL; - - break; - case PG_COMPRESSION_GZIP: -#ifdef HAVE_LIBZ - ret = gzclose((gzFile) fp->fp); - fp->fp = NULL; -#else - pg_fatal("this build does not support compression with %s", - "gzip"); -#endif - break; - case PG_COMPRESSION_LZ4: - pg_fatal("compression with %s is not yet supported", "LZ4"); - break; - case PG_COMPRESSION_ZSTD: - pg_fatal("compression with %s is not yet supported", "ZSTD"); - break; + free_keep_errno(CFH); + CFH = NULL; } + free_keep_errno(fname); - free_keep_errno(fp); - - return ret; + return CFH; } int -cfeof(cfp *fp) +DestroyCompressFileHandle(CompressFileHandle *CFH) { int ret = 0; - switch (fp->compression_spec.algorithm) - { - case PG_COMPRESSION_NONE: - ret = feof((FILE *) fp->fp); + if (CFH->private_data) + ret = CFH->close_func(CFH); - break; - case PG_COMPRESSION_GZIP: -#ifdef HAVE_LIBZ - ret = gzeof((gzFile) fp->fp); -#else - pg_fatal("this build does not support compression with 
%s", - "gzip"); -#endif - break; - case PG_COMPRESSION_LZ4: - pg_fatal("compression with %s is not yet supported", "LZ4"); - break; - case PG_COMPRESSION_ZSTD: - pg_fatal("compression with %s is not yet supported", "ZSTD"); - break; - } + free_keep_errno(CFH); return ret; } - -const char * -get_cfp_error(cfp *fp) -{ - if (fp->compression_spec.algorithm == PG_COMPRESSION_GZIP) - { -#ifdef HAVE_LIBZ - int errnum; - const char *errmsg = gzerror((gzFile) fp->fp, &errnum); - - if (errnum != Z_ERRNO) - return errmsg; -#else - pg_fatal("this build does not support compression with %s", "gzip"); -#endif - } - - return strerror(errno); -} - -#ifdef HAVE_LIBZ -static int -hasSuffix(const char *filename, const char *suffix) -{ - int filenamelen = strlen(filename); - int suffixlen = strlen(suffix); - - if (filenamelen < suffixlen) - return 0; - - return memcmp(&filename[filenamelen - suffixlen], - suffix, - suffixlen) == 0; -} - -#endif diff --git a/src/bin/pg_dump/compress_io.h b/src/bin/pg_dump/compress_io.h index 6fad6c2cd5..62e3da1b1d 100644 --- a/src/bin/pg_dump/compress_io.h +++ b/src/bin/pg_dump/compress_io.h @@ -37,34 +37,63 @@ typedef void (*WriteFunc) (ArchiveHandle *AH, const char *buf, size_t len); */ typedef size_t (*ReadFunc) (ArchiveHandle *AH, char **buf, size_t *buflen); -/* struct definition appears in compress_io.c */ typedef struct CompressorState CompressorState; +struct CompressorState +{ + /* + * Read all compressed data from the input stream (via readF) and print it + * out with ahwrite(). + */ + void (*readData) (ArchiveHandle *AH, CompressorState *cs); + + /* + * Compress and write data to the output stream (via writeF). 
+ */ + void (*writeData) (ArchiveHandle *AH, CompressorState *cs, + const void *data, size_t dLen); + void (*end) (ArchiveHandle *AH, CompressorState *cs); + + ReadFunc readF; + WriteFunc writeF; + + pg_compress_specification compression_spec; + void *private_data; +}; extern CompressorState *AllocateCompressor(const pg_compress_specification compression_spec, + ReadFunc readF, WriteFunc writeF); -extern void ReadDataFromArchive(ArchiveHandle *AH, - const pg_compress_specification compression_spec, - ReadFunc readF); -extern void WriteDataToArchive(ArchiveHandle *AH, CompressorState *cs, - const void *data, size_t dLen); extern void EndCompressor(ArchiveHandle *AH, CompressorState *cs); +/* + * Compress File Handle + */ +typedef struct CompressFileHandle CompressFileHandle; + +struct CompressFileHandle +{ + int (*open_func) (const char *path, int fd, const char *mode, + CompressFileHandle *CFH); + int (*open_write_func) (const char *path, const char *mode, + CompressFileHandle *cxt); + size_t (*read_func) (void *ptr, size_t size, CompressFileHandle *CFH); + size_t (*write_func) (const void *ptr, size_t size, + struct CompressFileHandle *CFH); + char *(*gets_func) (char *s, int size, CompressFileHandle *CFH); + int (*getc_func) (CompressFileHandle *CFH); + int (*eof_func) (CompressFileHandle *CFH); + int (*close_func) (CompressFileHandle *CFH); + const char *(*get_error_func) (CompressFileHandle *CFH); + + pg_compress_specification compression_spec; + void *private_data; +}; -typedef struct cfp cfp; +extern CompressFileHandle *InitCompressFileHandle( + const pg_compress_specification compression_spec); -extern cfp *cfopen(const char *path, const char *mode, - const pg_compress_specification compression_spec); -extern cfp *cfdopen(int fd, const char *mode, - pg_compress_specification compression_spec); -extern cfp *cfopen_read(const char *path, const char *mode); -extern cfp *cfopen_write(const char *path, const char *mode, - const pg_compress_specification 
compression_spec); -extern int cfread(void *ptr, int size, cfp *fp); -extern int cfwrite(const void *ptr, int size, cfp *fp); -extern int cfgetc(cfp *fp); -extern char *cfgets(cfp *fp, char *buf, int len); -extern int cfclose(cfp *fp); -extern int cfeof(cfp *fp); -extern const char *get_cfp_error(cfp *fp); +extern CompressFileHandle *InitDiscoverCompressFileHandle(const char *path, + const char *mode); +extern int DestroyCompressFileHandle(CompressFileHandle *CFH); #endif diff --git a/src/bin/pg_dump/meson.build b/src/bin/pg_dump/meson.build index d96e566846..0c73a4707e 100644 --- a/src/bin/pg_dump/meson.build +++ b/src/bin/pg_dump/meson.build @@ -1,5 +1,6 @@ pg_dump_common_sources = files( 'compress_io.c', + 'compress_gzip.c', 'dumputils.c', 'parallel.c', 'pg_backup_archiver.c', diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c index fb94317ad9..1f207c6f4d 100644 --- a/src/bin/pg_dump/pg_backup_archiver.c +++ b/src/bin/pg_dump/pg_backup_archiver.c @@ -95,8 +95,8 @@ static void dump_lo_buf(ArchiveHandle *AH); static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim); static void SetOutput(ArchiveHandle *AH, const char *filename, const pg_compress_specification compression_spec); -static cfp *SaveOutput(ArchiveHandle *AH); -static void RestoreOutput(ArchiveHandle *AH, cfp *savedOutput); +static CompressFileHandle *SaveOutput(ArchiveHandle *AH); +static void RestoreOutput(ArchiveHandle *AH, CompressFileHandle *savedOutput); static int restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel); static void restore_toc_entries_prefork(ArchiveHandle *AH, @@ -272,7 +272,7 @@ CloseArchive(Archive *AHX) /* Close the output */ errno = 0; - res = cfclose(AH->OF); + res = DestroyCompressFileHandle(AH->OF); if (res != 0) pg_fatal("could not close output file: %m"); @@ -355,7 +355,7 @@ RestoreArchive(Archive *AHX) bool parallel_mode; bool supports_compression; TocEntry *te; - cfp *sav; + CompressFileHandle *sav; 
AH->stage = STAGE_INITIALIZING; @@ -1127,7 +1127,7 @@ PrintTOCSummary(Archive *AHX) TocEntry *te; pg_compress_specification out_compression_spec = {0}; teSection curSection; - cfp *sav; + CompressFileHandle *sav; const char *fmtName; char stamp_str[64]; @@ -1143,9 +1143,10 @@ PrintTOCSummary(Archive *AHX) strcpy(stamp_str, "[unknown]"); ahprintf(AH, ";\n; Archive created at %s\n", stamp_str); - ahprintf(AH, "; dbname: %s\n; TOC Entries: %d\n; Compression: %d\n", + ahprintf(AH, "; dbname: %s\n; TOC Entries: %d\n; Compression: %s\n", sanitize_line(AH->archdbname, false), - AH->tocCount, AH->compression_spec.level); + AH->tocCount, + get_compress_algorithm_name(AH->compression_spec.algorithm)); switch (AH->format) { @@ -1502,6 +1503,7 @@ static void SetOutput(ArchiveHandle *AH, const char *filename, const pg_compress_specification compression_spec) { + CompressFileHandle *CFH; const char *mode; int fn = -1; @@ -1524,33 +1526,32 @@ SetOutput(ArchiveHandle *AH, const char *filename, else mode = PG_BINARY_W; - if (fn >= 0) - AH->OF = cfdopen(dup(fn), mode, compression_spec); - else - AH->OF = cfopen(filename, mode, compression_spec); + CFH = InitCompressFileHandle(compression_spec); - if (!AH->OF) + if (CFH->open_func(filename, fn, mode, CFH)) { if (filename) pg_fatal("could not open output file \"%s\": %m", filename); else pg_fatal("could not open output file: %m"); } + + AH->OF = CFH; } -static cfp * +static CompressFileHandle * SaveOutput(ArchiveHandle *AH) { - return (cfp *) AH->OF; + return (CompressFileHandle *) AH->OF; } static void -RestoreOutput(ArchiveHandle *AH, cfp *savedOutput) +RestoreOutput(ArchiveHandle *AH, CompressFileHandle *savedOutput) { int res; errno = 0; - res = cfclose(AH->OF); + res = DestroyCompressFileHandle(AH->OF); if (res != 0) pg_fatal("could not close output file: %m"); @@ -1689,7 +1690,11 @@ ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH) else if (RestoringToDB(AH)) bytes_written = ExecuteSqlCommandBuf(&AH->public, 
(const char *) ptr, size * nmemb); else - bytes_written = cfwrite(ptr, size * nmemb, AH->OF); + { + CompressFileHandle *CFH = (CompressFileHandle *) AH->OF; + + bytes_written = CFH->write_func(ptr, size * nmemb, CFH); + } if (bytes_written != size * nmemb) WRITE_ERROR_EXIT; @@ -2031,6 +2036,18 @@ ReadStr(ArchiveHandle *AH) return buf; } +static bool +_fileExistsInDirectory(const char *dir, const char *filename) +{ + struct stat st; + char buf[MAXPGPATH]; + + if (snprintf(buf, MAXPGPATH, "%s/%s", dir, filename) >= MAXPGPATH) + pg_fatal("directory name too long: \"%s\"", dir); + + return (stat(buf, &st) == 0 && S_ISREG(st.st_mode)); +} + static int _discoverArchiveFormat(ArchiveHandle *AH) { @@ -2061,26 +2078,12 @@ _discoverArchiveFormat(ArchiveHandle *AH) */ if (stat(AH->fSpec, &st) == 0 && S_ISDIR(st.st_mode)) { - char buf[MAXPGPATH]; - - if (snprintf(buf, MAXPGPATH, "%s/toc.dat", AH->fSpec) >= MAXPGPATH) - pg_fatal("directory name too long: \"%s\"", - AH->fSpec); - if (stat(buf, &st) == 0 && S_ISREG(st.st_mode)) - { - AH->format = archDirectory; + AH->format = archDirectory; + if (_fileExistsInDirectory(AH->fSpec, "toc.dat")) return AH->format; - } - #ifdef HAVE_LIBZ - if (snprintf(buf, MAXPGPATH, "%s/toc.dat.gz", AH->fSpec) >= MAXPGPATH) - pg_fatal("directory name too long: \"%s\"", - AH->fSpec); - if (stat(buf, &st) == 0 && S_ISREG(st.st_mode)) - { - AH->format = archDirectory; + if (_fileExistsInDirectory(AH->fSpec, "toc.dat.gz")) return AH->format; - } #endif pg_fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)", AH->fSpec); @@ -2178,6 +2181,7 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt, SetupWorkerPtrType setupWorkerPtr) { ArchiveHandle *AH; + CompressFileHandle *CFH; pg_compress_specification out_compress_spec = {0}; pg_log_debug("allocating AH for %s, format %d", @@ -2233,7 +2237,10 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt, /* Open stdout with no compression for AH output handle */ 
out_compress_spec.algorithm = PG_COMPRESSION_NONE; - AH->OF = cfdopen(dup(fileno(stdout)), PG_BINARY_A, out_compress_spec); + CFH = InitCompressFileHandle(out_compress_spec); + if (CFH->open_func(NULL, fileno(stdout), PG_BINARY_A, CFH)) + pg_fatal("could not open stdout for appending: %m"); + AH->OF = CFH; /* * On Windows, we need to use binary mode to read/write non-text files, @@ -3646,12 +3653,7 @@ WriteHead(ArchiveHandle *AH) AH->WriteBytePtr(AH, AH->intSize); AH->WriteBytePtr(AH, AH->offSize); AH->WriteBytePtr(AH, AH->format); - /* - * For now the compression type is implied by the level. This will need - * to change once support for more compression algorithms is added, - * requiring a format bump. - */ - WriteInt(AH, AH->compression_spec.level); + AH->WriteBytePtr(AH, AH->compression_spec.algorithm); crtm = *localtime(&AH->createDate); WriteInt(AH, crtm.tm_sec); WriteInt(AH, crtm.tm_min); @@ -3722,10 +3724,11 @@ ReadHead(ArchiveHandle *AH) pg_fatal("expected format (%d) differs from format found in file (%d)", AH->format, fmt); - /* Guess the compression method based on the level */ - AH->compression_spec.algorithm = PG_COMPRESSION_NONE; - if (AH->version >= K_VERS_1_2) + if (AH->version >= K_VERS_1_15) + AH->compression_spec.algorithm = AH->ReadBytePtr(AH); + else if (AH->version >= K_VERS_1_2) { + /* Guess the compression method based on the level */ if (AH->version < K_VERS_1_4) AH->compression_spec.level = AH->ReadBytePtr(AH); else @@ -3737,10 +3740,17 @@ ReadHead(ArchiveHandle *AH) else AH->compression_spec.algorithm = PG_COMPRESSION_GZIP; + if (AH->compression_spec.algorithm != PG_COMPRESSION_NONE) + { + bool unsupported = false; + #ifndef HAVE_LIBZ - if (AH->compression_spec.algorithm == PG_COMPRESSION_GZIP) - pg_fatal("archive is compressed, but this installation does not support compression"); + if (AH->compression_spec.algorithm == PG_COMPRESSION_GZIP) + unsupported = true; #endif + if (unsupported) + pg_fatal("archive is compressed, but this 
installation does not support compression"); + } if (AH->version >= K_VERS_1_4) { diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h index 4725e49747..18b38c17ab 100644 --- a/src/bin/pg_dump/pg_backup_archiver.h +++ b/src/bin/pg_dump/pg_backup_archiver.h @@ -65,10 +65,13 @@ #define K_VERS_1_13 MAKE_ARCHIVE_VERSION(1, 13, 0) /* change search_path * behavior */ #define K_VERS_1_14 MAKE_ARCHIVE_VERSION(1, 14, 0) /* add tableam */ +#define K_VERS_1_15 MAKE_ARCHIVE_VERSION(1, 15, 0) /* add + * compression_algorithm + * in header */ /* Current archive version number (the format we can output) */ #define K_VERS_MAJOR 1 -#define K_VERS_MINOR 14 +#define K_VERS_MINOR 15 #define K_VERS_REV 0 #define K_VERS_SELF MAKE_ARCHIVE_VERSION(K_VERS_MAJOR, K_VERS_MINOR, K_VERS_REV) diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c index d1e54644a9..512ab043af 100644 --- a/src/bin/pg_dump/pg_backup_custom.c +++ b/src/bin/pg_dump/pg_backup_custom.c @@ -298,7 +298,9 @@ _StartData(ArchiveHandle *AH, TocEntry *te) _WriteByte(AH, BLK_DATA); /* Block type */ WriteInt(AH, te->dumpId); /* For sanity check */ - ctx->cs = AllocateCompressor(AH->compression_spec, _CustomWriteFunc); + ctx->cs = AllocateCompressor(AH->compression_spec, + NULL, + _CustomWriteFunc); } /* @@ -317,15 +319,15 @@ _WriteData(ArchiveHandle *AH, const void *data, size_t dLen) CompressorState *cs = ctx->cs; if (dLen > 0) - /* WriteDataToArchive() internally throws write errors */ - WriteDataToArchive(AH, cs, data, dLen); + /* writeData() internally throws write errors */ + cs->writeData(AH, cs, data, dLen); } /* * Called by the archiver when a dumper's 'DataDumper' routine has * finished. * - * Optional. + * Mandatory. 
*/ static void _EndData(ArchiveHandle *AH, TocEntry *te) @@ -333,6 +335,8 @@ _EndData(ArchiveHandle *AH, TocEntry *te) lclContext *ctx = (lclContext *) AH->formatData; EndCompressor(AH, ctx->cs); + ctx->cs = NULL; + /* Send the end marker */ WriteInt(AH, 0); } @@ -377,7 +381,9 @@ _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid) WriteInt(AH, oid); - ctx->cs = AllocateCompressor(AH->compression_spec, _CustomWriteFunc); + ctx->cs = AllocateCompressor(AH->compression_spec, + NULL, + _CustomWriteFunc); } /* @@ -566,7 +572,12 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te) static void _PrintData(ArchiveHandle *AH) { - ReadDataFromArchive(AH, AH->compression_spec, _CustomReadFunc); + CompressorState *cs; + + cs = AllocateCompressor(AH->compression_spec, + _CustomReadFunc, NULL); + cs->readData(AH, cs); + EndCompressor(AH, cs); } static void diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c index f6aee775eb..b6d025576f 100644 --- a/src/bin/pg_dump/pg_backup_directory.c +++ b/src/bin/pg_dump/pg_backup_directory.c @@ -50,9 +50,8 @@ typedef struct */ char *directory; - cfp *dataFH; /* currently open data file */ - - cfp *LOsTocFH; /* file handle for blobs.toc */ + CompressFileHandle *dataFH; /* currently open data file */ + CompressFileHandle *LOsTocFH; /* file handle for blobs.toc */ ParallelState *pstate; /* for parallel backup / restore */ } lclContext; @@ -198,11 +197,11 @@ InitArchiveFmt_Directory(ArchiveHandle *AH) else { /* Read Mode */ char fname[MAXPGPATH]; - cfp *tocFH; + CompressFileHandle *tocFH; setFilePath(AH, fname, "toc.dat"); - tocFH = cfopen_read(fname, PG_BINARY_R); + tocFH = InitDiscoverCompressFileHandle(fname, PG_BINARY_R); if (tocFH == NULL) pg_fatal("could not open input file \"%s\": %m", fname); @@ -218,7 +217,7 @@ InitArchiveFmt_Directory(ArchiveHandle *AH) ReadToc(AH); /* Nothing else in the file, so close it again... 
*/ - if (cfclose(tocFH) != 0) + if (DestroyCompressFileHandle(tocFH) != 0) pg_fatal("could not close TOC file: %m"); ctx->dataFH = NULL; } @@ -327,9 +326,9 @@ _StartData(ArchiveHandle *AH, TocEntry *te) setFilePath(AH, fname, tctx->filename); - ctx->dataFH = cfopen_write(fname, PG_BINARY_W, - AH->compression_spec); - if (ctx->dataFH == NULL) + ctx->dataFH = InitCompressFileHandle(AH->compression_spec); + + if (ctx->dataFH->open_write_func(fname, PG_BINARY_W, ctx->dataFH)) pg_fatal("could not open output file \"%s\": %m", fname); } @@ -346,15 +345,16 @@ static void _WriteData(ArchiveHandle *AH, const void *data, size_t dLen) { lclContext *ctx = (lclContext *) AH->formatData; + CompressFileHandle *CFH = ctx->dataFH; errno = 0; - if (dLen > 0 && cfwrite(data, dLen, ctx->dataFH) != dLen) + if (dLen > 0 && CFH->write_func(data, dLen, CFH) != dLen) { /* if write didn't set errno, assume problem is no disk space */ if (errno == 0) errno = ENOSPC; pg_fatal("could not write to output file: %s", - get_cfp_error(ctx->dataFH)); + CFH->get_error_func(CFH)); } } @@ -370,7 +370,7 @@ _EndData(ArchiveHandle *AH, TocEntry *te) lclContext *ctx = (lclContext *) AH->formatData; /* Close the file */ - if (cfclose(ctx->dataFH) != 0) + if (DestroyCompressFileHandle(ctx->dataFH) != 0) pg_fatal("could not close data file: %m"); ctx->dataFH = NULL; @@ -385,26 +385,25 @@ _PrintFileData(ArchiveHandle *AH, char *filename) size_t cnt; char *buf; size_t buflen; - cfp *cfp; + CompressFileHandle *CFH; if (!filename) return; - cfp = cfopen_read(filename, PG_BINARY_R); - - if (!cfp) + CFH = InitDiscoverCompressFileHandle(filename, PG_BINARY_R); + if (!CFH) pg_fatal("could not open input file \"%s\": %m", filename); buf = pg_malloc(ZLIB_OUT_SIZE); buflen = ZLIB_OUT_SIZE; - while ((cnt = cfread(buf, buflen, cfp))) + while ((cnt = CFH->read_func(buf, buflen, CFH))) { ahwrite(buf, 1, cnt, AH); } free(buf); - if (cfclose(cfp) != 0) + if (DestroyCompressFileHandle(CFH) != 0) pg_fatal("could not close data 
file \"%s\": %m", filename); } @@ -435,6 +434,7 @@ _LoadLOs(ArchiveHandle *AH) { Oid oid; lclContext *ctx = (lclContext *) AH->formatData; + CompressFileHandle *CFH; char tocfname[MAXPGPATH]; char line[MAXPGPATH]; @@ -442,14 +442,14 @@ _LoadLOs(ArchiveHandle *AH) setFilePath(AH, tocfname, "blobs.toc"); - ctx->LOsTocFH = cfopen_read(tocfname, PG_BINARY_R); + CFH = ctx->LOsTocFH = InitDiscoverCompressFileHandle(tocfname, PG_BINARY_R); if (ctx->LOsTocFH == NULL) pg_fatal("could not open large object TOC file \"%s\" for input: %m", tocfname); /* Read the LOs TOC file line-by-line, and process each LO */ - while ((cfgets(ctx->LOsTocFH, line, MAXPGPATH)) != NULL) + while ((CFH->gets_func(line, MAXPGPATH, CFH)) != NULL) { char lofname[MAXPGPATH + 1]; char path[MAXPGPATH]; @@ -464,11 +464,11 @@ _LoadLOs(ArchiveHandle *AH) _PrintFileData(AH, path); EndRestoreLO(AH, oid); } - if (!cfeof(ctx->LOsTocFH)) + if (!CFH->eof_func(CFH)) pg_fatal("error reading large object TOC file \"%s\"", tocfname); - if (cfclose(ctx->LOsTocFH) != 0) + if (DestroyCompressFileHandle(ctx->LOsTocFH) != 0) pg_fatal("could not close large object TOC file \"%s\": %m", tocfname); @@ -488,15 +488,16 @@ _WriteByte(ArchiveHandle *AH, const int i) { unsigned char c = (unsigned char) i; lclContext *ctx = (lclContext *) AH->formatData; + CompressFileHandle *CFH = ctx->dataFH; errno = 0; - if (cfwrite(&c, 1, ctx->dataFH) != 1) + if (CFH->write_func(&c, 1, CFH) != 1) { /* if write didn't set errno, assume problem is no disk space */ if (errno == 0) errno = ENOSPC; pg_fatal("could not write to output file: %s", - get_cfp_error(ctx->dataFH)); + CFH->get_error_func(CFH)); } return 1; @@ -512,8 +513,9 @@ static int _ReadByte(ArchiveHandle *AH) { lclContext *ctx = (lclContext *) AH->formatData; + CompressFileHandle *CFH = ctx->dataFH; - return cfgetc(ctx->dataFH); + return CFH->getc_func(CFH); } /* @@ -524,15 +526,16 @@ static void _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len) { lclContext *ctx = 
(lclContext *) AH->formatData; + CompressFileHandle *CFH = ctx->dataFH; errno = 0; - if (cfwrite(buf, len, ctx->dataFH) != len) + if (CFH->write_func(buf, len, CFH) != len) { /* if write didn't set errno, assume problem is no disk space */ if (errno == 0) errno = ENOSPC; pg_fatal("could not write to output file: %s", - get_cfp_error(ctx->dataFH)); + CFH->get_error_func(CFH)); } } @@ -545,12 +548,13 @@ static void _ReadBuf(ArchiveHandle *AH, void *buf, size_t len) { lclContext *ctx = (lclContext *) AH->formatData; + CompressFileHandle *CFH = ctx->dataFH; /* - * If there was an I/O error, we already exited in cfread(), so here we + * If there was an I/O error, we already exited in readF(), so here we * exit on short reads. */ - if (cfread(buf, len, ctx->dataFH) != len) + if (CFH->read_func(buf, len, CFH) != len) pg_fatal("could not read from input file: end of file"); } @@ -573,7 +577,7 @@ _CloseArchive(ArchiveHandle *AH) if (AH->mode == archModeWrite) { - cfp *tocFH; + CompressFileHandle *tocFH; pg_compress_specification compression_spec = {0}; char fname[MAXPGPATH]; @@ -584,8 +588,8 @@ _CloseArchive(ArchiveHandle *AH) /* The TOC is always created uncompressed */ compression_spec.algorithm = PG_COMPRESSION_NONE; - tocFH = cfopen_write(fname, PG_BINARY_W, compression_spec); - if (tocFH == NULL) + tocFH = InitCompressFileHandle(compression_spec); + if (tocFH->open_write_func(fname, PG_BINARY_W, tocFH)) pg_fatal("could not open output file \"%s\": %m", fname); ctx->dataFH = tocFH; @@ -598,7 +602,7 @@ _CloseArchive(ArchiveHandle *AH) WriteHead(AH); AH->format = archDirectory; WriteToc(AH); - if (cfclose(tocFH) != 0) + if (DestroyCompressFileHandle(tocFH) != 0) pg_fatal("could not close TOC file: %m"); WriteDataChunks(AH, ctx->pstate); @@ -649,8 +653,8 @@ _StartLOs(ArchiveHandle *AH, TocEntry *te) /* The LO TOC file is never compressed */ compression_spec.algorithm = PG_COMPRESSION_NONE; - ctx->LOsTocFH = cfopen_write(fname, "ab", compression_spec); - if (ctx->LOsTocFH 
== NULL) + ctx->LOsTocFH = InitCompressFileHandle(compression_spec); + if (ctx->LOsTocFH->open_write_func(fname, "ab", ctx->LOsTocFH)) pg_fatal("could not open output file \"%s\": %m", fname); } @@ -667,9 +671,8 @@ _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid) snprintf(fname, MAXPGPATH, "%s/blob_%u.dat", ctx->directory, oid); - ctx->dataFH = cfopen_write(fname, PG_BINARY_W, AH->compression_spec); - - if (ctx->dataFH == NULL) + ctx->dataFH = InitCompressFileHandle(AH->compression_spec); + if (ctx->dataFH->open_write_func(fname, PG_BINARY_W, ctx->dataFH)) pg_fatal("could not open output file \"%s\": %m", fname); } @@ -682,18 +685,19 @@ static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid) { lclContext *ctx = (lclContext *) AH->formatData; + CompressFileHandle *CFH = ctx->LOsTocFH; char buf[50]; int len; - /* Close the LO data file itself */ - if (cfclose(ctx->dataFH) != 0) - pg_fatal("could not close LO data file: %m"); + /* Close the BLOB data file itself */ + if (DestroyCompressFileHandle(ctx->dataFH) != 0) + pg_fatal("could not close blob data file: %m"); ctx->dataFH = NULL; /* register the LO in blobs.toc */ len = snprintf(buf, sizeof(buf), "%u blob_%u.dat\n", oid, oid); - if (cfwrite(buf, len, ctx->LOsTocFH) != len) - pg_fatal("could not write to LOs TOC file"); + if (CFH->write_func(buf, len, CFH) != len) + pg_fatal("could not write to blobs TOC file"); } /* @@ -706,8 +710,8 @@ _EndLOs(ArchiveHandle *AH, TocEntry *te) { lclContext *ctx = (lclContext *) AH->formatData; - if (cfclose(ctx->LOsTocFH) != 0) - pg_fatal("could not close LOs TOC file: %m"); + if (DestroyCompressFileHandle(ctx->LOsTocFH) != 0) + pg_fatal("could not close blobs TOC file: %m"); ctx->LOsTocFH = NULL; } diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index 1c7fc728c2..39daa1fc43 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -94,7 +94,7 @@ my %pgdump_runs = ( command => [ 'pg_restore', '-l', 
"$tempdir/compression_gzip_custom.dump", ], - expected => qr/Compression: 1/, + expected => qr/Compression: gzip/, name => 'data content is gzip-compressed' }, }, @@ -239,8 +239,8 @@ my %pgdump_runs = ( command => [ 'pg_restore', '-l', "$tempdir/defaults_custom_format.dump", ], expected => $supports_gzip ? - qr/Compression: -1/ : - qr/Compression: 0/, + qr/Compression: gzip/ : + qr/Compression: none/, name => 'data content is gzip-compressed by default if available', }, }, @@ -264,8 +264,8 @@ my %pgdump_runs = ( command => [ 'pg_restore', '-l', "$tempdir/defaults_dir_format", ], expected => $supports_gzip ? - qr/Compression: -1/ : - qr/Compression: 0/, + qr/Compression: gzip/ : + qr/Compression: none/, name => 'data content is gzip-compressed by default', }, glob_patterns => [ diff --git a/src/tools/pginclude/cpluspluscheck b/src/tools/pginclude/cpluspluscheck index b393f2a2ea..8805237edb 100755 --- a/src/tools/pginclude/cpluspluscheck +++ b/src/tools/pginclude/cpluspluscheck @@ -150,6 +150,7 @@ do # pg_dump is not C++-clean because it uses "public" and "namespace" # as field names, which is unfortunate but we won't change it now. + test "$f" = src/bin/pg_dump/compress_gzip.h && continue test "$f" = src/bin/pg_dump/compress_io.h && continue test "$f" = src/bin/pg_dump/parallel.h && continue test "$f" = src/bin/pg_dump/pg_backup_archiver.h && continue diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index 60c71d05fe..81a451641a 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -428,6 +428,7 @@ CompiledExprState CompositeIOData CompositeTypeStmt CompoundAffixFlag +CompressFileHandle CompressionLocation CompressorState ComputeXidHorizonsResult @@ -1034,6 +1035,7 @@ GucStack GucStackState GucStringAssignHook GucStringCheckHook +GzipCompressorState HANDLE HASHACTION HASHBUCKET -- 2.34.1