Diffstat (limited to 'src/bin/pg_dump')
-rw-r--r--  src/bin/pg_dump/common.c                        27
-rw-r--r--  src/bin/pg_dump/compress_io.c                   38
-rw-r--r--  src/bin/pg_dump/nls.mk                           3
-rw-r--r--  src/bin/pg_dump/parallel.c                      39
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c           154
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.h             6
-rw-r--r--  src/bin/pg_dump/pg_backup_custom.c              72
-rw-r--r--  src/bin/pg_dump/pg_backup_db.c                  46
-rw-r--r--  src/bin/pg_dump/pg_backup_directory.c           72
-rw-r--r--  src/bin/pg_dump/pg_backup_null.c                 4
-rw-r--r--  src/bin/pg_dump/pg_backup_tar.c                 70
-rw-r--r--  src/bin/pg_dump/pg_backup_utils.c                8
-rw-r--r--  src/bin/pg_dump/pg_backup_utils.h                8
-rw-r--r--  src/bin/pg_dump/pg_dump.c                      278
-rw-r--r--  src/bin/pg_dump/pg_dump_sort.c                  14
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c                   105
-rw-r--r--  src/bin/pg_dump/pg_restore.c                    44
-rw-r--r--  src/bin/pg_dump/t/003_pg_dump_with_server.pl     2
18 files changed, 448 insertions(+), 542 deletions(-)
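
The commit below mechanically converts pg_dump's private fatal() macro to the pg_fatal() supplied by the common frontend logging API, and folds the old two-step pg_log_error() + exit_nicely(1) sites into single calls along the way. A minimal sketch of the conversion pattern, with placeholder variable names not taken from the diff:

    /* Old style: pg_dump's local macro, or an explicit two-step report */
    if (fh == NULL)
    {
        pg_log_error("could not open TOC file \"%s\": %m", tocfile);
        exit_nicely(1);
    }

    /* New style: a single pg_fatal() call; %m is expanded to
     * strerror(errno) by the logging layer, and exit_nicely() still
     * runs, thanks to the pg_backup_utils.h override near the end of
     * this diff. */
    if (fh == NULL)
        pg_fatal("could not open TOC file \"%s\": %m", tocfile);
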
diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c
index b9a25442f5f..794e6e7ce99 100644
--- a/src/bin/pg_dump/common.c
+++ b/src/bin/pg_dump/common.c
@@ -340,9 +340,9 @@ flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
/* With partitions there can only be one parent */
if (tblinfo[i].numParents != 1)
- fatal("invalid number of parents %d for table \"%s\"",
- tblinfo[i].numParents,
- tblinfo[i].dobj.name);
+ pg_fatal("invalid number of parents %d for table \"%s\"",
+ tblinfo[i].numParents,
+ tblinfo[i].dobj.name);
attachinfo = (TableAttachInfo *) palloc(sizeof(TableAttachInfo));
attachinfo->dobj.objType = DO_TABLE_ATTACH;
@@ -1001,13 +1001,10 @@ findParentsByOid(TableInfo *self,
parent = findTableByOid(inhinfo[i].inhparent);
if (parent == NULL)
- {
- pg_log_error("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found",
- inhinfo[i].inhparent,
- self->dobj.name,
- oid);
- exit_nicely(1);
- }
+ pg_fatal("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found",
+ inhinfo[i].inhparent,
+ self->dobj.name,
+ oid);
self->parents[j++] = parent;
}
}
@@ -1043,10 +1040,7 @@ parseOidArray(const char *str, Oid *array, int arraysize)
if (j > 0)
{
if (argNum >= arraysize)
- {
- pg_log_error("could not parse numeric array \"%s\": too many numbers", str);
- exit_nicely(1);
- }
+ pg_fatal("could not parse numeric array \"%s\": too many numbers", str);
temp[j] = '\0';
array[argNum++] = atooid(temp);
j = 0;
@@ -1058,10 +1052,7 @@ parseOidArray(const char *str, Oid *array, int arraysize)
{
if (!(isdigit((unsigned char) s) || s == '-') ||
j >= sizeof(temp) - 1)
- {
- pg_log_error("could not parse numeric array \"%s\": invalid character in number", str);
- exit_nicely(1);
- }
+ pg_fatal("could not parse numeric array \"%s\": invalid character in number", str);
temp[j++] = s;
}
}
diff --git a/src/bin/pg_dump/compress_io.c b/src/bin/pg_dump/compress_io.c
index 9077fdb74db..62f940ff7af 100644
--- a/src/bin/pg_dump/compress_io.c
+++ b/src/bin/pg_dump/compress_io.c
@@ -108,7 +108,7 @@ ParseCompressionOption(int compression, CompressionAlgorithm *alg, int *level)
*alg = COMPR_ALG_NONE;
else
{
- fatal("invalid compression code: %d", compression);
+ pg_fatal("invalid compression code: %d", compression);
*alg = COMPR_ALG_NONE; /* keep compiler quiet */
}
@@ -131,7 +131,7 @@ AllocateCompressor(int compression, WriteFunc writeF)
#ifndef HAVE_LIBZ
if (alg == COMPR_ALG_LIBZ)
- fatal("not built with zlib support");
+ pg_fatal("not built with zlib support");
#endif
cs = (CompressorState *) pg_malloc0(sizeof(CompressorState));
@@ -167,7 +167,7 @@ ReadDataFromArchive(ArchiveHandle *AH, int compression, ReadFunc readF)
#ifdef HAVE_LIBZ
ReadDataFromArchiveZlib(AH, readF);
#else
- fatal("not built with zlib support");
+ pg_fatal("not built with zlib support");
#endif
}
}
@@ -185,7 +185,7 @@ WriteDataToArchive(ArchiveHandle *AH, CompressorState *cs,
#ifdef HAVE_LIBZ
WriteDataToArchiveZlib(AH, cs, data, dLen);
#else
- fatal("not built with zlib support");
+ pg_fatal("not built with zlib support");
#endif
break;
case COMPR_ALG_NONE:
@@ -233,8 +233,8 @@ InitCompressorZlib(CompressorState *cs, int level)
cs->zlibOutSize = ZLIB_OUT_SIZE;
if (deflateInit(zp, level) != Z_OK)
- fatal("could not initialize compression library: %s",
- zp->msg);
+ pg_fatal("could not initialize compression library: %s",
+ zp->msg);
/* Just be paranoid - maybe End is called after Start, with no Write */
zp->next_out = (void *) cs->zlibOut;
@@ -253,7 +253,7 @@ EndCompressorZlib(ArchiveHandle *AH, CompressorState *cs)
DeflateCompressorZlib(AH, cs, true);
if (deflateEnd(zp) != Z_OK)
- fatal("could not close compression stream: %s", zp->msg);
+ pg_fatal("could not close compression stream: %s", zp->msg);
free(cs->zlibOut);
free(cs->zp);
@@ -270,7 +270,7 @@ DeflateCompressorZlib(ArchiveHandle *AH, CompressorState *cs, bool flush)
{
res = deflate(zp, flush ? Z_FINISH : Z_NO_FLUSH);
if (res == Z_STREAM_ERROR)
- fatal("could not compress data: %s", zp->msg);
+ pg_fatal("could not compress data: %s", zp->msg);
if ((flush && (zp->avail_out < cs->zlibOutSize))
|| (zp->avail_out == 0)
|| (zp->avail_in != 0)
@@ -330,8 +330,8 @@ ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF)
out = pg_malloc(ZLIB_OUT_SIZE + 1);
if (inflateInit(zp) != Z_OK)
- fatal("could not initialize compression library: %s",
- zp->msg);
+ pg_fatal("could not initialize compression library: %s",
+ zp->msg);
/* no minimal chunk size for zlib */
while ((cnt = readF(AH, &buf, &buflen)))
@@ -346,7 +346,7 @@ ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF)
res = inflate(zp, 0);
if (res != Z_OK && res != Z_STREAM_END)
- fatal("could not uncompress data: %s", zp->msg);
+ pg_fatal("could not uncompress data: %s", zp->msg);
out[ZLIB_OUT_SIZE - zp->avail_out] = '\0';
ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH);
@@ -361,14 +361,14 @@ ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF)
zp->avail_out = ZLIB_OUT_SIZE;
res = inflate(zp, 0);
if (res != Z_OK && res != Z_STREAM_END)
- fatal("could not uncompress data: %s", zp->msg);
+ pg_fatal("could not uncompress data: %s", zp->msg);
out[ZLIB_OUT_SIZE - zp->avail_out] = '\0';
ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH);
}
if (inflateEnd(zp) != Z_OK)
- fatal("could not close compression library: %s", zp->msg);
+ pg_fatal("could not close compression library: %s", zp->msg);
free(buf);
free(out);
@@ -501,7 +501,7 @@ cfopen_write(const char *path, const char *mode, int compression)
fp = cfopen(fname, mode, compression);
free_keep_errno(fname);
#else
- fatal("not built with zlib support");
+ pg_fatal("not built with zlib support");
fp = NULL; /* keep compiler quiet */
#endif
}
@@ -544,7 +544,7 @@ cfopen(const char *path, const char *mode, int compression)
fp = NULL;
}
#else
- fatal("not built with zlib support");
+ pg_fatal("not built with zlib support");
#endif
}
else
@@ -581,8 +581,8 @@ cfread(void *ptr, int size, cfp *fp)
int errnum;
const char *errmsg = gzerror(fp->compressedfp, &errnum);
- fatal("could not read from input file: %s",
- errnum == Z_ERRNO ? strerror(errno) : errmsg);
+ pg_fatal("could not read from input file: %s",
+ errnum == Z_ERRNO ? strerror(errno) : errmsg);
}
}
else
@@ -618,9 +618,9 @@ cfgetc(cfp *fp)
if (ret == EOF)
{
if (!gzeof(fp->compressedfp))
- fatal("could not read from input file: %s", strerror(errno));
+ pg_fatal("could not read from input file: %s", strerror(errno));
else
- fatal("could not read from input file: end of file");
+ pg_fatal("could not read from input file: end of file");
}
}
else
diff --git a/src/bin/pg_dump/nls.mk b/src/bin/pg_dump/nls.mk
index 6276fd443b1..220d1ec75f1 100644
--- a/src/bin/pg_dump/nls.mk
+++ b/src/bin/pg_dump/nls.mk
@@ -11,8 +11,7 @@ GETTEXT_FILES = $(FRONTEND_COMMON_GETTEXT_FILES) \
../../common/exec.c ../../common/fe_memutils.c \
../../common/wait_error.c
GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS) \
- fatal simple_prompt \
+ simple_prompt \
ExecuteSqlCommand:3 warn_or_exit_horribly:2
GETTEXT_FLAGS = $(FRONTEND_COMMON_GETTEXT_FLAGS) \
- fatal:1:c-format \
warn_or_exit_horribly:2:c-format
diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c
index bc5251be824..c9f6b86bb05 100644
--- a/src/bin/pg_dump/parallel.c
+++ b/src/bin/pg_dump/parallel.c
@@ -250,10 +250,7 @@ init_parallel_dump_utils(void)
/* Initialize socket access */
err = WSAStartup(MAKEWORD(2, 2), &wsaData);
if (err != 0)
- {
- pg_log_error("%s() failed: error code %d", "WSAStartup", err);
- exit_nicely(1);
- }
+ pg_fatal("%s() failed: error code %d", "WSAStartup", err);
parallel_init_done = true;
}
@@ -393,7 +390,7 @@ archive_close_connection(int code, void *arg)
*
* Note that we don't expect to come here during normal exit (the workers
* should be long gone, and the ParallelState too). We're only here in a
- * fatal() situation, so intervening to cancel active commands is
+ * pg_fatal() situation, so intervening to cancel active commands is
* appropriate.
*/
static void
@@ -961,7 +958,7 @@ ParallelBackupStart(ArchiveHandle *AH)
/* Create communication pipes for this worker */
if (pgpipe(pipeMW) < 0 || pgpipe(pipeWM) < 0)
- fatal("could not create communication channels: %m");
+ pg_fatal("could not create communication channels: %m");
/* leader's ends of the pipes */
slot->pipeRead = pipeWM[PIPE_READ];
@@ -1018,7 +1015,7 @@ ParallelBackupStart(ArchiveHandle *AH)
else if (pid < 0)
{
/* fork failed */
- fatal("could not create worker process: %m");
+ pg_fatal("could not create worker process: %m");
}
/* In Leader after successful fork */
@@ -1148,8 +1145,8 @@ parseWorkerCommand(ArchiveHandle *AH, TocEntry **te, T_Action *act,
Assert(*te != NULL);
}
else
- fatal("unrecognized command received from leader: \"%s\"",
- msg);
+ pg_fatal("unrecognized command received from leader: \"%s\"",
+ msg);
}
/*
@@ -1191,8 +1188,8 @@ parseWorkerResponse(ArchiveHandle *AH, TocEntry *te,
AH->public.n_errors += n_errors;
}
else
- fatal("invalid message received from worker: \"%s\"",
- msg);
+ pg_fatal("invalid message received from worker: \"%s\"",
+ msg);
return status;
}
@@ -1323,10 +1320,10 @@ lockTableForWorker(ArchiveHandle *AH, TocEntry *te)
res = PQexec(AH->connection, query->data);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
- fatal("could not obtain lock on relation \"%s\"\n"
- "This usually means that someone requested an ACCESS EXCLUSIVE lock "
- "on the table after the pg_dump parent process had gotten the "
- "initial ACCESS SHARE lock on the table.", qualId);
+ pg_fatal("could not obtain lock on relation \"%s\"\n"
+ "This usually means that someone requested an ACCESS EXCLUSIVE lock "
+ "on the table after the pg_dump parent process had gotten the "
+ "initial ACCESS SHARE lock on the table.", qualId);
PQclear(res);
destroyPQExpBuffer(query);
@@ -1412,7 +1409,7 @@ ListenToWorkers(ArchiveHandle *AH, ParallelState *pstate, bool do_wait)
{
/* If do_wait is true, we must have detected EOF on some socket */
if (do_wait)
- fatal("a worker process died unexpectedly");
+ pg_fatal("a worker process died unexpectedly");
return false;
}
@@ -1429,8 +1426,8 @@ ListenToWorkers(ArchiveHandle *AH, ParallelState *pstate, bool do_wait)
pstate->te[worker] = NULL;
}
else
- fatal("invalid message received from worker: \"%s\"",
- msg);
+ pg_fatal("invalid message received from worker: \"%s\"",
+ msg);
/* Free the string returned from getMessageFromWorker */
free(msg);
@@ -1534,7 +1531,7 @@ sendMessageToLeader(int pipefd[2], const char *str)
int len = strlen(str) + 1;
if (pipewrite(pipefd[PIPE_WRITE], str, len) != len)
- fatal("could not write to the communication channel: %m");
+ pg_fatal("could not write to the communication channel: %m");
}
/*
@@ -1611,7 +1608,7 @@ getMessageFromWorker(ParallelState *pstate, bool do_wait, int *worker)
}
if (i < 0)
- fatal("%s() failed: %m", "select");
+ pg_fatal("%s() failed: %m", "select");
for (i = 0; i < pstate->numWorkers; i++)
{
@@ -1652,7 +1649,7 @@ sendMessageToWorker(ParallelState *pstate, int worker, const char *str)
if (pipewrite(pstate->parallelSlot[worker].pipeWrite, str, len) != len)
{
- fatal("could not write to the communication channel: %m");
+ pg_fatal("could not write to the communication channel: %m");
}
}
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index d41a99d6ea7..24e42fa5d7d 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -276,7 +276,7 @@ CloseArchive(Archive *AHX)
res = fclose(AH->OF);
if (res != 0)
- fatal("could not close output file: %m");
+ pg_fatal("could not close output file: %m");
}
/* Public */
@@ -330,8 +330,8 @@ ProcessArchiveRestoreOptions(Archive *AHX)
/* ok no matter which section we were in */
break;
default:
- fatal("unexpected section code %d",
- (int) te->section);
+ pg_fatal("unexpected section code %d",
+ (int) te->section);
break;
}
}
@@ -367,11 +367,11 @@ RestoreArchive(Archive *AHX)
{
/* We haven't got round to making this work for all archive formats */
if (AH->ClonePtr == NULL || AH->ReopenPtr == NULL)
- fatal("parallel restore is not supported with this archive file format");
+ pg_fatal("parallel restore is not supported with this archive file format");
/* Doesn't work if the archive represents dependencies as OIDs */
if (AH->version < K_VERS_1_8)
- fatal("parallel restore is not supported with archives made by pre-8.0 pg_dump");
+ pg_fatal("parallel restore is not supported with archives made by pre-8.0 pg_dump");
/*
* It's also not gonna work if we can't reopen the input file, so
@@ -389,7 +389,7 @@ RestoreArchive(Archive *AHX)
for (te = AH->toc->next; te != AH->toc; te = te->next)
{
if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
- fatal("cannot restore from compressed archive (compression not supported in this installation)");
+ pg_fatal("cannot restore from compressed archive (compression not supported in this installation)");
}
}
#endif
@@ -408,7 +408,7 @@ RestoreArchive(Archive *AHX)
{
pg_log_info("connecting to database for restore");
if (AH->version < K_VERS_1_3)
- fatal("direct database connections are not supported in pre-1.3 archives");
+ pg_fatal("direct database connections are not supported in pre-1.3 archives");
/*
* We don't want to guess at whether the dump will successfully
@@ -1037,7 +1037,7 @@ WriteData(Archive *AHX, const void *data, size_t dLen)
ArchiveHandle *AH = (ArchiveHandle *) AHX;
if (!AH->currToc)
- fatal("internal error -- WriteData cannot be called outside the context of a DataDumper routine");
+ pg_fatal("internal error -- WriteData cannot be called outside the context of a DataDumper routine");
AH->WriteDataPtr(AH, data, dLen);
}
@@ -1220,7 +1220,7 @@ StartBlob(Archive *AHX, Oid oid)
ArchiveHandle *AH = (ArchiveHandle *) AHX;
if (!AH->StartBlobPtr)
- fatal("large-object output not supported in chosen format");
+ pg_fatal("large-object output not supported in chosen format");
AH->StartBlobPtr(AH, AH->currToc, oid);
@@ -1311,13 +1311,13 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop)
{
loOid = lo_create(AH->connection, oid);
if (loOid == 0 || loOid != oid)
- fatal("could not create large object %u: %s",
- oid, PQerrorMessage(AH->connection));
+ pg_fatal("could not create large object %u: %s",
+ oid, PQerrorMessage(AH->connection));
}
AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
if (AH->loFd == -1)
- fatal("could not open large object %u: %s",
- oid, PQerrorMessage(AH->connection));
+ pg_fatal("could not open large object %u: %s",
+ oid, PQerrorMessage(AH->connection));
}
else
{
@@ -1372,7 +1372,7 @@ SortTocFromFile(Archive *AHX)
/* Setup the file */
fh = fopen(ropt->tocFile, PG_BINARY_R);
if (!fh)
- fatal("could not open TOC file \"%s\": %m", ropt->tocFile);
+ pg_fatal("could not open TOC file \"%s\": %m", ropt->tocFile);
initStringInfo(&linebuf);
@@ -1407,8 +1407,8 @@ SortTocFromFile(Archive *AHX)
/* Find TOC entry */
te = getTocEntryByDumpId(AH, id);
if (!te)
- fatal("could not find entry for ID %d",
- id);
+ pg_fatal("could not find entry for ID %d",
+ id);
/* Mark it wanted */
ropt->idWanted[id - 1] = true;
@@ -1430,7 +1430,7 @@ SortTocFromFile(Archive *AHX)
pg_free(linebuf.data);
if (fclose(fh) != 0)
- fatal("could not close TOC file: %m");
+ pg_fatal("could not close TOC file: %m");
}
/**********************
@@ -1544,9 +1544,9 @@ SetOutput(ArchiveHandle *AH, const char *filename, int compression)
if (!AH->OF)
{
if (filename)
- fatal("could not open output file \"%s\": %m", filename);
+ pg_fatal("could not open output file \"%s\": %m", filename);
else
- fatal("could not open output file: %m");
+ pg_fatal("could not open output file: %m");
}
}
@@ -1573,7 +1573,7 @@ RestoreOutput(ArchiveHandle *AH, OutputContext savedContext)
res = fclose(AH->OF);
if (res != 0)
- fatal("could not close output file: %m");
+ pg_fatal("could not close output file: %m");
AH->gzOut = savedContext.gzOut;
AH->OF = savedContext.OF;
@@ -1736,34 +1736,34 @@ warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...)
case STAGE_INITIALIZING:
if (AH->stage != AH->lastErrorStage)
- pg_log_generic(PG_LOG_INFO, "while INITIALIZING:");
+ pg_log_info("while INITIALIZING:");
break;
case STAGE_PROCESSING:
if (AH->stage != AH->lastErrorStage)
- pg_log_generic(PG_LOG_INFO, "while PROCESSING TOC:");
+ pg_log_info("while PROCESSING TOC:");
break;
case STAGE_FINALIZING:
if (AH->stage != AH->lastErrorStage)
- pg_log_generic(PG_LOG_INFO, "while FINALIZING:");
+ pg_log_info("while FINALIZING:");
break;
}
if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
{
- pg_log_generic(PG_LOG_INFO, "from TOC entry %d; %u %u %s %s %s",
- AH->currentTE->dumpId,
- AH->currentTE->catalogId.tableoid,
- AH->currentTE->catalogId.oid,
- AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
- AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
- AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
+ pg_log_info("from TOC entry %d; %u %u %s %s %s",
+ AH->currentTE->dumpId,
+ AH->currentTE->catalogId.tableoid,
+ AH->currentTE->catalogId.oid,
+ AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
+ AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
+ AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
}
AH->lastErrorStage = AH->stage;
AH->lastErrorTE = AH->currentTE;
va_start(ap, fmt);
- pg_log_generic_v(PG_LOG_ERROR, fmt, ap);
+ pg_log_generic_v(PG_LOG_ERROR, PG_LOG_PRIMARY, fmt, ap);
va_end(ap);
if (AH->public.exit_on_error)
@@ -1827,7 +1827,7 @@ buildTocEntryArrays(ArchiveHandle *AH)
{
/* this check is purely paranoia, maxDumpId should be correct */
if (te->dumpId <= 0 || te->dumpId > maxDumpId)
- fatal("bad dumpId");
+ pg_fatal("bad dumpId");
/* tocsByDumpId indexes all TOCs by their dump ID */
AH->tocsByDumpId[te->dumpId] = te;
@@ -1848,7 +1848,7 @@ buildTocEntryArrays(ArchiveHandle *AH)
* item's dump ID, so there should be a place for it in the array.
*/
if (tableId <= 0 || tableId > maxDumpId)
- fatal("bad table dumpId for TABLE DATA item");
+ pg_fatal("bad table dumpId for TABLE DATA item");
AH->tableDataId[tableId] = te->dumpId;
}
@@ -1940,7 +1940,7 @@ ReadOffset(ArchiveHandle *AH, pgoff_t * o)
break;
default:
- fatal("unexpected data offset flag %d", offsetFlg);
+ pg_fatal("unexpected data offset flag %d", offsetFlg);
}
/*
@@ -1953,7 +1953,7 @@ ReadOffset(ArchiveHandle *AH, pgoff_t * o)
else
{
if (AH->ReadBytePtr(AH) != 0)
- fatal("file offset in dump file is too large");
+ pg_fatal("file offset in dump file is too large");
}
}
@@ -2091,8 +2091,8 @@ _discoverArchiveFormat(ArchiveHandle *AH)
char buf[MAXPGPATH];
if (snprintf(buf, MAXPGPATH, "%s/toc.dat", AH->fSpec) >= MAXPGPATH)
- fatal("directory name too long: \"%s\"",
- AH->fSpec);
+ pg_fatal("directory name too long: \"%s\"",
+ AH->fSpec);
if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
{
AH->format = archDirectory;
@@ -2101,39 +2101,39 @@ _discoverArchiveFormat(ArchiveHandle *AH)
#ifdef HAVE_LIBZ
if (snprintf(buf, MAXPGPATH, "%s/toc.dat.gz", AH->fSpec) >= MAXPGPATH)
- fatal("directory name too long: \"%s\"",
- AH->fSpec);
+ pg_fatal("directory name too long: \"%s\"",
+ AH->fSpec);
if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
{
AH->format = archDirectory;
return AH->format;
}
#endif
- fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)",
- AH->fSpec);
+ pg_fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)",
+ AH->fSpec);
fh = NULL; /* keep compiler quiet */
}
else
{
fh = fopen(AH->fSpec, PG_BINARY_R);
if (!fh)
- fatal("could not open input file \"%s\": %m", AH->fSpec);
+ pg_fatal("could not open input file \"%s\": %m", AH->fSpec);
}
}
else
{
fh = stdin;
if (!fh)
- fatal("could not open input file: %m");
+ pg_fatal("could not open input file: %m");
}
if ((cnt = fread(sig, 1, 5, fh)) != 5)
{
if (ferror(fh))
- fatal("could not read input file: %m");
+ pg_fatal("could not read input file: %m");
else
- fatal("input file is too short (read %lu, expected 5)",
- (unsigned long) cnt);
+ pg_fatal("input file is too short (read %lu, expected 5)",
+ (unsigned long) cnt);
}
/* Save it, just in case we need it later */
@@ -2164,19 +2164,19 @@ _discoverArchiveFormat(ArchiveHandle *AH)
* looks like it's probably a text format dump. so suggest they
* try psql
*/
- fatal("input file appears to be a text format dump. Please use psql.");
+ pg_fatal("input file appears to be a text format dump. Please use psql.");
}
if (AH->lookaheadLen != 512)
{
if (feof(fh))
- fatal("input file does not appear to be a valid archive (too short?)");
+ pg_fatal("input file does not appear to be a valid archive (too short?)");
else
READ_ERROR_EXIT(fh);
}
if (!isValidTarHeader(AH->lookahead))
- fatal("input file does not appear to be a valid archive");
+ pg_fatal("input file does not appear to be a valid archive");
AH->format = archTar;
}
@@ -2185,7 +2185,7 @@ _discoverArchiveFormat(ArchiveHandle *AH)
if (wantClose)
{
if (fclose(fh) != 0)
- fatal("could not close input file: %m");
+ pg_fatal("could not close input file: %m");
/* Forget lookahead, since we'll re-read header after re-opening */
AH->readHeader = 0;
AH->lookaheadLen = 0;
@@ -2302,7 +2302,7 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt,
break;
default:
- fatal("unrecognized file format \"%d\"", fmt);
+ pg_fatal("unrecognized file format \"%d\"", fmt);
}
return AH;
@@ -2388,8 +2388,8 @@ mark_dump_job_done(ArchiveHandle *AH,
te->dumpId, te->desc, te->tag);
if (status != 0)
- fatal("worker process failed: exit code %d",
- status);
+ pg_fatal("worker process failed: exit code %d",
+ status);
}
@@ -2509,8 +2509,8 @@ ReadToc(ArchiveHandle *AH)
/* Sanity check */
if (te->dumpId <= 0)
- fatal("entry ID %d out of range -- perhaps a corrupt TOC",
- te->dumpId);
+ pg_fatal("entry ID %d out of range -- perhaps a corrupt TOC",
+ te->dumpId);
te->hadDumper = ReadInt(AH);
@@ -2671,13 +2671,13 @@ processEncodingEntry(ArchiveHandle *AH, TocEntry *te)
*ptr2 = '\0';
encoding = pg_char_to_encoding(ptr1);
if (encoding < 0)
- fatal("unrecognized encoding \"%s\"",
- ptr1);
+ pg_fatal("unrecognized encoding \"%s\"",
+ ptr1);
AH->public.encoding = encoding;
}
else
- fatal("invalid ENCODING item: %s",
- te->defn);
+ pg_fatal("invalid ENCODING item: %s",
+ te->defn);
free(defn);
}
@@ -2694,8 +2694,8 @@ processStdStringsEntry(ArchiveHandle *AH, TocEntry *te)
else if (ptr1 && strncmp(ptr1, "'off'", 5) == 0)
AH->public.std_strings = false;
else
- fatal("invalid STDSTRINGS item: %s",
- te->defn);
+ pg_fatal("invalid STDSTRINGS item: %s",
+ te->defn);
}
static void
@@ -2719,35 +2719,35 @@ StrictNamesCheck(RestoreOptions *ropt)
{
missing_name = simple_string_list_not_touched(&ropt->schemaNames);
if (missing_name != NULL)
- fatal("schema \"%s\" not found", missing_name);
+ pg_fatal("schema \"%s\" not found", missing_name);
}
if (ropt->tableNames.head != NULL)
{
missing_name = simple_string_list_not_touched(&ropt->tableNames);
if (missing_name != NULL)
- fatal("table \"%s\" not found", missing_name);
+ pg_fatal("table \"%s\" not found", missing_name);
}
if (ropt->indexNames.head != NULL)
{
missing_name = simple_string_list_not_touched(&ropt->indexNames);
if (missing_name != NULL)
- fatal("index \"%s\" not found", missing_name);
+ pg_fatal("index \"%s\" not found", missing_name);
}
if (ropt->functionNames.head != NULL)
{
missing_name = simple_string_list_not_touched(&ropt->functionNames);
if (missing_name != NULL)
- fatal("function \"%s\" not found", missing_name);
+ pg_fatal("function \"%s\" not found", missing_name);
}
if (ropt->triggerNames.head != NULL)
{
missing_name = simple_string_list_not_touched(&ropt->triggerNames);
if (missing_name != NULL)
- fatal("trigger \"%s\" not found", missing_name);
+ pg_fatal("trigger \"%s\" not found", missing_name);
}
}
@@ -3140,8 +3140,8 @@ _doSetSessionAuth(ArchiveHandle *AH, const char *user)
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
/* NOT warn_or_exit_horribly... use -O instead to skip this. */
- fatal("could not set session user to \"%s\": %s",
- user, PQerrorMessage(AH->connection));
+ pg_fatal("could not set session user to \"%s\": %s",
+ user, PQerrorMessage(AH->connection));
PQclear(res);
}
@@ -3751,7 +3751,7 @@ ReadHead(ArchiveHandle *AH)
AH->ReadBufPtr(AH, tmpMag, 5);
if (strncmp(tmpMag, "PGDMP", 5) != 0)
- fatal("did not find magic string in file header");
+ pg_fatal("did not find magic string in file header");
}
vmaj = AH->ReadBytePtr(AH);
@@ -3765,13 +3765,13 @@ ReadHead(ArchiveHandle *AH)
AH->version = MAKE_ARCHIVE_VERSION(vmaj, vmin, vrev);
if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
- fatal("unsupported version (%d.%d) in file header",
- vmaj, vmin);
+ pg_fatal("unsupported version (%d.%d) in file header",
+ vmaj, vmin);
AH->intSize = AH->ReadBytePtr(AH);
if (AH->intSize > 32)
- fatal("sanity check on integer size (%lu) failed",
- (unsigned long) AH->intSize);
+ pg_fatal("sanity check on integer size (%lu) failed",
+ (unsigned long) AH->intSize);
if (AH->intSize > sizeof(int))
pg_log_warning("archive was made on a machine with larger integers, some operations might fail");
@@ -3784,8 +3784,8 @@ ReadHead(ArchiveHandle *AH)
fmt = AH->ReadBytePtr(AH);
if (AH->format != fmt)
- fatal("expected format (%d) differs from format found in file (%d)",
- AH->format, fmt);
+ pg_fatal("expected format (%d) differs from format found in file (%d)",
+ AH->format, fmt);
if (AH->version >= K_VERS_1_2)
{
@@ -4455,8 +4455,8 @@ mark_restore_job_done(ArchiveHandle *AH,
else if (status == WORKER_IGNORED_ERRORS)
AH->public.n_errors++;
else if (status != 0)
- fatal("worker process failed: exit code %d",
- status);
+ pg_fatal("worker process failed: exit code %d",
+ status);
reduce_dependencies(AH, te, ready_list);
}
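
The warn_or_exit_horribly() hunk above also picks up the extended pg_log_generic_v() signature, which now takes a message part (PG_LOG_PRIMARY, as opposed to detail or hint parts) in addition to a severity level. A sketch of a variadic reporter in the new style, assuming only the public logging API; the function name here is hypothetical:

    #include <stdarg.h>
    #include "common/logging.h"

    static void
    report_toc_error(bool exit_on_error, const char *fmt, ...)
    {
        va_list     ap;

        va_start(ap, fmt);
        /* PG_LOG_PRIMARY marks this as the primary message line */
        pg_log_generic_v(PG_LOG_ERROR, PG_LOG_PRIMARY, fmt, ap);
        va_end(ap);

        if (exit_on_error)
            exit_nicely(1);
    }
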
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index 540d4f6a833..084cd87e8d7 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -121,14 +121,14 @@ struct ParallelState;
#define READ_ERROR_EXIT(fd) \
do { \
if (feof(fd)) \
- fatal("could not read from input file: end of file"); \
+ pg_fatal("could not read from input file: end of file"); \
else \
- fatal("could not read from input file: %m"); \
+ pg_fatal("could not read from input file: %m"); \
} while (0)
#define WRITE_ERROR_EXIT \
do { \
- fatal("could not write to output file: %m"); \
+ pg_fatal("could not write to output file: %m"); \
} while (0)
typedef enum T_Action
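
READ_ERROR_EXIT exists because a short fread() can mean either end-of-file or a genuine I/O error, and the two deserve different messages; feof() disambiguates before the macro picks a pg_fatal() branch. A hypothetical call site:

    if (fread(buf, 1, len, AH->FH) != len)
        READ_ERROR_EXIT(AH->FH);    /* "end of file" vs. strerror via %m */
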
diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c
index 77d402c323e..c3b9c365d5c 100644
--- a/src/bin/pg_dump/pg_backup_custom.c
+++ b/src/bin/pg_dump/pg_backup_custom.c
@@ -153,13 +153,13 @@ InitArchiveFmt_Custom(ArchiveHandle *AH)
{
AH->FH = fopen(AH->fSpec, PG_BINARY_W);
if (!AH->FH)
- fatal("could not open output file \"%s\": %m", AH->fSpec);
+ pg_fatal("could not open output file \"%s\": %m", AH->fSpec);
}
else
{
AH->FH = stdout;
if (!AH->FH)
- fatal("could not open output file: %m");
+ pg_fatal("could not open output file: %m");
}
ctx->hasSeek = checkSeek(AH->FH);
@@ -170,13 +170,13 @@ InitArchiveFmt_Custom(ArchiveHandle *AH)
{
AH->FH = fopen(AH->fSpec, PG_BINARY_R);
if (!AH->FH)
- fatal("could not open input file \"%s\": %m", AH->fSpec);
+ pg_fatal("could not open input file \"%s\": %m", AH->fSpec);
}
else
{
AH->FH = stdin;
if (!AH->FH)
- fatal("could not open input file: %m");
+ pg_fatal("could not open input file: %m");
}
ctx->hasSeek = checkSeek(AH->FH);
@@ -373,7 +373,7 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
lclContext *ctx = (lclContext *) AH->formatData;
if (oid == 0)
- fatal("invalid OID for large object");
+ pg_fatal("invalid OID for large object");
WriteInt(AH, oid);
@@ -436,7 +436,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
if (ctx->hasSeek)
{
if (fseeko(AH->FH, ctx->lastFilePos, SEEK_SET) != 0)
- fatal("error during file seek: %m");
+ pg_fatal("error during file seek: %m");
}
for (;;)
@@ -492,8 +492,8 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
break;
default: /* Always have a default */
- fatal("unrecognized data block type (%d) while searching archive",
- blkType);
+ pg_fatal("unrecognized data block type (%d) while searching archive",
+ blkType);
break;
}
}
@@ -502,7 +502,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
{
/* We can just seek to the place we need to be. */
if (fseeko(AH->FH, tctx->dataPos, SEEK_SET) != 0)
- fatal("error during file seek: %m");
+ pg_fatal("error during file seek: %m");
_readBlockHeader(AH, &blkType, &id);
}
@@ -514,20 +514,20 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
if (blkType == EOF)
{
if (!ctx->hasSeek)
- fatal("could not find block ID %d in archive -- "
- "possibly due to out-of-order restore request, "
- "which cannot be handled due to non-seekable input file",
- te->dumpId);
+ pg_fatal("could not find block ID %d in archive -- "
+ "possibly due to out-of-order restore request, "
+ "which cannot be handled due to non-seekable input file",
+ te->dumpId);
else
- fatal("could not find block ID %d in archive -- "
- "possibly corrupt archive",
- te->dumpId);
+ pg_fatal("could not find block ID %d in archive -- "
+ "possibly corrupt archive",
+ te->dumpId);
}
/* Are we sane? */
if (id != te->dumpId)
- fatal("found unexpected block ID (%d) when reading data -- expected %d",
- id, te->dumpId);
+ pg_fatal("found unexpected block ID (%d) when reading data -- expected %d",
+ id, te->dumpId);
switch (blkType)
{
@@ -540,8 +540,8 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
break;
default: /* Always have a default */
- fatal("unrecognized data block type %d while restoring archive",
- blkType);
+ pg_fatal("unrecognized data block type %d while restoring archive",
+ blkType);
break;
}
@@ -626,7 +626,7 @@ _skipData(ArchiveHandle *AH)
if (ctx->hasSeek)
{
if (fseeko(AH->FH, blkLen, SEEK_CUR) != 0)
- fatal("error during file seek: %m");
+ pg_fatal("error during file seek: %m");
}
else
{
@@ -640,9 +640,9 @@ _skipData(ArchiveHandle *AH)
if (fread(buf, 1, blkLen, AH->FH) != blkLen)
{
if (feof(AH->FH))
- fatal("could not read from input file: end of file");
+ pg_fatal("could not read from input file: end of file");
else
- fatal("could not read from input file: %m");
+ pg_fatal("could not read from input file: %m");
}
}
@@ -743,7 +743,7 @@ _CloseArchive(ArchiveHandle *AH)
/* Remember TOC's seek position for use below */
tpos = ftello(AH->FH);
if (tpos < 0 && ctx->hasSeek)
- fatal("could not determine seek position in archive file: %m");
+ pg_fatal("could not determine seek position in archive file: %m");
WriteToc(AH);
WriteDataChunks(AH, NULL);
@@ -759,7 +759,7 @@ _CloseArchive(ArchiveHandle *AH)
}
if (fclose(AH->FH) != 0)
- fatal("could not close archive file: %m");
+ pg_fatal("could not close archive file: %m");
/* Sync the output file if one is defined */
if (AH->dosync && AH->mode == archModeWrite && AH->fSpec)
@@ -782,32 +782,32 @@ _ReopenArchive(ArchiveHandle *AH)
pgoff_t tpos;
if (AH->mode == archModeWrite)
- fatal("can only reopen input archives");
+ pg_fatal("can only reopen input archives");
/*
* These two cases are user-facing errors since they represent unsupported
* (but not invalid) use-cases. Word the error messages appropriately.
*/
if (AH->fSpec == NULL || strcmp(AH->fSpec, "") == 0)
- fatal("parallel restore from standard input is not supported");
+ pg_fatal("parallel restore from standard input is not supported");
if (!ctx->hasSeek)
- fatal("parallel restore from non-seekable file is not supported");
+ pg_fatal("parallel restore from non-seekable file is not supported");
tpos = ftello(AH->FH);
if (tpos < 0)
- fatal("could not determine seek position in archive file: %m");
+ pg_fatal("could not determine seek position in archive file: %m");
#ifndef WIN32
if (fclose(AH->FH) != 0)
- fatal("could not close archive file: %m");
+ pg_fatal("could not close archive file: %m");
#endif
AH->FH = fopen(AH->fSpec, PG_BINARY_R);
if (!AH->FH)
- fatal("could not open input file \"%s\": %m", AH->fSpec);
+ pg_fatal("could not open input file \"%s\": %m", AH->fSpec);
if (fseeko(AH->FH, tpos, SEEK_SET) != 0)
- fatal("could not set seek position in archive file: %m");
+ pg_fatal("could not set seek position in archive file: %m");
}
/*
@@ -862,7 +862,7 @@ _PrepParallelRestore(ArchiveHandle *AH)
pgoff_t endpos;
if (fseeko(AH->FH, 0, SEEK_END) != 0)
- fatal("error during file seek: %m");
+ pg_fatal("error during file seek: %m");
endpos = ftello(AH->FH);
if (endpos > prev_tctx->dataPos)
prev_te->dataLength = endpos - prev_tctx->dataPos;
@@ -886,7 +886,7 @@ _Clone(ArchiveHandle *AH)
/* sanity check, shouldn't happen */
if (ctx->cs != NULL)
- fatal("compressor active");
+ pg_fatal("compressor active");
/*
* We intentionally do not clone TOC-entry-local state: it's useful to
@@ -940,7 +940,7 @@ _getFilePos(ArchiveHandle *AH, lclContext *ctx)
{
/* Not expected if we found we can seek. */
if (ctx->hasSeek)
- fatal("could not determine seek position in archive file: %m");
+ pg_fatal("could not determine seek position in archive file: %m");
}
return pos;
}
@@ -956,7 +956,7 @@ _readBlockHeader(ArchiveHandle *AH, int *type, int *id)
int byt;
/*
- * Note: if we are at EOF with a pre-1.3 input file, we'll fatal() inside
+ * Note: if we are at EOF with a pre-1.3 input file, we'll pg_fatal() inside
* ReadInt rather than returning EOF. It doesn't seem worth jumping
* through hoops to deal with that case better, because no such files are
* likely to exist in the wild: only some 7.1 development versions of
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index 3184eda3e75..89cdbf80e0e 100644
--- a/src/bin/pg_dump/pg_backup_db.c
+++ b/src/bin/pg_dump/pg_backup_db.c
@@ -39,7 +39,7 @@ _check_database_version(ArchiveHandle *AH)
remoteversion_str = PQparameterStatus(AH->connection, "server_version");
remoteversion = PQserverVersion(AH->connection);
if (remoteversion == 0 || !remoteversion_str)
- fatal("could not get server_version from libpq");
+ pg_fatal("could not get server_version from libpq");
AH->public.remoteVersionStr = pg_strdup(remoteversion_str);
AH->public.remoteVersion = remoteversion;
@@ -50,9 +50,10 @@ _check_database_version(ArchiveHandle *AH)
&& (remoteversion < AH->public.minRemoteVersion ||
remoteversion > AH->public.maxRemoteVersion))
{
- pg_log_error("server version: %s; %s version: %s",
- remoteversion_str, progname, PG_VERSION);
- fatal("aborting because of server version mismatch");
+ pg_log_error("aborting because of server version mismatch");
+ pg_log_error_detail("server version: %s; %s version: %s",
+ remoteversion_str, progname, PG_VERSION);
+ exit(1);
}
/*
@@ -116,7 +117,7 @@ ConnectDatabase(Archive *AHX,
bool new_pass;
if (AH->connection)
- fatal("already connected to a database");
+ pg_fatal("already connected to a database");
/* Never prompt for a password during a reconnection */
prompt_password = isReconnect ? TRI_NO : cparams->promptPassword;
@@ -166,7 +167,7 @@ ConnectDatabase(Archive *AHX,
AH->connection = PQconnectdbParams(keywords, values, true);
if (!AH->connection)
- fatal("could not connect to database");
+ pg_fatal("could not connect to database");
if (PQstatus(AH->connection) == CONNECTION_BAD &&
PQconnectionNeedsPassword(AH->connection) &&
@@ -183,11 +184,11 @@ ConnectDatabase(Archive *AHX,
if (PQstatus(AH->connection) == CONNECTION_BAD)
{
if (isReconnect)
- fatal("reconnection failed: %s",
- PQerrorMessage(AH->connection));
+ pg_fatal("reconnection failed: %s",
+ PQerrorMessage(AH->connection));
else
- fatal("%s",
- PQerrorMessage(AH->connection));
+ pg_fatal("%s",
+ PQerrorMessage(AH->connection));
}
/* Start strict; later phases may override this. */
@@ -235,7 +236,7 @@ DisconnectDatabase(Archive *AHX)
/*
* If we have an active query, send a cancel before closing, ignoring
* any errors. This is of no use for a normal exit, but might be
- * helpful during fatal().
+ * helpful during pg_fatal().
*/
if (PQtransactionStatus(AH->connection) == PQTRANS_ACTIVE)
(void) PQcancel(AH->connCancel, errbuf, sizeof(errbuf));
@@ -261,16 +262,17 @@ GetConnection(Archive *AHX)
static void
notice_processor(void *arg, const char *message)
{
- pg_log_generic(PG_LOG_INFO, "%s", message);
+ pg_log_info("%s", message);
}
-/* Like fatal(), but with a complaint about a particular query. */
+/* Like pg_fatal(), but with a complaint about a particular query. */
static void
die_on_query_failure(ArchiveHandle *AH, const char *query)
{
pg_log_error("query failed: %s",
PQerrorMessage(AH->connection));
- fatal("query was: %s", query);
+ pg_log_error_detail("Query was: %s", query);
+ exit(1);
}
void
@@ -311,10 +313,10 @@ ExecuteSqlQueryForSingleRow(Archive *fout, const char *query)
/* Expecting a single result only */
ntups = PQntuples(res);
if (ntups != 1)
- fatal(ngettext("query returned %d row instead of one: %s",
- "query returned %d rows instead of one: %s",
- ntups),
- ntups, query);
+ pg_fatal(ngettext("query returned %d row instead of one: %s",
+ "query returned %d rows instead of one: %s",
+ ntups),
+ ntups, query);
return res;
}
@@ -456,8 +458,8 @@ ExecuteSqlCommandBuf(Archive *AHX, const char *buf, size_t bufLen)
*/
if (AH->pgCopyIn &&
PQputCopyData(AH->connection, buf, bufLen) <= 0)
- fatal("error returned by PQputCopyData: %s",
- PQerrorMessage(AH->connection));
+ pg_fatal("error returned by PQputCopyData: %s",
+ PQerrorMessage(AH->connection));
}
else if (AH->outputKind == OUTPUT_OTHERDATA)
{
@@ -505,8 +507,8 @@ EndDBCopyMode(Archive *AHX, const char *tocEntryTag)
PGresult *res;
if (PQputCopyEnd(AH->connection, NULL) <= 0)
- fatal("error returned by PQputCopyEnd: %s",
- PQerrorMessage(AH->connection));
+ pg_fatal("error returned by PQputCopyEnd: %s",
+ PQerrorMessage(AH->connection));
/* Check command status and return to normal libpq state */
res = PQgetResult(AH->connection);
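
Two call sites in this file stop going through fatal() entirely, because their secondary information now belongs in a detail line rather than a second primary message. Reassembled from the hunks above, the resulting pattern in die_on_query_failure() is:

    static void
    die_on_query_failure(ArchiveHandle *AH, const char *query)
    {
        pg_log_error("query failed: %s",
                     PQerrorMessage(AH->connection));
        /* the detail part is decorated separately from the primary line */
        pg_log_error_detail("Query was: %s", query);
        exit(1);
    }

pg_log_error_hint() plays the same role for hints, as in the "Try --help" hunks in pg_backup_utils.c and pg_dump.c further down.
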
diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c
index 7f4e340dead..3f46f7988a1 100644
--- a/src/bin/pg_dump/pg_backup_directory.c
+++ b/src/bin/pg_dump/pg_backup_directory.c
@@ -153,7 +153,7 @@ InitArchiveFmt_Directory(ArchiveHandle *AH)
*/
if (!AH->fSpec || strcmp(AH->fSpec, "") == 0)
- fatal("no output directory specified");
+ pg_fatal("no output directory specified");
ctx->directory = AH->fSpec;
@@ -182,18 +182,18 @@ InitArchiveFmt_Directory(ArchiveHandle *AH)
}
if (errno)
- fatal("could not read directory \"%s\": %m",
- ctx->directory);
+ pg_fatal("could not read directory \"%s\": %m",
+ ctx->directory);
if (closedir(dir))
- fatal("could not close directory \"%s\": %m",
- ctx->directory);
+ pg_fatal("could not close directory \"%s\": %m",
+ ctx->directory);
}
}
if (!is_empty && mkdir(ctx->directory, 0700) < 0)
- fatal("could not create directory \"%s\": %m",
- ctx->directory);
+ pg_fatal("could not create directory \"%s\": %m",
+ ctx->directory);
}
else
{ /* Read Mode */
@@ -204,7 +204,7 @@ InitArchiveFmt_Directory(ArchiveHandle *AH)
tocFH = cfopen_read(fname, PG_BINARY_R);
if (tocFH == NULL)
- fatal("could not open input file \"%s\": %m", fname);
+ pg_fatal("could not open input file \"%s\": %m", fname);
ctx->dataFH = tocFH;
@@ -219,7 +219,7 @@ InitArchiveFmt_Directory(ArchiveHandle *AH)
/* Nothing else in the file, so close it again... */
if (cfclose(tocFH) != 0)
- fatal("could not close TOC file: %m");
+ pg_fatal("could not close TOC file: %m");
ctx->dataFH = NULL;
}
}
@@ -329,7 +329,7 @@ _StartData(ArchiveHandle *AH, TocEntry *te)
ctx->dataFH = cfopen_write(fname, PG_BINARY_W, AH->compression);
if (ctx->dataFH == NULL)
- fatal("could not open output file \"%s\": %m", fname);
+ pg_fatal("could not open output file \"%s\": %m", fname);
}
/*
@@ -352,8 +352,8 @@ _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- fatal("could not write to output file: %s",
- get_cfp_error(ctx->dataFH));
+ pg_fatal("could not write to output file: %s",
+ get_cfp_error(ctx->dataFH));
}
}
@@ -370,7 +370,7 @@ _EndData(ArchiveHandle *AH, TocEntry *te)
/* Close the file */
if (cfclose(ctx->dataFH) != 0)
- fatal("could not close data file: %m");
+ pg_fatal("could not close data file: %m");
ctx->dataFH = NULL;
}
@@ -392,7 +392,7 @@ _PrintFileData(ArchiveHandle *AH, char *filename)
cfp = cfopen_read(filename, PG_BINARY_R);
if (!cfp)
- fatal("could not open input file \"%s\": %m", filename);
+ pg_fatal("could not open input file \"%s\": %m", filename);
buf = pg_malloc(ZLIB_OUT_SIZE);
buflen = ZLIB_OUT_SIZE;
@@ -404,7 +404,7 @@ _PrintFileData(ArchiveHandle *AH, char *filename)
free(buf);
if (cfclose(cfp) != 0)
- fatal("could not close data file \"%s\": %m", filename);
+ pg_fatal("could not close data file \"%s\": %m", filename);
}
/*
@@ -444,8 +444,8 @@ _LoadBlobs(ArchiveHandle *AH)
ctx->blobsTocFH = cfopen_read(tocfname, PG_BINARY_R);
if (ctx->blobsTocFH == NULL)
- fatal("could not open large object TOC file \"%s\" for input: %m",
- tocfname);
+ pg_fatal("could not open large object TOC file \"%s\" for input: %m",
+ tocfname);
/* Read the blobs TOC file line-by-line, and process each blob */
while ((cfgets(ctx->blobsTocFH, line, MAXPGPATH)) != NULL)
@@ -455,8 +455,8 @@ _LoadBlobs(ArchiveHandle *AH)
/* Can't overflow because line and blobfname are the same length */
if (sscanf(line, "%u %" CppAsString2(MAXPGPATH) "s\n", &oid, blobfname) != 2)
- fatal("invalid line in large object TOC file \"%s\": \"%s\"",
- tocfname, line);
+ pg_fatal("invalid line in large object TOC file \"%s\": \"%s\"",
+ tocfname, line);
StartRestoreBlob(AH, oid, AH->public.ropt->dropSchema);
snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, blobfname);
@@ -464,12 +464,12 @@ _LoadBlobs(ArchiveHandle *AH)
EndRestoreBlob(AH, oid);
}
if (!cfeof(ctx->blobsTocFH))
- fatal("error reading large object TOC file \"%s\"",
- tocfname);
+ pg_fatal("error reading large object TOC file \"%s\"",
+ tocfname);
if (cfclose(ctx->blobsTocFH) != 0)
- fatal("could not close large object TOC file \"%s\": %m",
- tocfname);
+ pg_fatal("could not close large object TOC file \"%s\": %m",
+ tocfname);
ctx->blobsTocFH = NULL;
@@ -494,8 +494,8 @@ _WriteByte(ArchiveHandle *AH, const int i)
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- fatal("could not write to output file: %s",
- get_cfp_error(ctx->dataFH));
+ pg_fatal("could not write to output file: %s",
+ get_cfp_error(ctx->dataFH));
}
return 1;
@@ -530,8 +530,8 @@ _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- fatal("could not write to output file: %s",
- get_cfp_error(ctx->dataFH));
+ pg_fatal("could not write to output file: %s",
+ get_cfp_error(ctx->dataFH));
}
}
@@ -550,7 +550,7 @@ _ReadBuf(ArchiveHandle *AH, void *buf, size_t len)
* exit on short reads.
*/
if (cfread(buf, len, ctx->dataFH) != len)
- fatal("could not read from input file: end of file");
+ pg_fatal("could not read from input file: end of file");
}
/*
@@ -583,7 +583,7 @@ _CloseArchive(ArchiveHandle *AH)
/* The TOC is always created uncompressed */
tocFH = cfopen_write(fname, PG_BINARY_W, 0);
if (tocFH == NULL)
- fatal("could not open output file \"%s\": %m", fname);
+ pg_fatal("could not open output file \"%s\": %m", fname);
ctx->dataFH = tocFH;
/*
@@ -596,7 +596,7 @@ _CloseArchive(ArchiveHandle *AH)
AH->format = archDirectory;
WriteToc(AH);
if (cfclose(tocFH) != 0)
- fatal("could not close TOC file: %m");
+ pg_fatal("could not close TOC file: %m");
WriteDataChunks(AH, ctx->pstate);
ParallelBackupEnd(AH, ctx->pstate);
@@ -646,7 +646,7 @@ _StartBlobs(ArchiveHandle *AH, TocEntry *te)
/* The blob TOC file is never compressed */
ctx->blobsTocFH = cfopen_write(fname, "ab", 0);
if (ctx->blobsTocFH == NULL)
- fatal("could not open output file \"%s\": %m", fname);
+ pg_fatal("could not open output file \"%s\": %m", fname);
}
/*
@@ -665,7 +665,7 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
ctx->dataFH = cfopen_write(fname, PG_BINARY_W, AH->compression);
if (ctx->dataFH == NULL)
- fatal("could not open output file \"%s\": %m", fname);
+ pg_fatal("could not open output file \"%s\": %m", fname);
}
/*
@@ -682,13 +682,13 @@ _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
/* Close the BLOB data file itself */
if (cfclose(ctx->dataFH) != 0)
- fatal("could not close blob data file: %m");
+ pg_fatal("could not close blob data file: %m");
ctx->dataFH = NULL;
/* register the blob in blobs.toc */
len = snprintf(buf, sizeof(buf), "%u blob_%u.dat\n", oid, oid);
if (cfwrite(buf, len, ctx->blobsTocFH) != len)
- fatal("could not write to blobs TOC file");
+ pg_fatal("could not write to blobs TOC file");
}
/*
@@ -702,7 +702,7 @@ _EndBlobs(ArchiveHandle *AH, TocEntry *te)
lclContext *ctx = (lclContext *) AH->formatData;
if (cfclose(ctx->blobsTocFH) != 0)
- fatal("could not close blobs TOC file: %m");
+ pg_fatal("could not close blobs TOC file: %m");
ctx->blobsTocFH = NULL;
}
@@ -721,7 +721,7 @@ setFilePath(ArchiveHandle *AH, char *buf, const char *relativeFilename)
dname = ctx->directory;
if (strlen(dname) + 1 + strlen(relativeFilename) + 1 > MAXPGPATH)
- fatal("file name too long: \"%s\"", dname);
+ pg_fatal("file name too long: \"%s\"", dname);
strcpy(buf, dname);
strcat(buf, "/");
diff --git a/src/bin/pg_dump/pg_backup_null.c b/src/bin/pg_dump/pg_backup_null.c
index 0458979f3c0..541306d9915 100644
--- a/src/bin/pg_dump/pg_backup_null.c
+++ b/src/bin/pg_dump/pg_backup_null.c
@@ -71,7 +71,7 @@ InitArchiveFmt_Null(ArchiveHandle *AH)
* Now prevent reading...
*/
if (AH->mode == archModeRead)
- fatal("this format cannot be read");
+ pg_fatal("this format cannot be read");
}
/*
@@ -144,7 +144,7 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
bool old_blob_style = (AH->version < K_VERS_1_12);
if (oid == 0)
- fatal("invalid OID for large object");
+ pg_fatal("invalid OID for large object");
/* With an old archive we must do drop and create logic here */
if (old_blob_style && AH->public.ropt->dropSchema)
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index 2491a091b9e..39d71badb7b 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -169,14 +169,14 @@ InitArchiveFmt_Tar(ArchiveHandle *AH)
{
ctx->tarFH = fopen(AH->fSpec, PG_BINARY_W);
if (ctx->tarFH == NULL)
- fatal("could not open TOC file \"%s\" for output: %m",
- AH->fSpec);
+ pg_fatal("could not open TOC file \"%s\" for output: %m",
+ AH->fSpec);
}
else
{
ctx->tarFH = stdout;
if (ctx->tarFH == NULL)
- fatal("could not open TOC file for output: %m");
+ pg_fatal("could not open TOC file for output: %m");
}
ctx->tarFHpos = 0;
@@ -195,7 +195,7 @@ InitArchiveFmt_Tar(ArchiveHandle *AH)
* positioning.
*/
if (AH->compression != 0)
- fatal("compression is not supported by tar archive format");
+ pg_fatal("compression is not supported by tar archive format");
}
else
{ /* Read Mode */
@@ -203,14 +203,14 @@ InitArchiveFmt_Tar(ArchiveHandle *AH)
{
ctx->tarFH = fopen(AH->fSpec, PG_BINARY_R);
if (ctx->tarFH == NULL)
- fatal("could not open TOC file \"%s\" for input: %m",
- AH->fSpec);
+ pg_fatal("could not open TOC file \"%s\" for input: %m",
+ AH->fSpec);
}
else
{
ctx->tarFH = stdin;
if (ctx->tarFH == NULL)
- fatal("could not open TOC file for input: %m");
+ pg_fatal("could not open TOC file for input: %m");
}
/*
@@ -319,7 +319,7 @@ tarOpen(ArchiveHandle *AH, const char *filename, char mode)
* Couldn't find the requested file. Future: do SEEK(0) and
* retry.
*/
- fatal("could not find file \"%s\" in archive", filename);
+ pg_fatal("could not find file \"%s\" in archive", filename);
}
else
{
@@ -331,7 +331,7 @@ tarOpen(ArchiveHandle *AH, const char *filename, char mode)
if (AH->compression == 0)
tm->nFH = ctx->tarFH;
else
- fatal("compression is not supported by tar archive format");
+ pg_fatal("compression is not supported by tar archive format");
}
else
{
@@ -379,14 +379,14 @@ tarOpen(ArchiveHandle *AH, const char *filename, char mode)
#endif
if (tm->tmpFH == NULL)
- fatal("could not generate temporary file name: %m");
+ pg_fatal("could not generate temporary file name: %m");
umask(old_umask);
if (AH->compression == 0)
tm->nFH = tm->tmpFH;
else
- fatal("compression is not supported by tar archive format");
+ pg_fatal("compression is not supported by tar archive format");
tm->AH = AH;
tm->targetFile = pg_strdup(filename);
@@ -402,7 +402,7 @@ static void
tarClose(ArchiveHandle *AH, TAR_MEMBER *th)
{
if (AH->compression != 0)
- fatal("compression is not supported by tar archive format");
+ pg_fatal("compression is not supported by tar archive format");
if (th->mode == 'w')
_tarAddFile(AH, th); /* This will close the temp file */
@@ -621,8 +621,8 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
pos1 = (int) strlen(te->copyStmt) - 13;
if (pos1 < 6 || strncmp(te->copyStmt, "COPY ", 5) != 0 ||
strcmp(te->copyStmt + pos1, " FROM stdin;\n") != 0)
- fatal("unexpected COPY statement syntax: \"%s\"",
- te->copyStmt);
+ pg_fatal("unexpected COPY statement syntax: \"%s\"",
+ te->copyStmt);
/* Emit all but the FROM part ... */
ahwrite(te->copyStmt, 1, pos1, AH);
@@ -723,7 +723,7 @@ _ReadByte(ArchiveHandle *AH)
res = tarRead(&c, 1, ctx->FH);
if (res != 1)
/* We already would have exited for errors on reads, must be EOF */
- fatal("could not read from input file: end of file");
+ pg_fatal("could not read from input file: end of file");
ctx->filePos += 1;
return c;
}
@@ -746,7 +746,7 @@ _ReadBuf(ArchiveHandle *AH, void *buf, size_t len)
if (tarRead(buf, len, ctx->FH) != len)
/* We already would have exited for errors on reads, must be EOF */
- fatal("could not read from input file: end of file");
+ pg_fatal("could not read from input file: end of file");
ctx->filePos += len;
}
@@ -887,10 +887,10 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
char fname[255];
if (oid == 0)
- fatal("invalid OID for large object (%u)", oid);
+ pg_fatal("invalid OID for large object (%u)", oid);
if (AH->compression != 0)
- fatal("compression is not supported by tar archive format");
+ pg_fatal("compression is not supported by tar archive format");
sprintf(fname, "blob_%u.dat", oid);
@@ -1013,12 +1013,12 @@ _tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th)
* Find file len & go back to start.
*/
if (fseeko(tmp, 0, SEEK_END) != 0)
- fatal("error during file seek: %m");
+ pg_fatal("error during file seek: %m");
th->fileLen = ftello(tmp);
if (th->fileLen < 0)
- fatal("could not determine seek position in archive file: %m");
+ pg_fatal("could not determine seek position in archive file: %m");
if (fseeko(tmp, 0, SEEK_SET) != 0)
- fatal("error during file seek: %m");
+ pg_fatal("error during file seek: %m");
_tarWriteHeader(th);
@@ -1032,11 +1032,11 @@ _tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th)
READ_ERROR_EXIT(tmp);
if (fclose(tmp) != 0) /* This *should* delete it... */
- fatal("could not close temporary file: %m");
+ pg_fatal("could not close temporary file: %m");
if (len != th->fileLen)
- fatal("actual file length (%lld) does not match expected (%lld)",
- (long long) len, (long long) th->fileLen);
+ pg_fatal("actual file length (%lld) does not match expected (%lld)",
+ (long long) len, (long long) th->fileLen);
pad = tarPaddingBytesRequired(len);
for (i = 0; i < pad; i++)
@@ -1081,7 +1081,7 @@ _tarPositionTo(ArchiveHandle *AH, const char *filename)
if (!_tarGetHeader(AH, th))
{
if (filename)
- fatal("could not find header for file \"%s\" in tar archive", filename);
+ pg_fatal("could not find header for file \"%s\" in tar archive", filename);
else
{
/*
@@ -1099,9 +1099,9 @@ _tarPositionTo(ArchiveHandle *AH, const char *filename)
id = atoi(th->targetFile);
if ((TocIDRequired(AH, id) & REQ_DATA) != 0)
- fatal("restoring data out of order is not supported in this archive format: "
- "\"%s\" is required, but comes before \"%s\" in the archive file.",
- th->targetFile, filename);
+ pg_fatal("restoring data out of order is not supported in this archive format: "
+ "\"%s\" is required, but comes before \"%s\" in the archive file.",
+ th->targetFile, filename);
/* Header doesn't match, so read to next header */
len = th->fileLen;
@@ -1112,7 +1112,7 @@ _tarPositionTo(ArchiveHandle *AH, const char *filename)
_tarReadRaw(AH, &header[0], TAR_BLOCK_SIZE, NULL, ctx->tarFH);
if (!_tarGetHeader(AH, th))
- fatal("could not find header for file \"%s\" in tar archive", filename);
+ pg_fatal("could not find header for file \"%s\" in tar archive", filename);
}
ctx->tarNextMember = ctx->tarFHpos + th->fileLen
@@ -1146,10 +1146,10 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
return 0;
if (len != TAR_BLOCK_SIZE)
- fatal(ngettext("incomplete tar header found (%lu byte)",
- "incomplete tar header found (%lu bytes)",
- len),
- (unsigned long) len);
+ pg_fatal(ngettext("incomplete tar header found (%lu byte)",
+ "incomplete tar header found (%lu bytes)",
+ len),
+ (unsigned long) len);
/* Calc checksum */
chk = tarChecksum(h);
@@ -1185,8 +1185,8 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
tag, (unsigned long long) hPos, (unsigned long long) len, sum);
if (chk != sum)
- fatal("corrupt tar header found in %s (expected %d, computed %d) file position %llu",
- tag, sum, chk, (unsigned long long) ftello(ctx->tarFH));
+ pg_fatal("corrupt tar header found in %s (expected %d, computed %d) file position %llu",
+ tag, sum, chk, (unsigned long long) ftello(ctx->tarFH));
th->targetFile = pg_strdup(tag);
th->fileLen = len;
diff --git a/src/bin/pg_dump/pg_backup_utils.c b/src/bin/pg_dump/pg_backup_utils.c
index 57140a5504f..e40890cb264 100644
--- a/src/bin/pg_dump/pg_backup_utils.c
+++ b/src/bin/pg_dump/pg_backup_utils.c
@@ -52,8 +52,7 @@ set_dump_section(const char *arg, int *dumpSections)
else
{
pg_log_error("unrecognized section name: \"%s\"", arg);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
}
@@ -64,10 +63,7 @@ void
on_exit_nicely(on_exit_nicely_callback function, void *arg)
{
if (on_exit_nicely_index >= MAX_ON_EXIT_NICELY)
- {
- pg_log_fatal("out of on_exit_nicely slots");
- exit_nicely(1);
- }
+ pg_fatal("out of on_exit_nicely slots");
on_exit_nicely_list[on_exit_nicely_index].function = function;
on_exit_nicely_list[on_exit_nicely_index].arg = arg;
on_exit_nicely_index++;
diff --git a/src/bin/pg_dump/pg_backup_utils.h b/src/bin/pg_dump/pg_backup_utils.h
index 6ebc3afee4a..5b1c51554da 100644
--- a/src/bin/pg_dump/pg_backup_utils.h
+++ b/src/bin/pg_dump/pg_backup_utils.h
@@ -31,6 +31,12 @@ extern void set_dump_section(const char *arg, int *dumpSections);
extern void on_exit_nicely(on_exit_nicely_callback function, void *arg);
extern void exit_nicely(int code) pg_attribute_noreturn();
-#define fatal(...) do { pg_log_error(__VA_ARGS__); exit_nicely(1); } while(0)
+/* In pg_dump, we modify pg_fatal to call exit_nicely instead of exit */
+#undef pg_fatal
+#define pg_fatal(...) do { \
+ if (likely(__pg_log_level <= PG_LOG_ERROR)) \
+ pg_log_generic(PG_LOG_ERROR, PG_LOG_PRIMARY, __VA_ARGS__); \
+ exit_nicely(1); \
+ } while(0)
#endif /* PG_BACKUP_UTILS_H */
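
This override is the load-bearing piece of the commit: the stock pg_fatal() ends in a plain exit(1), but pg_dump needs exit_nicely(), which walks the callbacks registered via on_exit_nicely() (closing connections, terminating parallel workers) before the process dies. A minimal sketch of that interplay; the cleanup callback and filename variable are hypothetical, while on_exit_nicely(), exit_nicely(), and the overridden pg_fatal() come from the header above:

    #include <unistd.h>
    #include "pg_backup_utils.h"

    /* Hypothetical callback: remove a partially written output file */
    static void
    remove_partial_file(int code, void *arg)
    {
        if (code != 0)
            unlink((const char *) arg);     /* best effort, ignore errors */
    }

    ...
    on_exit_nicely(remove_partial_file, (void *) filename);
    ...
    /* Any later pg_fatal() logs the error, then reaches exit_nicely(1),
     * which runs remove_partial_file() before terminating. */
    pg_fatal("could not write to output file: %m");
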
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 196f6d23a3e..969e2a7a462 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -620,7 +620,8 @@ main(int argc, char **argv)
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
}
@@ -637,8 +638,7 @@ main(int argc, char **argv)
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
@@ -655,32 +655,26 @@ main(int argc, char **argv)
dopt.sequence_data = 1;
if (dopt.dataOnly && dopt.schemaOnly)
- {
- pg_log_error("options -s/--schema-only and -a/--data-only cannot be used together");
- exit_nicely(1);
- }
+ pg_fatal("options -s/--schema-only and -a/--data-only cannot be used together");
if (dopt.schemaOnly && foreign_servers_include_patterns.head != NULL)
- fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
+ pg_fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
if (numWorkers > 1 && foreign_servers_include_patterns.head != NULL)
- fatal("option --include-foreign-data is not supported with parallel backup");
+ pg_fatal("option --include-foreign-data is not supported with parallel backup");
if (dopt.dataOnly && dopt.outputClean)
- {
- pg_log_error("options -c/--clean and -a/--data-only cannot be used together");
- exit_nicely(1);
- }
+ pg_fatal("options -c/--clean and -a/--data-only cannot be used together");
if (dopt.if_exists && !dopt.outputClean)
- fatal("option --if-exists requires option -c/--clean");
+ pg_fatal("option --if-exists requires option -c/--clean");
/*
* --inserts are already implied above if --column-inserts or
* --rows-per-insert were specified.
*/
if (dopt.do_nothing && dopt.dump_inserts == 0)
- fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
+ pg_fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
/* Identify archive format to emit */
archiveFormat = parseArchiveFormat(format, &archiveMode);
@@ -715,7 +709,7 @@ main(int argc, char **argv)
/* Parallel backup only in the directory archive format so far */
if (archiveFormat != archDirectory && numWorkers > 1)
- fatal("parallel backup only supported by the directory format");
+ pg_fatal("parallel backup only supported by the directory format");
/* Open the output file */
fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
@@ -770,7 +764,7 @@ main(int argc, char **argv)
&schema_include_oids,
strict_names);
if (schema_include_oids.head == NULL)
- fatal("no matching schemas were found");
+ pg_fatal("no matching schemas were found");
}
expand_schema_name_patterns(fout, &schema_exclude_patterns,
&schema_exclude_oids,
@@ -784,7 +778,7 @@ main(int argc, char **argv)
&table_include_oids,
strict_names);
if (table_include_oids.head == NULL)
- fatal("no matching tables were found");
+ pg_fatal("no matching tables were found");
}
expand_table_name_patterns(fout, &table_exclude_patterns,
&table_exclude_oids,
@@ -806,7 +800,7 @@ main(int argc, char **argv)
&extension_include_oids,
strict_names);
if (extension_include_oids.head == NULL)
- fatal("no matching extensions were found");
+ pg_fatal("no matching extensions were found");
}
/*
@@ -1087,8 +1081,8 @@ setup_connection(Archive *AH, const char *dumpencoding,
if (dumpencoding)
{
if (PQsetClientEncoding(conn, dumpencoding) < 0)
- fatal("invalid client encoding \"%s\" specified",
- dumpencoding);
+ pg_fatal("invalid client encoding \"%s\" specified",
+ dumpencoding);
}
/*
@@ -1225,7 +1219,7 @@ setup_connection(Archive *AH, const char *dumpencoding,
else if (AH->numWorkers > 1)
{
if (AH->isStandby && AH->remoteVersion < 100000)
- fatal("parallel dumps from standby servers are not supported by this server version");
+ pg_fatal("parallel dumps from standby servers are not supported by this server version");
AH->sync_snapshot_id = get_synchronized_snapshot(AH);
}
}
@@ -1290,7 +1284,7 @@ parseArchiveFormat(const char *format, ArchiveMode *mode)
else if (pg_strcasecmp(format, "tar") == 0)
archiveFormat = archTar;
else
- fatal("invalid output format \"%s\" specified", format);
+ pg_fatal("invalid output format \"%s\" specified", format);
return archiveFormat;
}
@@ -1328,7 +1322,7 @@ expand_schema_name_patterns(Archive *fout,
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (strict_names && PQntuples(res) == 0)
- fatal("no matching schemas were found for pattern \"%s\"", cell->val);
+ pg_fatal("no matching schemas were found for pattern \"%s\"", cell->val);
for (i = 0; i < PQntuples(res); i++)
{
@@ -1375,7 +1369,7 @@ expand_extension_name_patterns(Archive *fout,
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (strict_names && PQntuples(res) == 0)
- fatal("no matching extensions were found for pattern \"%s\"", cell->val);
+ pg_fatal("no matching extensions were found for pattern \"%s\"", cell->val);
for (i = 0; i < PQntuples(res); i++)
{
@@ -1422,7 +1416,7 @@ expand_foreign_server_name_patterns(Archive *fout,
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (PQntuples(res) == 0)
- fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);
+ pg_fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);
for (i = 0; i < PQntuples(res); i++)
simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
@@ -1485,7 +1479,7 @@ expand_table_name_patterns(Archive *fout,
PQclear(ExecuteSqlQueryForSingleRow(fout,
ALWAYS_SECURE_SEARCH_PATH_SQL));
if (strict_names && PQntuples(res) == 0)
- fatal("no matching tables were found for pattern \"%s\"", cell->val);
+ pg_fatal("no matching tables were found for pattern \"%s\"", cell->val);
for (i = 0; i < PQntuples(res); i++)
{
@@ -2033,8 +2027,8 @@ dumpTableData_copy(Archive *fout, const void *dcontext)
{
/* copy data transfer failed */
pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
- pg_log_error("Error message from server: %s", PQerrorMessage(conn));
- pg_log_error("The command was: %s", q->data);
+ pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
+ pg_log_error_detail("Command was: %s", q->data);
exit_nicely(1);
}
@@ -2043,8 +2037,8 @@ dumpTableData_copy(Archive *fout, const void *dcontext)
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
- pg_log_error("Error message from server: %s", PQerrorMessage(conn));
- pg_log_error("The command was: %s", q->data);
+ pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
+ pg_log_error_detail("Command was: %s", q->data);
exit_nicely(1);
}
PQclear(res);
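
The error/detail split mostly changes what stderr looks like: with the common logging prefix, each message class carries its own tag. Approximately (reconstructed, not captured output; table name, server message, and query text are invented):

/*
 * pg_dump: error: Dumping the contents of table "t1" failed: PQgetResult() failed.
 * pg_dump: detail: Error message from server: server closed the connection unexpectedly
 * pg_dump: detail: Command was: COPY public.t1 (a, b) TO stdout;
 */
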
@@ -2124,8 +2118,8 @@ dumpTableData_insert(Archive *fout, const void *dcontext)
/* cross-check field count, allowing for dummy NULL if any */
if (nfields != PQnfields(res) &&
!(nfields == 0 && PQnfields(res) == 1))
- fatal("wrong number of fields retrieved from table \"%s\"",
- tbinfo->dobj.name);
+ pg_fatal("wrong number of fields retrieved from table \"%s\"",
+ tbinfo->dobj.name);
/*
* First time through, we build as much of the INSERT statement as
@@ -2877,8 +2871,8 @@ dumpDatabase(Archive *fout)
else if (datlocprovider[0] == 'i')
appendPQExpBufferStr(creaQry, "icu");
else
- fatal("unrecognized locale provider: %s",
- datlocprovider);
+ pg_fatal("unrecognized locale provider: %s",
+ datlocprovider);
if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
{
@@ -3257,7 +3251,7 @@ dumpSearchPath(Archive *AH)
"SELECT pg_catalog.current_schemas(false)");
if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
- fatal("could not parse result of current_schemas()");
+ pg_fatal("could not parse result of current_schemas()");
/*
* We use set_config(), not a simple "SET search_path" command, because
@@ -3483,8 +3477,8 @@ dumpBlobs(Archive *fout, const void *arg)
/* Open the BLOB */
loFd = lo_open(conn, blobOid, INV_READ);
if (loFd == -1)
- fatal("could not open large object %u: %s",
- blobOid, PQerrorMessage(conn));
+ pg_fatal("could not open large object %u: %s",
+ blobOid, PQerrorMessage(conn));
StartBlob(fout, blobOid);
@@ -3493,8 +3487,8 @@ dumpBlobs(Archive *fout, const void *arg)
{
cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
if (cnt < 0)
- fatal("error reading large object %u: %s",
- blobOid, PQerrorMessage(conn));
+ pg_fatal("error reading large object %u: %s",
+ blobOid, PQerrorMessage(conn));
WriteData(fout, buf, cnt);
} while (cnt > 0);
@@ -3740,11 +3734,8 @@ dumpPolicy(Archive *fout, const PolicyInfo *polinfo)
else if (polinfo->polcmd == 'd')
cmd = " FOR DELETE";
else
- {
- pg_log_error("unexpected policy command type: %c",
- polinfo->polcmd);
- exit_nicely(1);
- }
+ pg_fatal("unexpected policy command type: %c",
+ polinfo->polcmd);
query = createPQExpBuffer();
delqry = createPQExpBuffer();
@@ -4193,7 +4184,7 @@ getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
if (!parsePGArray(PQgetvalue(res, i, i_prattrs),
&attnames, &nattnames))
- fatal("could not parse %s array", "prattrs");
+ pg_fatal("could not parse %s array", "prattrs");
attribs = createPQExpBuffer();
for (int k = 0; k < nattnames; k++)
{
@@ -4510,7 +4501,7 @@ dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo)
/* Build list of quoted publications and append them to query. */
if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
- fatal("could not parse %s array", "subpublications");
+ pg_fatal("could not parse %s array", "subpublications");
publications = createPQExpBuffer();
for (i = 0; i < npubnames; i++)
@@ -4892,8 +4883,8 @@ binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
extobj = NULL;
}
if (extobj == NULL)
- fatal("could not find parent extension for %s %s",
- objtype, objname);
+ pg_fatal("could not find parent extension for %s %s",
+ objtype, objname);
appendPQExpBufferStr(upgrade_buffer,
"\n-- For binary upgrade, handle extension membership the hard way\n");
@@ -5037,7 +5028,7 @@ findNamespace(Oid nsoid)
nsinfo = findNamespaceByOid(nsoid);
if (nsinfo == NULL)
- fatal("schema with OID %u does not exist", nsoid);
+ pg_fatal("schema with OID %u does not exist", nsoid);
return nsinfo;
}
@@ -6491,8 +6482,8 @@ getOwnedSeqs(Archive *fout, TableInfo tblinfo[], int numTables)
owning_tab = findTableByOid(seqinfo->owning_tab);
if (owning_tab == NULL)
- fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
- seqinfo->owning_tab, seqinfo->dobj.catId.oid);
+ pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
+ seqinfo->owning_tab, seqinfo->dobj.catId.oid);
/*
* Only dump identity sequences if we're going to dump the table that
@@ -6795,12 +6786,12 @@ getIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
break;
}
if (curtblindx >= numTables)
- fatal("unrecognized table OID %u", indrelid);
+ pg_fatal("unrecognized table OID %u", indrelid);
/* cross-check that we only got requested tables */
if (!tbinfo->hasindex ||
!tbinfo->interesting)
- fatal("unexpected index data for table \"%s\"",
- tbinfo->dobj.name);
+ pg_fatal("unexpected index data for table \"%s\"",
+ tbinfo->dobj.name);
/* Save data for this table */
tbinfo->indexes = indxinfo + j;
@@ -7062,7 +7053,7 @@ getConstraints(Archive *fout, TableInfo tblinfo[], int numTables)
break;
}
if (curtblindx >= numTables)
- fatal("unrecognized table OID %u", conrelid);
+ pg_fatal("unrecognized table OID %u", conrelid);
}
constrinfo[j].dobj.objType = DO_FK_CONSTRAINT;
@@ -7294,8 +7285,8 @@ getRules(Archive *fout, int *numRules)
ruletableoid = atooid(PQgetvalue(res, i, i_ruletable));
ruleinfo[i].ruletable = findTableByOid(ruletableoid);
if (ruleinfo[i].ruletable == NULL)
- fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found",
- ruletableoid, ruleinfo[i].dobj.catId.oid);
+ pg_fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found",
+ ruletableoid, ruleinfo[i].dobj.catId.oid);
ruleinfo[i].dobj.namespace = ruleinfo[i].ruletable->dobj.namespace;
ruleinfo[i].dobj.dump = ruleinfo[i].ruletable->dobj.dump;
ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type));
@@ -7533,7 +7524,7 @@ getTriggers(Archive *fout, TableInfo tblinfo[], int numTables)
break;
}
if (curtblindx >= numTables)
- fatal("unrecognized table OID %u", tgrelid);
+ pg_fatal("unrecognized table OID %u", tgrelid);
/* Save data for this table */
tbinfo->triggers = tginfo + j;
@@ -7585,10 +7576,10 @@ getTriggers(Archive *fout, TableInfo tblinfo[], int numTables)
if (OidIsValid(tginfo[j].tgconstrrelid))
{
if (PQgetisnull(res, j, i_tgconstrrelname))
- fatal("query produced null referenced table name for foreign key trigger \"%s\" on table \"%s\" (OID of table: %u)",
- tginfo[j].dobj.name,
- tbinfo->dobj.name,
- tginfo[j].tgconstrrelid);
+ pg_fatal("query produced null referenced table name for foreign key trigger \"%s\" on table \"%s\" (OID of table: %u)",
+ tginfo[j].dobj.name,
+ tbinfo->dobj.name,
+ tginfo[j].tgconstrrelid);
tginfo[j].tgconstrrelname = pg_strdup(PQgetvalue(res, j, i_tgconstrrelname));
}
else
@@ -8200,12 +8191,12 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
break;
}
if (curtblindx >= numTables)
- fatal("unrecognized table OID %u", attrelid);
+ pg_fatal("unrecognized table OID %u", attrelid);
/* cross-check that we only got requested tables */
if (tbinfo->relkind == RELKIND_SEQUENCE ||
!tbinfo->interesting)
- fatal("unexpected column data for table \"%s\"",
- tbinfo->dobj.name);
+ pg_fatal("unexpected column data for table \"%s\"",
+ tbinfo->dobj.name);
/* Save data for this table */
tbinfo->numatts = numatts;
@@ -8234,8 +8225,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
for (int j = 0; j < numatts; j++, r++)
{
if (j + 1 != atoi(PQgetvalue(res, r, i_attnum)))
- fatal("invalid column numbering in table \"%s\"",
- tbinfo->dobj.name);
+ pg_fatal("invalid column numbering in table \"%s\"",
+ tbinfo->dobj.name);
tbinfo->attnames[j] = pg_strdup(PQgetvalue(res, r, i_attname));
tbinfo->atttypnames[j] = pg_strdup(PQgetvalue(res, r, i_atttypname));
tbinfo->atttypmod[j] = atoi(PQgetvalue(res, r, i_atttypmod));
@@ -8321,12 +8312,12 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
break;
}
if (curtblindx >= numTables)
- fatal("unrecognized table OID %u", adrelid);
+ pg_fatal("unrecognized table OID %u", adrelid);
}
if (adnum <= 0 || adnum > tbinfo->numatts)
- fatal("invalid adnum value %d for table \"%s\"",
- adnum, tbinfo->dobj.name);
+ pg_fatal("invalid adnum value %d for table \"%s\"",
+ adnum, tbinfo->dobj.name);
/*
* dropped columns shouldn't have defaults, but just in case,
@@ -8475,7 +8466,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
break;
}
if (curtblindx >= numTables)
- fatal("unrecognized table OID %u", conrelid);
+ pg_fatal("unrecognized table OID %u", conrelid);
if (numcons != tbinfo->ncheck)
{
@@ -8483,7 +8474,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
"expected %d check constraints on table \"%s\" but found %d",
tbinfo->ncheck),
tbinfo->ncheck, tbinfo->dobj.name, numcons);
- pg_log_error("(The system catalogs might be corrupted.)");
+ pg_log_error_hint("The system catalogs might be corrupted.");
exit_nicely(1);
}
@@ -9173,7 +9164,7 @@ getRoleName(const char *roleoid_str)
}
}
- fatal("role with OID %u does not exist", roleoid);
+ pg_fatal("role with OID %u does not exist", roleoid);
return NULL; /* keep compiler quiet */
}
@@ -11641,7 +11632,7 @@ dumpFunc(Archive *fout, const FuncInfo *finfo)
if (*proconfig)
{
if (!parsePGArray(proconfig, &configitems, &nconfigitems))
- fatal("could not parse %s array", "proconfig");
+ pg_fatal("could not parse %s array", "proconfig");
}
else
{
@@ -11710,8 +11701,8 @@ dumpFunc(Archive *fout, const FuncInfo *finfo)
else if (provolatile[0] == PROVOLATILE_STABLE)
appendPQExpBufferStr(q, " STABLE");
else if (provolatile[0] != PROVOLATILE_VOLATILE)
- fatal("unrecognized provolatile value for function \"%s\"",
- finfo->dobj.name);
+ pg_fatal("unrecognized provolatile value for function \"%s\"",
+ finfo->dobj.name);
}
if (proisstrict[0] == 't')
@@ -11760,8 +11751,8 @@ dumpFunc(Archive *fout, const FuncInfo *finfo)
else if (proparallel[0] == PROPARALLEL_RESTRICTED)
appendPQExpBufferStr(q, " PARALLEL RESTRICTED");
else if (proparallel[0] != PROPARALLEL_UNSAFE)
- fatal("unrecognized proparallel value for function \"%s\"",
- finfo->dobj.name);
+ pg_fatal("unrecognized proparallel value for function \"%s\"",
+ finfo->dobj.name);
}
for (i = 0; i < nconfigitems; i++)
@@ -11891,8 +11882,8 @@ dumpCast(Archive *fout, const CastInfo *cast)
{
funcInfo = findFuncByOid(cast->castfunc);
if (funcInfo == NULL)
- fatal("could not find function definition for function with OID %u",
- cast->castfunc);
+ pg_fatal("could not find function definition for function with OID %u",
+ cast->castfunc);
}
defqry = createPQExpBuffer();
@@ -11997,15 +11988,15 @@ dumpTransform(Archive *fout, const TransformInfo *transform)
{
fromsqlFuncInfo = findFuncByOid(transform->trffromsql);
if (fromsqlFuncInfo == NULL)
- fatal("could not find function definition for function with OID %u",
- transform->trffromsql);
+ pg_fatal("could not find function definition for function with OID %u",
+ transform->trffromsql);
}
if (OidIsValid(transform->trftosql))
{
tosqlFuncInfo = findFuncByOid(transform->trftosql);
if (tosqlFuncInfo == NULL)
- fatal("could not find function definition for function with OID %u",
- transform->trftosql);
+ pg_fatal("could not find function definition for function with OID %u",
+ transform->trftosql);
}
defqry = createPQExpBuffer();
@@ -13063,8 +13054,8 @@ dumpCollation(Archive *fout, const CollInfo *collinfo)
/* to allow dumping pg_catalog; not accepted on input */
appendPQExpBufferStr(q, "default");
else
- fatal("unrecognized collation provider: %s",
- collprovider);
+ pg_fatal("unrecognized collation provider: %s",
+ collprovider);
if (strcmp(PQgetvalue(res, 0, i_collisdeterministic), "f") == 0)
appendPQExpBufferStr(q, ", deterministic = false");
@@ -13470,8 +13461,8 @@ dumpAgg(Archive *fout, const AggInfo *agginfo)
appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = READ_WRITE");
break;
default:
- fatal("unrecognized aggfinalmodify value for aggregate \"%s\"",
- agginfo->aggfn.dobj.name);
+ pg_fatal("unrecognized aggfinalmodify value for aggregate \"%s\"",
+ agginfo->aggfn.dobj.name);
break;
}
}
@@ -13526,8 +13517,8 @@ dumpAgg(Archive *fout, const AggInfo *agginfo)
appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = READ_WRITE");
break;
default:
- fatal("unrecognized aggmfinalmodify value for aggregate \"%s\"",
- agginfo->aggfn.dobj.name);
+ pg_fatal("unrecognized aggmfinalmodify value for aggregate \"%s\"",
+ agginfo->aggfn.dobj.name);
break;
}
}
@@ -13551,8 +13542,8 @@ dumpAgg(Archive *fout, const AggInfo *agginfo)
else if (proparallel[0] == PROPARALLEL_RESTRICTED)
appendPQExpBufferStr(details, ",\n PARALLEL = restricted");
else if (proparallel[0] != PROPARALLEL_UNSAFE)
- fatal("unrecognized proparallel value for function \"%s\"",
- agginfo->aggfn.dobj.name);
+ pg_fatal("unrecognized proparallel value for function \"%s\"",
+ agginfo->aggfn.dobj.name);
}
appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n",
@@ -14244,8 +14235,8 @@ dumpDefaultACL(Archive *fout, const DefaultACLInfo *daclinfo)
break;
default:
/* shouldn't get here */
- fatal("unrecognized object type in default privileges: %d",
- (int) daclinfo->defaclobjtype);
+ pg_fatal("unrecognized object type in default privileges: %d",
+ (int) daclinfo->defaclobjtype);
type = ""; /* keep compiler quiet */
}
@@ -14260,8 +14251,8 @@ dumpDefaultACL(Archive *fout, const DefaultACLInfo *daclinfo)
daclinfo->defaclrole,
fout->remoteVersion,
q))
- fatal("could not parse default ACL list (%s)",
- daclinfo->dacl.acl);
+ pg_fatal("could not parse default ACL list (%s)",
+ daclinfo->dacl.acl);
if (daclinfo->dobj.dump & DUMP_COMPONENT_ACL)
ArchiveEntry(fout, daclinfo->dobj.catId, daclinfo->dobj.dumpId,
@@ -14342,8 +14333,8 @@ dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
if (!buildACLCommands(name, subname, nspname, type,
initprivs, acldefault, owner,
"", fout->remoteVersion, sql))
- fatal("could not parse initial ACL list (%s) or default (%s) for object \"%s\" (%s)",
- initprivs, acldefault, name, type);
+ pg_fatal("could not parse initial ACL list (%s) or default (%s) for object \"%s\" (%s)",
+ initprivs, acldefault, name, type);
appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
}
@@ -14367,8 +14358,8 @@ dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
if (!buildACLCommands(name, subname, nspname, type,
acls, baseacls, owner,
"", fout->remoteVersion, sql))
- fatal("could not parse ACL list (%s) or default (%s) for object \"%s\" (%s)",
- acls, baseacls, name, type);
+ pg_fatal("could not parse ACL list (%s) or default (%s) for object \"%s\" (%s)",
+ acls, baseacls, name, type);
if (sql->len > 0)
{
@@ -14905,18 +14896,18 @@ createViewAsClause(Archive *fout, const TableInfo *tbinfo)
if (PQntuples(res) != 1)
{
if (PQntuples(res) < 1)
- fatal("query to obtain definition of view \"%s\" returned no data",
- tbinfo->dobj.name);
+ pg_fatal("query to obtain definition of view \"%s\" returned no data",
+ tbinfo->dobj.name);
else
- fatal("query to obtain definition of view \"%s\" returned more than one definition",
- tbinfo->dobj.name);
+ pg_fatal("query to obtain definition of view \"%s\" returned more than one definition",
+ tbinfo->dobj.name);
}
len = PQgetlength(res, 0, 0);
if (len == 0)
- fatal("definition of view \"%s\" appears to be empty (length zero)",
- tbinfo->dobj.name);
+ pg_fatal("definition of view \"%s\" appears to be empty (length zero)",
+ tbinfo->dobj.name);
/* Strip off the trailing semicolon so that other things may follow. */
Assert(PQgetvalue(res, 0, 0)[len - 1] == ';');
@@ -15928,8 +15919,8 @@ getAttrName(int attrnum, const TableInfo *tblInfo)
case TableOidAttributeNumber:
return "tableoid";
}
- fatal("invalid column number %d for table \"%s\"",
- attrnum, tblInfo->dobj.name);
+ pg_fatal("invalid column number %d for table \"%s\"",
+ attrnum, tblInfo->dobj.name);
return NULL; /* keep compiler quiet */
}
@@ -16006,11 +15997,11 @@ dumpIndex(Archive *fout, const IndxInfo *indxinfo)
int j;
if (!parsePGArray(indstatcols, &indstatcolsarray, &nstatcols))
- fatal("could not parse index statistic columns");
+ pg_fatal("could not parse index statistic columns");
if (!parsePGArray(indstatvals, &indstatvalsarray, &nstatvals))
- fatal("could not parse index statistic values");
+ pg_fatal("could not parse index statistic values");
if (nstatcols != nstatvals)
- fatal("mismatched number of columns and values for index statistics");
+ pg_fatal("mismatched number of columns and values for index statistics");
for (j = 0; j < nstatcols; j++)
{
@@ -16228,8 +16219,8 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
indxinfo = (IndxInfo *) findObjectByDumpId(coninfo->conindex);
if (indxinfo == NULL)
- fatal("missing index for constraint \"%s\"",
- coninfo->dobj.name);
+ pg_fatal("missing index for constraint \"%s\"",
+ coninfo->dobj.name);
if (dopt->binary_upgrade)
binary_upgrade_set_pg_class_oids(fout, q,
@@ -16456,8 +16447,8 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
}
else
{
- fatal("unrecognized constraint type: %c",
- coninfo->contype);
+ pg_fatal("unrecognized constraint type: %c",
+ coninfo->contype);
}
/* Dump Constraint Comments --- only works for table constraints */
@@ -16557,13 +16548,10 @@ dumpSequence(Archive *fout, const TableInfo *tbinfo)
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (PQntuples(res) != 1)
- {
- pg_log_error(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
- "query to get data of sequence \"%s\" returned %d rows (expected 1)",
- PQntuples(res)),
- tbinfo->dobj.name, PQntuples(res));
- exit_nicely(1);
- }
+ pg_fatal(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
+ "query to get data of sequence \"%s\" returned %d rows (expected 1)",
+ PQntuples(res)),
+ tbinfo->dobj.name, PQntuples(res));
seqtype = PQgetvalue(res, 0, 0);
startv = PQgetvalue(res, 0, 1);
@@ -16592,7 +16580,7 @@ dumpSequence(Archive *fout, const TableInfo *tbinfo)
}
else
{
- fatal("unrecognized sequence type: %s", seqtype);
+ pg_fatal("unrecognized sequence type: %s", seqtype);
default_minv = default_maxv = 0; /* keep compiler quiet */
}
@@ -16725,8 +16713,8 @@ dumpSequence(Archive *fout, const TableInfo *tbinfo)
TableInfo *owning_tab = findTableByOid(tbinfo->owning_tab);
if (owning_tab == NULL)
- fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
- tbinfo->owning_tab, tbinfo->dobj.catId.oid);
+ pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
+ tbinfo->owning_tab, tbinfo->dobj.catId.oid);
if (owning_tab->dobj.dump & DUMP_COMPONENT_DEFINITION)
{
@@ -16789,13 +16777,10 @@ dumpSequenceData(Archive *fout, const TableDataInfo *tdinfo)
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (PQntuples(res) != 1)
- {
- pg_log_error(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
- "query to get data of sequence \"%s\" returned %d rows (expected 1)",
- PQntuples(res)),
- tbinfo->dobj.name, PQntuples(res));
- exit_nicely(1);
- }
+ pg_fatal(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
+ "query to get data of sequence \"%s\" returned %d rows (expected 1)",
+ PQntuples(res)),
+ tbinfo->dobj.name, PQntuples(res));
last = PQgetvalue(res, 0, 0);
called = (strcmp(PQgetvalue(res, 0, 1), "t") == 0);
@@ -16884,10 +16869,7 @@ dumpTrigger(Archive *fout, const TriggerInfo *tginfo)
else if (TRIGGER_FOR_INSTEAD(tginfo->tgtype))
appendPQExpBufferStr(query, "INSTEAD OF");
else
- {
- pg_log_error("unexpected tgtype value: %d", tginfo->tgtype);
- exit_nicely(1);
- }
+ pg_fatal("unexpected tgtype value: %d", tginfo->tgtype);
findx = 0;
if (TRIGGER_FOR_INSERT(tginfo->tgtype))
@@ -16959,11 +16941,10 @@ dumpTrigger(Archive *fout, const TriggerInfo *tginfo)
if (p + tlen >= tgargs + lentgargs)
{
/* hm, not found before end of bytea value... */
- pg_log_error("invalid argument string (%s) for trigger \"%s\" on table \"%s\"",
- tginfo->tgargs,
- tginfo->dobj.name,
- tbinfo->dobj.name);
- exit_nicely(1);
+ pg_fatal("invalid argument string (%s) for trigger \"%s\" on table \"%s\"",
+ tginfo->tgargs,
+ tginfo->dobj.name,
+ tbinfo->dobj.name);
}
if (findx > 0)
@@ -17229,11 +17210,8 @@ dumpRule(Archive *fout, const RuleInfo *rinfo)
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (PQntuples(res) != 1)
- {
- pg_log_error("query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned",
- rinfo->dobj.name, tbinfo->dobj.name);
- exit_nicely(1);
- }
+ pg_fatal("query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned",
+ rinfo->dobj.name, tbinfo->dobj.name);
printfPQExpBuffer(cmd, "%s\n", PQgetvalue(res, 0, 0));
@@ -17471,11 +17449,11 @@ processExtensionTables(Archive *fout, ExtensionInfo extinfo[],
int j;
if (!parsePGArray(extconfig, &extconfigarray, &nconfigitems))
- fatal("could not parse %s array", "extconfig");
+ pg_fatal("could not parse %s array", "extconfig");
if (!parsePGArray(extcondition, &extconditionarray, &nconditionitems))
- fatal("could not parse %s array", "extcondition");
+ pg_fatal("could not parse %s array", "extcondition");
if (nconfigitems != nconditionitems)
- fatal("mismatched number of configurations and conditions for extension");
+ pg_fatal("mismatched number of configurations and conditions for extension");
for (j = 0; j < nconfigitems; j++)
{
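
Two shapes recur throughout pg_dump.c after this patch: single-message failures collapse to one pg_fatal() call, while call sites that also want a hint keep the explicit three-step sequence, because pg_fatal() has no hint parameter. A sketch with hypothetical condition and variable names (progname is the real global; the flags are invented):

if (data_only && schema_only)	/* hypothetical flags */
	pg_fatal("options -s/--schema-only and -a/--data-only cannot be used together");

if (extra_args)					/* hypothetical flag */
{
	pg_log_error("too many command-line arguments (first is \"%s\")", first_arg);
	pg_log_error_hint("Try \"%s --help\" for more information.", progname);
	exit_nicely(1);
}
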
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index 15920908395..5de3241eb49 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -419,13 +419,13 @@ TopoSort(DumpableObject **objs,
obj = objs[i];
j = obj->dumpId;
if (j <= 0 || j > maxDumpId)
- fatal("invalid dumpId %d", j);
+ pg_fatal("invalid dumpId %d", j);
idMap[j] = i;
for (j = 0; j < obj->nDeps; j++)
{
k = obj->dependencies[j];
if (k <= 0 || k > maxDumpId)
- fatal("invalid dependency %d", k);
+ pg_fatal("invalid dependency %d", k);
beforeConstraints[k]++;
}
}
@@ -658,7 +658,7 @@ findDependencyLoops(DumpableObject **objs, int nObjs, int totObjs)
/* We'd better have fixed at least one loop */
if (!fixedloop)
- fatal("could not identify dependency loop");
+ pg_fatal("could not identify dependency loop");
free(workspace);
free(searchFailed);
@@ -1233,9 +1233,9 @@ repairDependencyLoop(DumpableObject **loop,
"there are circular foreign-key constraints among these tables:",
nLoop));
for (i = 0; i < nLoop; i++)
- pg_log_generic(PG_LOG_INFO, " %s", loop[i]->name);
- pg_log_generic(PG_LOG_INFO, "You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.");
- pg_log_generic(PG_LOG_INFO, "Consider using a full dump instead of a --data-only dump to avoid this problem.");
+ pg_log_info(" %s", loop[i]->name);
+ pg_log_info("You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.");
+ pg_log_info("Consider using a full dump instead of a --data-only dump to avoid this problem.");
if (nLoop > 1)
removeObjectDependency(loop[0], loop[1]->dumpId);
else /* must be a self-dependency */
@@ -1253,7 +1253,7 @@ repairDependencyLoop(DumpableObject **loop,
char buf[1024];
describeDumpableObject(loop[i], buf, sizeof(buf));
- pg_log_generic(PG_LOG_INFO, " %s", buf);
+ pg_log_info(" %s", buf);
}
if (nLoop > 1)
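
The pg_dump_sort.c hunks are a pure spelling change: pg_log_info() is, modulo its own level check, shorthand for pg_log_generic(PG_LOG_INFO, PG_LOG_PRIMARY, ...), so the emitted output is identical. Sketch, reusing the buf variable from the loop above:

/* These two calls produce the same output when info messages are enabled. */
pg_log_generic(PG_LOG_INFO, PG_LOG_PRIMARY, "  %s", buf);
pg_log_info("  %s", buf);
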
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 2dc33627630..6ef3d614211 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -202,16 +202,11 @@ main(int argc, char *argv[])
strlcpy(full_path, progname, sizeof(full_path));
if (ret == -1)
- pg_log_error("The program \"%s\" is needed by %s but was not found in the\n"
- "same directory as \"%s\".\n"
- "Check your installation.",
- "pg_dump", progname, full_path);
+ pg_fatal("program \"%s\" is needed by %s but was not found in the same directory as \"%s\"",
+ "pg_dump", progname, full_path);
else
- pg_log_error("The program \"%s\" was found by \"%s\"\n"
- "but was not the same version as %s.\n"
- "Check your installation.",
- "pg_dump", full_path, progname);
- exit_nicely(1);
+ pg_fatal("program \"%s\" was found by \"%s\" but was not the same version as %s",
+ "pg_dump", full_path, progname);
}
pgdumpopts = createPQExpBuffer();
@@ -341,7 +336,8 @@ main(int argc, char *argv[])
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
}
@@ -351,8 +347,7 @@ main(int argc, char *argv[])
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
@@ -360,8 +355,7 @@ main(int argc, char *argv[])
(globals_only || roles_only || tablespaces_only))
{
pg_log_error("option --exclude-database cannot be used together with -g/--globals-only, -r/--roles-only, or -t/--tablespaces-only");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
@@ -369,30 +363,24 @@ main(int argc, char *argv[])
if (globals_only && roles_only)
{
pg_log_error("options -g/--globals-only and -r/--roles-only cannot be used together");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
if (globals_only && tablespaces_only)
{
pg_log_error("options -g/--globals-only and -t/--tablespaces-only cannot be used together");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
if (if_exists && !output_clean)
- {
- pg_log_error("option --if-exists requires option -c/--clean");
- exit_nicely(1);
- }
+ pg_fatal("option --if-exists requires option -c/--clean");
if (roles_only && tablespaces_only)
{
pg_log_error("options -r/--roles-only and -t/--tablespaces-only cannot be used together");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
@@ -453,10 +441,7 @@ main(int argc, char *argv[])
prompt_password, false);
if (!conn)
- {
- pg_log_error("could not connect to database \"%s\"", pgdb);
- exit_nicely(1);
- }
+ pg_fatal("could not connect to database \"%s\"", pgdb);
}
else
{
@@ -470,8 +455,7 @@ main(int argc, char *argv[])
{
pg_log_error("could not connect to databases \"postgres\" or \"template1\"\n"
"Please specify an alternative database.");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
}
@@ -489,11 +473,8 @@ main(int argc, char *argv[])
{
OPF = fopen(filename, PG_BINARY_W);
if (!OPF)
- {
- pg_log_error("could not open output file \"%s\": %m",
- filename);
- exit_nicely(1);
- }
+ pg_fatal("could not open output file \"%s\": %m",
+ filename);
}
else
OPF = stdout;
@@ -504,11 +485,8 @@ main(int argc, char *argv[])
if (dumpencoding)
{
if (PQsetClientEncoding(conn, dumpencoding) < 0)
- {
- pg_log_error("invalid client encoding \"%s\" specified",
- dumpencoding);
- exit_nicely(1);
- }
+ pg_fatal("invalid client encoding \"%s\" specified",
+ dumpencoding);
}
/*
@@ -1386,20 +1364,14 @@ dumpDatabases(PGconn *conn)
ret = runPgDump(dbname, create_opts);
if (ret != 0)
- {
- pg_log_error("pg_dump failed on database \"%s\", exiting", dbname);
- exit_nicely(1);
- }
+ pg_fatal("pg_dump failed on database \"%s\", exiting", dbname);
if (filename)
{
OPF = fopen(filename, PG_BINARY_A);
if (!OPF)
- {
- pg_log_error("could not re-open the output file \"%s\": %m",
- filename);
- exit_nicely(1);
- }
+ pg_fatal("could not re-open the output file \"%s\": %m",
+ filename);
}
}
@@ -1535,10 +1507,7 @@ connectDatabase(const char *dbname, const char *connection_string,
{
conn_opts = PQconninfoParse(connection_string, &err_msg);
if (conn_opts == NULL)
- {
- pg_log_error("%s", err_msg);
- exit_nicely(1);
- }
+ pg_fatal("%s", err_msg);
for (conn_opt = conn_opts; conn_opt->keyword != NULL; conn_opt++)
{
@@ -1605,10 +1574,7 @@ connectDatabase(const char *dbname, const char *connection_string,
conn = PQconnectdbParams(keywords, values, true);
if (!conn)
- {
- pg_log_error("could not connect to database \"%s\"", dbname);
- exit_nicely(1);
- }
+ pg_fatal("could not connect to database \"%s\"", dbname);
if (PQstatus(conn) == CONNECTION_BAD &&
PQconnectionNeedsPassword(conn) &&
@@ -1625,10 +1591,7 @@ connectDatabase(const char *dbname, const char *connection_string,
if (PQstatus(conn) == CONNECTION_BAD)
{
if (fail_on_error)
- {
- pg_log_error("%s", PQerrorMessage(conn));
- exit_nicely(1);
- }
+ pg_fatal("%s", PQerrorMessage(conn));
else
{
PQfinish(conn);
@@ -1654,17 +1617,11 @@ connectDatabase(const char *dbname, const char *connection_string,
/* Check version */
remoteversion_str = PQparameterStatus(conn, "server_version");
if (!remoteversion_str)
- {
- pg_log_error("could not get server version");
- exit_nicely(1);
- }
+ pg_fatal("could not get server version");
server_version = PQserverVersion(conn);
if (server_version == 0)
- {
- pg_log_error("could not parse server version \"%s\"",
- remoteversion_str);
- exit_nicely(1);
- }
+ pg_fatal("could not parse server version \"%s\"",
+ remoteversion_str);
my_version = PG_VERSION_NUM;
@@ -1676,9 +1633,9 @@ connectDatabase(const char *dbname, const char *connection_string,
&& (server_version < 90200 ||
(server_version / 100) > (my_version / 100)))
{
- pg_log_error("server version: %s; %s version: %s",
- remoteversion_str, progname, PG_VERSION);
pg_log_error("aborting because of server version mismatch");
+ pg_log_error_detail("server version: %s; %s version: %s",
+ remoteversion_str, progname, PG_VERSION);
exit_nicely(1);
}
@@ -1740,7 +1697,7 @@ executeQuery(PGconn *conn, const char *query)
PQresultStatus(res) != PGRES_TUPLES_OK)
{
pg_log_error("query failed: %s", PQerrorMessage(conn));
- pg_log_error("query was: %s", query);
+ pg_log_error_detail("Query was: %s", query);
PQfinish(conn);
exit_nicely(1);
}
@@ -1763,7 +1720,7 @@ executeCommand(PGconn *conn, const char *query)
PQresultStatus(res) != PGRES_COMMAND_OK)
{
pg_log_error("query failed: %s", PQerrorMessage(conn));
- pg_log_error("query was: %s", query);
+ pg_log_error_detail("Query was: %s", query);
PQfinish(conn);
exit_nicely(1);
}
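
Several of the new pg_fatal() calls in pg_dumpall.c lean on %m, which PostgreSQL's printf layer expands to strerror(errno), so the caller need not save errno explicitly as long as nothing clobbers it first. A minimal sketch with an invented path, assuming the pg_backup_utils.h pg_fatal() override is in scope as elsewhere in this directory:

#include <stdio.h>

#include "postgres_fe.h"
#include "pg_backup_utils.h"

static void
open_or_die(const char *path)
{
	FILE	   *fp = fopen(path, PG_BINARY_W);

	if (!fp)
		pg_fatal("could not open output file \"%s\": %m", path);	/* %m -> strerror(errno) */
	fclose(fp);
}
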
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index 55bf1b69755..049a1006347 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -287,7 +287,8 @@ main(int argc, char **argv)
break;
default:
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ /* getopt_long already emitted a complaint */
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
}
@@ -303,17 +304,13 @@ main(int argc, char **argv)
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
/* Complain if neither -f nor -d was specified (except if dumping TOC) */
if (!opts->cparams.dbname && !opts->filename && !opts->tocSummary)
- {
- pg_log_error("one of -d/--dbname and -f/--file must be specified");
- exit_nicely(1);
- }
+ pg_fatal("one of -d/--dbname and -f/--file must be specified");
/* Should get at most one of -d and -f, else user is confused */
if (opts->cparams.dbname)
@@ -321,41 +318,28 @@ main(int argc, char **argv)
if (opts->filename)
{
pg_log_error("options -d/--dbname and -f/--file cannot be used together");
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
opts->useDB = 1;
}
if (opts->dataOnly && opts->schemaOnly)
- {
- pg_log_error("options -s/--schema-only and -a/--data-only cannot be used together");
- exit_nicely(1);
- }
+ pg_fatal("options -s/--schema-only and -a/--data-only cannot be used together");
if (opts->dataOnly && opts->dropSchema)
- {
- pg_log_error("options -c/--clean and -a/--data-only cannot be used together");
- exit_nicely(1);
- }
+ pg_fatal("options -c/--clean and -a/--data-only cannot be used together");
/*
* -C is not compatible with -1, because we can't create a database inside
* a transaction block.
*/
if (opts->createDB && opts->single_txn)
- {
- pg_log_error("options -C/--create and -1/--single-transaction cannot be used together");
- exit_nicely(1);
- }
+ pg_fatal("options -C/--create and -1/--single-transaction cannot be used together");
/* Can't do single-txn mode with multiple connections */
if (opts->single_txn && numWorkers > 1)
- {
- pg_log_error("cannot specify both --single-transaction and multiple jobs");
- exit_nicely(1);
- }
+ pg_fatal("cannot specify both --single-transaction and multiple jobs");
opts->disable_triggers = disable_triggers;
opts->enable_row_security = enable_row_security;
@@ -369,10 +353,7 @@ main(int argc, char **argv)
opts->no_subscriptions = no_subscriptions;
if (if_exists && !opts->dropSchema)
- {
- pg_log_error("option --if-exists requires option -c/--clean");
- exit_nicely(1);
- }
+ pg_fatal("option --if-exists requires option -c/--clean");
opts->if_exists = if_exists;
opts->strict_names = strict_names;
@@ -396,9 +377,8 @@ main(int argc, char **argv)
break;
default:
- pg_log_error("unrecognized archive format \"%s\"; please specify \"c\", \"d\", or \"t\"",
- opts->formatName);
- exit_nicely(1);
+ pg_fatal("unrecognized archive format \"%s\"; please specify \"c\", \"d\", or \"t\"",
+ opts->formatName);
}
}
diff --git a/src/bin/pg_dump/t/003_pg_dump_with_server.pl b/src/bin/pg_dump/t/003_pg_dump_with_server.pl
index 528db179cbd..c2848663264 100644
--- a/src/bin/pg_dump/t/003_pg_dump_with_server.pl
+++ b/src/bin/pg_dump/t/003_pg_dump_with_server.pl
@@ -30,7 +30,7 @@ my ($cmd, $stdout, $stderr, $result);
command_fails_like(
[ "pg_dump", '-p', $port, '--include-foreign-data=s0', 'postgres' ],
- qr/foreign-data wrapper \"dummy\" has no handler\r?\npg_dump: error: query was:.*t0/,
+ qr/foreign-data wrapper \"dummy\" has no handler\r?\npg_dump: detail: Query was: .*t0/,
"correctly fails to dump a foreign table from a dummy FDW");
command_ok(