path: root/src/backend/access/transam
author    Bruce Momjian <bruce@momjian.us>  2003-08-04 00:43:34 +0000
committer Bruce Momjian <bruce@momjian.us>  2003-08-04 00:43:34 +0000
commit    089003fb462fcce46c02bf47322b429f73c33c50 (patch)
tree      77d78bc3a149df06f5603f60200a6ab363336624 /src/backend/access/transam
parent    63354a0228a1dbc4a0d5ddc8ecdd8326349d2100 (diff)
download  postgresql-089003fb462fcce46c02bf47322b429f73c33c50.tar.gz
          postgresql-089003fb462fcce46c02bf47322b429f73c33c50.zip
pgindent run.
Diffstat (limited to 'src/backend/access/transam')
-rw-r--r--  src/backend/access/transam/clog.c  |   4
-rw-r--r--  src/backend/access/transam/rmgr.c  |   6
-rw-r--r--  src/backend/access/transam/slru.c  |  94
-rw-r--r--  src/backend/access/transam/xact.c  |  58
-rw-r--r--  src/backend/access/transam/xlog.c  | 251
5 files changed, 213 insertions, 200 deletions
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 3653d05bc1e..6741e5436d5 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.16 2003/06/11 22:37:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.17 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,7 +73,7 @@
static SlruCtlData ClogCtlData;
static SlruCtl ClogCtl = &ClogCtlData;
-
+
static int ZeroCLOGPage(int pageno, bool writeXlog);
static bool CLOGPagePrecedes(int page1, int page2);
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index 59af2808026..444d2b97d7d 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -3,7 +3,7 @@
*
* Resource managers definition
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/rmgr.c,v 1.10 2003/02/21 00:06:22 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/rmgr.c,v 1.11 2003/08/04 00:43:15 momjian Exp $
*/
#include "postgres.h"
@@ -19,7 +19,7 @@
#include "commands/sequence.h"
-RmgrData RmgrTable[RM_MAX_ID+1] = {
+RmgrData RmgrTable[RM_MAX_ID + 1] = {
{"XLOG", xlog_redo, xlog_undo, xlog_desc, NULL, NULL},
{"Transaction", xact_redo, xact_undo, xact_desc, NULL, NULL},
{"Storage", smgr_redo, smgr_undo, smgr_desc, NULL, NULL},
@@ -32,7 +32,7 @@ RmgrData RmgrTable[RM_MAX_ID+1] = {
{"Reserved 9", NULL, NULL, NULL, NULL, NULL},
{"Heap", heap_redo, heap_undo, heap_desc, NULL, NULL},
{"Btree", btree_redo, btree_undo, btree_desc,
- btree_xlog_startup, btree_xlog_cleanup},
+ btree_xlog_startup, btree_xlog_cleanup},
{"Hash", hash_redo, hash_undo, hash_desc, NULL, NULL},
{"Rtree", rtree_redo, rtree_undo, rtree_desc, NULL, NULL},
{"Gist", gist_redo, gist_undo, gist_desc, NULL, NULL},
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index 5129dd3c7e5..1c290f2cf57 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/slru.c,v 1.3 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/slru.c,v 1.4 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ typedef enum
SLRU_PAGE_CLEAN, /* page is valid and not dirty */
SLRU_PAGE_DIRTY, /* page is valid but needs write */
SLRU_PAGE_WRITE_IN_PROGRESS /* page is being written out */
-} SlruPageStatus;
+} SlruPageStatus;
/*
* Shared-memory state
@@ -117,7 +117,7 @@ typedef struct SlruSharedData
* swapping out the latest page.
*/
int latest_page_number;
-} SlruSharedData;
+} SlruSharedData;
typedef SlruSharedData *SlruShared;
@@ -145,7 +145,7 @@ typedef enum
SLRU_SEEK_FAILED,
SLRU_READ_FAILED,
SLRU_WRITE_FAILED
-} SlruErrorCause;
+} SlruErrorCause;
static SlruErrorCause slru_errcause;
static int slru_errno;
@@ -166,9 +166,9 @@ SimpleLruShmemSize(void)
{
return MAXALIGN(sizeof(SlruSharedData)) + BLCKSZ * NUM_CLOG_BUFFERS
#ifdef EXEC_BACKEND
- + MAXALIGN(sizeof(SlruLockData))
+ + MAXALIGN(sizeof(SlruLockData))
#endif
- ;
+ ;
}
void
@@ -183,12 +183,14 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
shared = (SlruShared) ptr;
#ifdef EXEC_BACKEND
+
/*
* Locks are in shared memory
*/
- locks = (SlruLock)(ptr + MAXALIGN(sizeof(SlruSharedData)) +
- BLCKSZ * NUM_CLOG_BUFFERS);
+ locks = (SlruLock) (ptr + MAXALIGN(sizeof(SlruSharedData)) +
+ BLCKSZ * NUM_CLOG_BUFFERS);
#else
+
/*
* Locks are in private memory
*/
@@ -199,7 +201,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
if (!IsUnderPostmaster)
- /* Initialize locks and shared memory area */
+ /* Initialize locks and shared memory area */
{
char *bufptr;
int slotno;
@@ -210,8 +212,8 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
memset(shared, 0, sizeof(SlruSharedData));
- bufptr = (char *)shared + MAXALIGN(sizeof(SlruSharedData));
-
+ bufptr = (char *) shared + MAXALIGN(sizeof(SlruSharedData));
+
for (slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
{
locks->BufferLocks[slotno] = LWLockAssign();
@@ -247,7 +249,7 @@ int
SimpleLruZeroPage(SlruCtl ctl, int pageno)
{
int slotno;
- SlruShared shared = (SlruShared) ctl->shared;
+ SlruShared shared = (SlruShared) ctl->shared;
/* Find a suitable buffer slot for the page */
slotno = SlruSelectLRUPage(ctl, pageno);
@@ -285,7 +287,7 @@ SimpleLruZeroPage(SlruCtl ctl, int pageno)
char *
SimpleLruReadPage(SlruCtl ctl, int pageno, TransactionId xid, bool forwrite)
{
- SlruShared shared = (SlruShared) ctl->shared;
+ SlruShared shared = (SlruShared) ctl->shared;
/* Outer loop handles restart if we lose the buffer to someone else */
for (;;)
@@ -383,7 +385,7 @@ SimpleLruWritePage(SlruCtl ctl, int slotno)
{
int pageno;
bool ok;
- SlruShared shared = (SlruShared) ctl->shared;
+ SlruShared shared = (SlruShared) ctl->shared;
/* Do nothing if page does not need writing */
if (shared->page_status[slotno] != SLRU_PAGE_DIRTY &&
@@ -539,13 +541,13 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno)
* possible for this to need to happen when writing a page that's not
* first in its segment; we assume the OS can cope with that. (Note:
* it might seem that it'd be okay to create files only when
- * SimpleLruZeroPage is called for the first page of a segment. However,
- * if after a crash and restart the REDO logic elects to replay the
- * log from a checkpoint before the latest one, then it's possible
- * that we will get commands to set transaction status of transactions
- * that have already been truncated from the commit log. Easiest way
- * to deal with that is to accept references to nonexistent files here
- * and in SlruPhysicalReadPage.)
+ * SimpleLruZeroPage is called for the first page of a segment.
+ * However, if after a crash and restart the REDO logic elects to
+ * replay the log from a checkpoint before the latest one, then it's
+ * possible that we will get commands to set transaction status of
+ * transactions that have already been truncated from the commit log.
+ * Easiest way to deal with that is to accept references to
+ * nonexistent files here and in SlruPhysicalReadPage.)
*/
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
if (fd < 0)
@@ -608,37 +610,37 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
case SLRU_OPEN_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
+ errmsg("could not access status of transaction %u", xid),
errdetail("open of file \"%s\" failed: %m",
path)));
break;
case SLRU_CREATE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
+ errmsg("could not access status of transaction %u", xid),
errdetail("creation of file \"%s\" failed: %m",
path)));
break;
case SLRU_SEEK_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("lseek of file \"%s\", offset %u failed: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("lseek of file \"%s\", offset %u failed: %m",
+ path, offset)));
break;
case SLRU_READ_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("read of file \"%s\", offset %u failed: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("read of file \"%s\", offset %u failed: %m",
+ path, offset)));
break;
case SLRU_WRITE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("write of file \"%s\", offset %u failed: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("write of file \"%s\", offset %u failed: %m",
+ path, offset)));
break;
default:
/* can't get here, we trust */
@@ -665,6 +667,7 @@ static int
SlruSelectLRUPage(SlruCtl ctl, int pageno)
{
SlruShared shared = (SlruShared) ctl->shared;
+
/* Outer loop handles restart after I/O */
for (;;)
{
@@ -689,7 +692,7 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
return slotno;
if (shared->page_lru_count[slotno] > bestcount &&
- shared->page_number[slotno] != shared->latest_page_number)
+ shared->page_number[slotno] != shared->latest_page_number)
{
bestslot = slotno;
bestcount = shared->page_lru_count[slotno];
@@ -705,12 +708,12 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* We need to do I/O. Normal case is that we have to write it
* out, but it's possible in the worst case to have selected a
- * read-busy page. In that case we use SimpleLruReadPage to wait for
- * the read to complete.
+ * read-busy page. In that case we use SimpleLruReadPage to wait
+ * for the read to complete.
*/
if (shared->page_status[bestslot] == SLRU_PAGE_READ_IN_PROGRESS)
(void) SimpleLruReadPage(ctl, shared->page_number[bestslot],
- InvalidTransactionId, false);
+ InvalidTransactionId, false);
else
SimpleLruWritePage(ctl, bestslot);
@@ -747,10 +750,11 @@ SimpleLruFlush(SlruCtl ctl, bool checkpoint)
for (slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
{
SimpleLruWritePage(ctl, slotno);
+
/*
- * When called during a checkpoint,
- * we cannot assert that the slot is clean now, since another
- * process might have re-dirtied it already. That's okay.
+ * When called during a checkpoint, we cannot assert that the slot
+ * is clean now, since another process might have re-dirtied it
+ * already. That's okay.
*/
Assert(checkpoint ||
shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
@@ -792,10 +796,10 @@ SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
CreateCheckPoint(false, true);
/*
- * Scan shared memory and remove any pages preceding the cutoff
- * page, to ensure we won't rewrite them later. (Any dirty pages
- * should have been flushed already during the checkpoint, we're just
- * being extra careful here.)
+ * Scan shared memory and remove any pages preceding the cutoff page,
+ * to ensure we won't rewrite them later. (Any dirty pages should
+ * have been flushed already during the checkpoint, we're just being
+ * extra careful here.)
*/
LWLockAcquire(ctl->locks->ControlLock, LW_EXCLUSIVE);
@@ -870,7 +874,7 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions)
if (cldir == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open directory \"%s\": %m", ctl->Dir)));
+ errmsg("could not open directory \"%s\": %m", ctl->Dir)));
errno = 0;
while ((clde = readdir(cldir)) != NULL)
@@ -898,7 +902,7 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions)
if (errno)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read directory \"%s\": %m", ctl->Dir)));
+ errmsg("could not read directory \"%s\": %m", ctl->Dir)));
closedir(cldir);
return found;
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 40b41519a93..550f2ae924b 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.149 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.150 2003/08/04 00:43:15 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@@ -92,7 +92,7 @@
* AbortTransactionBlock
*
* These are invoked only in response to a user "BEGIN WORK", "COMMIT",
- * or "ROLLBACK" command. The tricky part about these functions
+ * or "ROLLBACK" command. The tricky part about these functions
* is that they are called within the postgres main loop, in between
* the StartTransactionCommand() and CommitTransactionCommand().
*
@@ -197,8 +197,8 @@ static TransactionStateData CurrentTransactionStateData = {
0, /* scan command id */
0x0, /* start time */
TRANS_DEFAULT, /* transaction state */
- TBLOCK_DEFAULT /* transaction block state from
- the client perspective */
+ TBLOCK_DEFAULT /* transaction block state from the client
+ * perspective */
};
TransactionState CurrentTransactionState = &CurrentTransactionStateData;
@@ -359,7 +359,7 @@ GetCurrentTransactionStartTimeUsec(int *msec)
* TransactionIdIsCurrentTransactionId
*
* During bootstrap, we cheat and say "it's not my transaction ID" even though
- * it is. Along with transam.c's cheat to say that the bootstrap XID is
+ * it is. Along with transam.c's cheat to say that the bootstrap XID is
* already committed, this causes the tqual.c routines to see previously
* inserted tuples as committed, which is what we need during bootstrap.
*/
@@ -561,13 +561,13 @@ RecordTransactionCommit(void)
/*
* We must mark the transaction committed in clog if its XID
- * appears either in permanent rels or in local temporary rels.
- * We test this by seeing if we made transaction-controlled
- * entries *OR* local-rel tuple updates. Note that if we made
- * only the latter, we have not emitted an XLOG record for our
- * commit, and so in the event of a crash the clog update might be
- * lost. This is okay because no one else will ever care whether
- * we committed.
+ * appears either in permanent rels or in local temporary rels. We
+ * test this by seeing if we made transaction-controlled entries
+ * *OR* local-rel tuple updates. Note that if we made only the
+ * latter, we have not emitted an XLOG record for our commit, and
+ * so in the event of a crash the clog update might be lost. This
+ * is okay because no one else will ever care whether we
+ * committed.
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
TransactionIdCommit(xid);
@@ -755,9 +755,9 @@ AtAbort_Memory(void)
{
/*
* Make sure we are in a valid context (not a child of
- * TopTransactionContext...). Note that it is possible for this
- * code to be called when we aren't in a transaction at all; go
- * directly to TopMemoryContext in that case.
+ * TopTransactionContext...). Note that it is possible for this code
+ * to be called when we aren't in a transaction at all; go directly to
+ * TopMemoryContext in that case.
*/
if (TopTransactionContext != NULL)
{
@@ -891,8 +891,8 @@ CommitTransaction(void)
DeferredTriggerEndXact();
/*
- * Similarly, let ON COMMIT management do its thing before we start
- * to commit.
+ * Similarly, let ON COMMIT management do its thing before we start to
+ * commit.
*/
PreCommit_on_commit_actions();
@@ -953,10 +953,10 @@ CommitTransaction(void)
* noncritical resource releasing.
*
* The ordering of operations is not entirely random. The idea is:
- * release resources visible to other backends (eg, files, buffer pins);
- * then release locks; then release backend-local resources. We want
- * to release locks at the point where any backend waiting for us will
- * see our transaction as being fully cleaned up.
+ * release resources visible to other backends (eg, files, buffer
+ * pins); then release locks; then release backend-local resources.
+ * We want to release locks at the point where any backend waiting for
+ * us will see our transaction as being fully cleaned up.
*/
smgrDoPendingDeletes(true);
@@ -1064,7 +1064,7 @@ AbortTransaction(void)
}
/*
- * Post-abort cleanup. See notes in CommitTransaction() concerning
+ * Post-abort cleanup. See notes in CommitTransaction() concerning
* ordering.
*/
@@ -1194,8 +1194,8 @@ StartTransactionCommand(void)
}
/*
- * We must switch to TopTransactionContext before returning. This
- * is already done if we called StartTransaction, otherwise not.
+ * We must switch to TopTransactionContext before returning. This is
+ * already done if we called StartTransaction, otherwise not.
*/
Assert(TopTransactionContext != NULL);
MemoryContextSwitchTo(TopTransactionContext);
@@ -1370,9 +1370,10 @@ PreventTransactionChain(void *stmtNode, const char *stmtType)
if (IsTransactionBlock())
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s cannot run inside a transaction block",
stmtType)));
+
/*
* Are we inside a function call? If the statement's parameter block
* was allocated in QueryContext, assume it is an interactive command.
@@ -1381,8 +1382,8 @@ PreventTransactionChain(void *stmtNode, const char *stmtType)
if (!MemoryContextContains(QueryContext, stmtNode))
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
- errmsg("%s cannot be executed from a function", stmtType)));
+ /* translator: %s represents an SQL statement name */
+ errmsg("%s cannot be executed from a function", stmtType)));
/* If we got past IsTransactionBlock test, should be in default state */
if (CurrentTransactionState->blockState != TBLOCK_DEFAULT)
elog(ERROR, "cannot prevent transaction chain");
@@ -1414,6 +1415,7 @@ RequireTransactionChain(void *stmtNode, const char *stmtType)
*/
if (IsTransactionBlock())
return;
+
/*
* Are we inside a function call? If the statement's parameter block
* was allocated in QueryContext, assume it is an interactive command.
@@ -1423,7 +1425,7 @@ RequireTransactionChain(void *stmtNode, const char *stmtType)
return;
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s may only be used in BEGIN/END transaction blocks",
stmtType)));
}
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 0ceb8951cbe..45a2743ba97 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.120 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.121 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1046,8 +1046,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
XLByteToPrevSeg(LogwrtResult.Write, openLogId, openLogSeg);
@@ -1162,8 +1162,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
if (openLogFile < 0)
@@ -1266,7 +1266,7 @@ XLogFlush(XLogRecPtr record)
XLogCtlInsert *Insert = &XLogCtl->Insert;
uint32 freespace = INSERT_FREESPACE(Insert);
- if (freespace < SizeOfXLogRecord) /* buffer is full */
+ if (freespace < SizeOfXLogRecord) /* buffer is full */
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
else
{
@@ -1449,8 +1449,8 @@ XLogFileInit(uint32 log, uint32 seg,
if (fd < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
- path, log, seg)));
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
return (fd);
}
@@ -1563,14 +1563,14 @@ XLogFileOpen(uint32 log, uint32 seg, bool econt)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
- path, log, seg)));
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
return (fd);
}
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
- path, log, seg)));
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
}
return (fd);
@@ -1621,8 +1621,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
if (xldir == NULL)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not open transaction log directory \"%s\": %m",
- XLogDir)));
+ errmsg("could not open transaction log directory \"%s\": %m",
+ XLogDir)));
sprintf(lastoff, "%08X%08X", log, seg);
@@ -1654,15 +1654,15 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
true))
{
ereport(LOG,
- (errmsg("recycled transaction log file \"%s\"",
- xlde->d_name)));
+ (errmsg("recycled transaction log file \"%s\"",
+ xlde->d_name)));
}
else
{
/* No need for any more future segments... */
ereport(LOG,
- (errmsg("removing transaction log file \"%s\"",
- xlde->d_name)));
+ (errmsg("removing transaction log file \"%s\"",
+ xlde->d_name)));
unlink(path);
}
}
@@ -1672,8 +1672,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
if (errno)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not read transaction log directory \"%s\": %m",
- XLogDir)));
+ errmsg("could not read transaction log directory \"%s\": %m",
+ XLogDir)));
closedir(xldir);
}
@@ -1746,8 +1746,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
if (!EQ_CRC64(record->xl_crc, crc))
{
ereport(emode,
- (errmsg("bad resource manager data checksum in record at %X/%X",
- recptr.xlogid, recptr.xrecoff)));
+ (errmsg("bad resource manager data checksum in record at %X/%X",
+ recptr.xlogid, recptr.xrecoff)));
return (false);
}
@@ -1769,8 +1769,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
if (!EQ_CRC64(cbuf, crc))
{
ereport(emode,
- (errmsg("bad checksum of backup block %d in record at %X/%X",
- i + 1, recptr.xlogid, recptr.xrecoff)));
+ (errmsg("bad checksum of backup block %d in record at %X/%X",
+ i + 1, recptr.xlogid, recptr.xrecoff)));
return (false);
}
blk += sizeof(BkpBlock) + BLCKSZ;
@@ -1931,7 +1931,7 @@ got_record:;
{
ereport(emode,
(errmsg("invalid resource manager id %u at %X/%X",
- record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
+ record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
goto next_record_is_invalid;
}
nextRecord = NULL;
@@ -2063,7 +2063,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
{
ereport(emode,
(errmsg("unexpected pageaddr %X/%X in log file %u, segment %u, offset %u",
- hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
+ hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
readId, readSeg, readOff)));
return false;
}
@@ -2084,7 +2084,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
hdr->xlp_sui > lastReadSUI + 512)
{
ereport(emode,
- /* translator: SUI = startup id */
+ /* translator: SUI = startup id */
(errmsg("out-of-sequence SUI %u (after %u) in log file %u, segment %u, offset %u",
hdr->xlp_sui, lastReadSUI,
readId, readSeg, readOff)));
@@ -2235,8 +2235,8 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d,"
- " but the server was compiled with PG_CONTROL_VERSION %d.",
- ControlFile->pg_control_version, PG_CONTROL_VERSION),
+ " but the server was compiled with PG_CONTROL_VERSION %d.",
+ ControlFile->pg_control_version, PG_CONTROL_VERSION),
errhint("It looks like you need to initdb.")));
/* Now check the CRC. */
INIT_CRC64(crc);
@@ -2265,75 +2265,75 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with CATALOG_VERSION_NO %d,"
- " but the server was compiled with CATALOG_VERSION_NO %d.",
- ControlFile->catalog_version_no, CATALOG_VERSION_NO),
+ " but the server was compiled with CATALOG_VERSION_NO %d.",
+ ControlFile->catalog_version_no, CATALOG_VERSION_NO),
errhint("It looks like you need to initdb.")));
if (ControlFile->blcksz != BLCKSZ)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
- errdetail("The database cluster was initialized with BLCKSZ %d,"
- " but the server was compiled with BLCKSZ %d.",
- ControlFile->blcksz, BLCKSZ),
- errhint("It looks like you need to recompile or initdb.")));
+ errdetail("The database cluster was initialized with BLCKSZ %d,"
+ " but the server was compiled with BLCKSZ %d.",
+ ControlFile->blcksz, BLCKSZ),
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->relseg_size != RELSEG_SIZE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with RELSEG_SIZE %d,"
- " but the server was compiled with RELSEG_SIZE %d.",
+ " but the server was compiled with RELSEG_SIZE %d.",
ControlFile->relseg_size, RELSEG_SIZE),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->nameDataLen != NAMEDATALEN)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with NAMEDATALEN %d,"
- " but the server was compiled with NAMEDATALEN %d.",
+ " but the server was compiled with NAMEDATALEN %d.",
ControlFile->nameDataLen, NAMEDATALEN),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->funcMaxArgs != FUNC_MAX_ARGS)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with FUNC_MAX_ARGS %d,"
- " but the server was compiled with FUNC_MAX_ARGS %d.",
+ " but the server was compiled with FUNC_MAX_ARGS %d.",
ControlFile->funcMaxArgs, FUNC_MAX_ARGS),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
#ifdef HAVE_INT64_TIMESTAMP
if (ControlFile->enableIntTimes != TRUE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized without HAVE_INT64_TIMESTAMP"
- " but the server was compiled with HAVE_INT64_TIMESTAMP."),
- errhint("It looks like you need to recompile or initdb.")));
+ " but the server was compiled with HAVE_INT64_TIMESTAMP."),
+ errhint("It looks like you need to recompile or initdb.")));
#else
if (ControlFile->enableIntTimes != FALSE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with HAVE_INT64_TIMESTAMP"
- " but the server was compiled without HAVE_INT64_TIMESTAMP."),
- errhint("It looks like you need to recompile or initdb.")));
+ " but the server was compiled without HAVE_INT64_TIMESTAMP."),
+ errhint("It looks like you need to recompile or initdb.")));
#endif
if (ControlFile->localeBuflen != LOCALE_NAME_BUFLEN)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with LOCALE_NAME_BUFLEN %d,"
- " but the server was compiled with LOCALE_NAME_BUFLEN %d.",
+ " but the server was compiled with LOCALE_NAME_BUFLEN %d.",
ControlFile->localeBuflen, LOCALE_NAME_BUFLEN),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
ereport(FATAL,
- (errmsg("database files are incompatible with operating system"),
- errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
- " which is not recognized by setlocale().",
- ControlFile->lc_collate),
- errhint("It looks like you need to initdb or install locale support.")));
+ (errmsg("database files are incompatible with operating system"),
+ errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
+ " which is not recognized by setlocale().",
+ ControlFile->lc_collate),
+ errhint("It looks like you need to initdb or install locale support.")));
if (setlocale(LC_CTYPE, ControlFile->lc_ctype) == NULL)
ereport(FATAL,
- (errmsg("database files are incompatible with operating system"),
- errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
- " which is not recognized by setlocale().",
- ControlFile->lc_ctype),
- errhint("It looks like you need to initdb or install locale support.")));
+ (errmsg("database files are incompatible with operating system"),
+ errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
+ " which is not recognized by setlocale().",
+ ControlFile->lc_ctype),
+ errhint("It looks like you need to initdb or install locale support.")));
/* Make the fixed locale settings visible as GUC variables, too */
SetConfigOption("lc_collate", ControlFile->lc_collate,
@@ -2602,10 +2602,10 @@ StartupXLOG(void)
str_time(ControlFile->time))));
else if (ControlFile->state == DB_IN_RECOVERY)
ereport(LOG,
- (errmsg("database system was interrupted while in recovery at %s",
- str_time(ControlFile->time)),
- errhint("This probably means that some data is corrupted and"
- " you will have to use the last backup for recovery.")));
+ (errmsg("database system was interrupted while in recovery at %s",
+ str_time(ControlFile->time)),
+ errhint("This probably means that some data is corrupted and"
+ " you will have to use the last backup for recovery.")));
else if (ControlFile->state == DB_IN_PRODUCTION)
ereport(LOG,
(errmsg("database system was interrupted at %s",
@@ -2637,12 +2637,12 @@ StartupXLOG(void)
checkPointLoc = ControlFile->prevCheckPoint;
ereport(LOG,
(errmsg("using previous checkpoint record at %X/%X",
- checkPointLoc.xlogid, checkPointLoc.xrecoff)));
+ checkPointLoc.xlogid, checkPointLoc.xrecoff)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
ereport(PANIC,
- (errmsg("could not locate a valid checkpoint record")));
+ (errmsg("could not locate a valid checkpoint record")));
}
LastRec = RecPtr = checkPointLoc;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
@@ -2665,11 +2665,12 @@ StartupXLOG(void)
ShmemVariableCache->oidCount = 0;
/*
- * If it was a shutdown checkpoint, then any following WAL entries were
- * created under the next StartUpID; if it was a regular checkpoint then
- * any following WAL entries were created under the same StartUpID.
- * We must replay WAL entries using the same StartUpID they were created
- * under, so temporarily adopt that SUI (see also xlog_redo()).
+ * If it was a shutdown checkpoint, then any following WAL entries
+ * were created under the next StartUpID; if it was a regular
+ * checkpoint then any following WAL entries were created under the
+ * same StartUpID. We must replay WAL entries using the same StartUpID
+ * they were created under, so temporarily adopt that SUI (see also
+ * xlog_redo()).
*/
if (wasShutdown)
ThisStartUpID = checkPoint.ThisStartUpID + 1;
@@ -2690,7 +2691,7 @@ StartupXLOG(void)
{
if (wasShutdown)
ereport(PANIC,
- (errmsg("invalid redo/undo record in shutdown checkpoint")));
+ (errmsg("invalid redo/undo record in shutdown checkpoint")));
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
@@ -2699,7 +2700,7 @@ StartupXLOG(void)
/* REDO */
if (InRecovery)
{
- int rmid;
+ int rmid;
ereport(LOG,
(errmsg("database system was not properly shut down; "
@@ -2791,8 +2792,8 @@ StartupXLOG(void)
/*
* Tricky point here: readBuf contains the *last* block that the
- * LastRec record spans, not the one it starts in. The last block
- * is indeed the one we want to use.
+ * LastRec record spans, not the one it starts in. The last block is
+ * indeed the one we want to use.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
memcpy((char *) Insert->currpage, readBuf, BLCKSZ);
@@ -2818,11 +2819,12 @@ StartupXLOG(void)
else
{
/*
- * Whenever Write.LogwrtResult points to exactly the end of a page,
- * Write.curridx must point to the *next* page (see XLogWrite()).
+ * Whenever Write.LogwrtResult points to exactly the end of a
+ * page, Write.curridx must point to the *next* page (see
+ * XLogWrite()).
*
- * Note: it might seem we should do AdvanceXLInsertBuffer() here,
- * but we can't since we haven't yet determined the correct StartUpID
+ * Note: it might seem we should do AdvanceXLInsertBuffer() here, but
+ * we can't since we haven't yet determined the correct StartUpID
* to put into the new page's header. The first actual attempt to
* insert a log record will advance the insert state.
*/
@@ -2859,7 +2861,7 @@ StartupXLOG(void)
if (InRecovery)
{
- int rmid;
+ int rmid;
/*
* Allow resource managers to do any required cleanup.
@@ -2885,14 +2887,15 @@ StartupXLOG(void)
ThisStartUpID = ControlFile->checkPointCopy.ThisStartUpID;
/*
- * Perform a new checkpoint to update our recovery activity to disk.
+ * Perform a new checkpoint to update our recovery activity to
+ * disk.
*
* Note that we write a shutdown checkpoint. This is correct since
- * the records following it will use SUI one more than what is shown
- * in the checkpoint's ThisStartUpID.
+ * the records following it will use SUI one more than what is
+ * shown in the checkpoint's ThisStartUpID.
*
- * In case we had to use the secondary checkpoint, make sure that
- * it will still be shown as the secondary checkpoint after this
+ * In case we had to use the secondary checkpoint, make sure that it
+ * will still be shown as the secondary checkpoint after this
* CreateCheckPoint operation; we don't want the broken primary
* checkpoint to become prevCheckPoint...
*/
@@ -2907,10 +2910,10 @@ StartupXLOG(void)
else
{
/*
- * If we are not doing recovery, then we saw a checkpoint with nothing
- * after it, and we can safely use StartUpID equal to one more than
- * the checkpoint's SUI. But just for paranoia's sake, check against
- * pg_control too.
+ * If we are not doing recovery, then we saw a checkpoint with
+ * nothing after it, and we can safely use StartUpID equal to one
+ * more than the checkpoint's SUI. But just for paranoia's sake,
+ * check against pg_control too.
*/
ThisStartUpID = checkPoint.ThisStartUpID;
if (ThisStartUpID < ControlFile->checkPointCopy.ThisStartUpID)
@@ -2923,7 +2926,8 @@ StartupXLOG(void)
PreallocXlogFiles(EndOfLog);
/*
- * Advance StartUpID to one more than the highest value used previously.
+ * Advance StartUpID to one more than the highest value used
+ * previously.
*/
ThisStartUpID++;
XLogCtl->ThisStartUpID = ThisStartUpID;
@@ -2973,9 +2977,9 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
if (!XRecOffIsValid(RecPtr.xrecoff))
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid %s checkpoint link in control file",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
@@ -2984,34 +2988,34 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
if (record == NULL)
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_rmid != RM_XLOG_ID)
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
- (errmsg("invalid resource manager id in %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ /* translator: %s is "primary" or "secondary" */
+ (errmsg("invalid resource manager id in %s checkpoint record",
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_info != XLOG_CHECKPOINT_SHUTDOWN &&
record->xl_info != XLOG_CHECKPOINT_ONLINE)
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid xl_info in %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_len != sizeof(CheckPoint))
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid length of %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
return record;
@@ -3112,10 +3116,11 @@ CreateCheckPoint(bool shutdown, bool force)
if (MyXactMadeXLogEntry)
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- errmsg("checkpoint cannot be made inside transaction block")));
+ errmsg("checkpoint cannot be made inside transaction block")));
/*
- * Acquire CheckpointLock to ensure only one checkpoint happens at a time.
+ * Acquire CheckpointLock to ensure only one checkpoint happens at a
+ * time.
*
* The CheckpointLock can be held for quite a while, which is not good
* because we won't respond to a cancel/die request while waiting for
@@ -3149,14 +3154,15 @@ CreateCheckPoint(bool shutdown, bool force)
LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
/*
- * If this isn't a shutdown or forced checkpoint, and we have not inserted
- * any XLOG records since the start of the last checkpoint, skip the
- * checkpoint. The idea here is to avoid inserting duplicate checkpoints
- * when the system is idle. That wastes log space, and more importantly it
- * exposes us to possible loss of both current and previous checkpoint
- * records if the machine crashes just as we're writing the update.
- * (Perhaps it'd make even more sense to checkpoint only when the previous
- * checkpoint record is in a different xlog page?)
+ * If this isn't a shutdown or forced checkpoint, and we have not
+ * inserted any XLOG records since the start of the last checkpoint,
+ * skip the checkpoint. The idea here is to avoid inserting duplicate
+ * checkpoints when the system is idle. That wastes log space, and
+ * more importantly it exposes us to possible loss of both current and
+ * previous checkpoint records if the machine crashes just as we're
+ * writing the update. (Perhaps it'd make even more sense to
+ * checkpoint only when the previous checkpoint record is in a
+ * different xlog page?)
*
* We have to make two tests to determine that nothing has happened since
* the start of the last checkpoint: current insertion point must
@@ -3204,12 +3210,13 @@ CreateCheckPoint(bool shutdown, bool force)
* Here we update the shared RedoRecPtr for future XLogInsert calls;
* this must be done while holding the insert lock AND the info_lck.
*
- * Note: if we fail to complete the checkpoint, RedoRecPtr will be
- * left pointing past where it really needs to point. This is okay;
- * the only consequence is that XLogInsert might back up whole buffers
- * that it didn't really need to. We can't postpone advancing RedoRecPtr
- * because XLogInserts that happen while we are dumping buffers must
- * assume that their buffer changes are not included in the checkpoint.
+ * Note: if we fail to complete the checkpoint, RedoRecPtr will be left
+ * pointing past where it really needs to point. This is okay; the
+ * only consequence is that XLogInsert might back up whole buffers
+ * that it didn't really need to. We can't postpone advancing
+ * RedoRecPtr because XLogInserts that happen while we are dumping
+ * buffers must assume that their buffer changes are not included in
+ * the checkpoint.
*/
{
/* use volatile pointer to prevent code rearrangement */
@@ -3538,15 +3545,15 @@ assign_xlog_sync_method(const char *method, bool doit, bool interactive)
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("fsync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("fsync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
if (open_sync_bit != new_sync_bit)
{
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
}
@@ -3570,16 +3577,16 @@ issue_xlog_fsync(void)
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("fsync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("fsync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
break;
#ifdef HAVE_FDATASYNC
case SYNC_METHOD_FDATASYNC:
if (pg_fdatasync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("fdatasync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("fdatasync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
break;
#endif
case SYNC_METHOD_OPEN: