Diffstat (limited to 'src/backend/storage/file/fd.c')
-rw-r--r--  src/backend/storage/file/fd.c  48
1 file changed, 41 insertions, 7 deletions
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 2d75773ef02..827a1e2620b 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -145,6 +145,8 @@ int max_files_per_process = 1000;
*/
int max_safe_fds = 32; /* default if not changed */
+/* Whether it is safe to continue running after fsync() fails. */
+bool data_sync_retry = false;
/* Debugging.... */
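
This diff is limited to fd.c, so the registration of the new data_sync_retry GUC is not shown here. For reference, a boolean setting of this kind is normally wired up through an entry in the ConfigureNamesBool table in src/backend/utils/misc/guc.c; the sketch below only illustrates the shape of such an entry, and the category and description wording are assumptions rather than text taken from this patch:

    {
        {"data_sync_retry", PGC_POSTMASTER, ERROR_HANDLING_OPTIONS,
            gettext_noop("Whether to continue running after a failure to sync data files."),
            NULL
        },
        &data_sync_retry,
        false,
        NULL, NULL, NULL        /* no check/assign/show hooks */
    },

PGC_POSTMASTER (assumed here) would make the setting changeable only at server start, which fits a parameter that decides whether the server may keep running after a write-back failure.
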
@@ -430,11 +432,9 @@ pg_flush_data(int fd, off_t offset, off_t nbytes)
*/
rc = sync_file_range(fd, offset, nbytes,
SYNC_FILE_RANGE_WRITE);
-
- /* don't error out, this is just a performance optimization */
if (rc != 0)
{
- ereport(WARNING,
+ ereport(data_sync_elevel(WARNING),
(errcode_for_file_access(),
errmsg("could not flush dirty data: %m")));
}
@@ -506,7 +506,7 @@ pg_flush_data(int fd, off_t offset, off_t nbytes)
rc = msync(p, (size_t) nbytes, MS_ASYNC);
if (rc != 0)
{
- ereport(WARNING,
+ ereport(data_sync_elevel(WARNING),
(errcode_for_file_access(),
errmsg("could not flush dirty data: %m")));
/* NB: need to fall through to munmap()! */
@@ -562,7 +562,7 @@ pg_flush_data(int fd, off_t offset, off_t nbytes)
void
fsync_fname(const char *fname, bool isdir)
{
- fsync_fname_ext(fname, isdir, false, ERROR);
+ fsync_fname_ext(fname, isdir, false, data_sync_elevel(ERROR));
}
/*
@@ -1022,7 +1022,8 @@ LruDelete(File file)
* to leak the FD than to mess up our internal state.
*/
if (close(vfdP->fd))
- elog(LOG, "could not close file \"%s\": %m", vfdP->fileName);
+ elog(vfdP->fdstate & FD_TEMP_FILE_LIMIT ? LOG : data_sync_elevel(LOG),
+ "could not close file \"%s\": %m", vfdP->fileName);
vfdP->fd = VFD_CLOSED;
--nfile;
@@ -1698,7 +1699,14 @@ FileClose(File file)
{
/* close the file */
if (close(vfdP->fd))
- elog(LOG, "could not close file \"%s\": %m", vfdP->fileName);
+ {
+ /*
+ * We may need to panic on failure to close non-temporary files;
+ * see LruDelete.
+ */
+ elog(vfdP->fdstate & FD_TEMP_FILE_LIMIT ? LOG : data_sync_elevel(LOG),
+ "could not close file \"%s\": %m", vfdP->fileName);
+ }
--nfile;
vfdP->fd = VFD_CLOSED;
@@ -3091,6 +3099,9 @@ looks_like_temp_rel_name(const char *name)
* harmless cases such as read-only files in the data directory, and that's
* not good either.
*
+ * Note that if we previously crashed due to a PANIC on fsync(), we'll be
+ * rewriting all changes again during recovery.
+ *
* Note we assume we're chdir'd into PGDATA to begin with.
*/
void
@@ -3413,3 +3424,26 @@ MakePGDirectory(const char *directoryName)
{
return mkdir(directoryName, pg_dir_create_mode);
}
+
+/*
+ * Return the passed-in error level, or PANIC if data_sync_retry is off.
+ *
+ * Failure to fsync any data file is cause for immediate panic, unless
+ * data_sync_retry is enabled. Data may have been written to the operating
+ * system and removed from our buffer pool already, and if we are running on
+ * an operating system that forgets dirty data on write-back failure, there
+ * may be only one copy of the data remaining: in the WAL. A later attempt to
+ * fsync again might falsely report success. Therefore we must not allow any
+ * further checkpoints to be attempted. data_sync_retry can in theory be
+ * enabled on systems known not to drop dirty buffered data on write-back
+ * failure (with the likely outcome that checkpoints will continue to fail
+ * until the underlying problem is fixed).
+ *
+ * Any code that reports a failure from fsync() or related functions should
+ * filter the error level with this function.
+ */
+int
+data_sync_elevel(int elevel)
+{
+ return data_sync_retry ? elevel : PANIC;
+}
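
The comment above describes the intended calling convention: any code that reports a failure from fsync() or a related call should pass its error level through data_sync_elevel(). As a rough sketch of that pattern (the fd and path variables are placeholders, not code from this patch):

    /* Report an fsync() failure; escalates to PANIC unless data_sync_retry is on. */
    if (pg_fsync(fd) != 0)
        ereport(data_sync_elevel(ERROR),
                (errcode_for_file_access(),
                 errmsg("could not fsync file \"%s\": %m", path)));

With data_sync_retry left at its default of false, the ERROR is promoted to PANIC, so the server crashes and replays WAL instead of attempting another checkpoint against data the kernel may already have discarded.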