Diffstat (limited to 'src/backend/storage')
-rw-r--r--  src/backend/storage/freespace/freespace.c | 24
-rw-r--r--  src/backend/storage/lmgr/deadlock.c       |  4
-rw-r--r--  src/backend/storage/lmgr/s_lock.c         | 40
3 files changed, 35 insertions(+), 33 deletions(-)
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index 7dd286b8936..94578c4d9b9 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.20 2003/08/04 02:40:03 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.21 2003/08/08 21:41:59 momjian Exp $
*
*
* NOTES:
@@ -144,7 +144,7 @@ typedef struct FsmCacheFileHeader
uint32 endian;
uint32 version;
int32 numRels;
-} FsmCacheFileHeader;
+} FsmCacheFileHeader;
/* Per-relation header */
typedef struct FsmCacheRelHeader
@@ -154,7 +154,7 @@ typedef struct FsmCacheRelHeader
uint32 avgRequest; /* moving average of space requests */
int32 lastPageCount; /* pages passed to RecordRelationFreeSpace */
int32 storedPages; /* # of pages stored in arena */
-} FsmCacheRelHeader;
+} FsmCacheRelHeader;
/*
@@ -236,10 +236,10 @@ static bool lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
int *outPageIndex);
static void compact_fsm_storage(void);
static void push_fsm_rels_after(FSMRelation *afterRel);
-static void pack_incoming_pages(FSMPageData * newLocation, int newPages,
- PageFreeSpaceInfo * pageSpaces, int nPages);
-static void pack_existing_pages(FSMPageData * newLocation, int newPages,
- FSMPageData * oldLocation, int oldPages);
+static void pack_incoming_pages(FSMPageData *newLocation, int newPages,
+ PageFreeSpaceInfo *pageSpaces, int nPages);
+static void pack_existing_pages(FSMPageData *newLocation, int newPages,
+ FSMPageData *oldLocation, int oldPages);
static int fsm_calc_request(FSMRelation *fsmrel);
static int fsm_calc_target_allocation(int myRequest);
static int fsm_current_chunks(FSMRelation *fsmrel);
@@ -459,7 +459,7 @@ GetAvgFSMRequestSize(RelFileNode *rel)
void
RecordRelationFreeSpace(RelFileNode *rel,
int nPages,
- PageFreeSpaceInfo * pageSpaces)
+ PageFreeSpaceInfo *pageSpaces)
{
FSMRelation *fsmrel;
@@ -1560,8 +1560,8 @@ push_fsm_rels_after(FSMRelation *afterRel)
#define HISTOGRAM_BINS 64
static void
-pack_incoming_pages(FSMPageData * newLocation, int newPages,
- PageFreeSpaceInfo * pageSpaces, int nPages)
+pack_incoming_pages(FSMPageData *newLocation, int newPages,
+ PageFreeSpaceInfo *pageSpaces, int nPages)
{
int histogram[HISTOGRAM_BINS];
int above,
@@ -1630,8 +1630,8 @@ pack_incoming_pages(FSMPageData * newLocation, int newPages,
* so that we can copy data moving forward in the arrays without problem.
*/
static void
-pack_existing_pages(FSMPageData * newLocation, int newPages,
- FSMPageData * oldLocation, int oldPages)
+pack_existing_pages(FSMPageData *newLocation, int newPages,
+ FSMPageData *oldLocation, int oldPages)
{
int histogram[HISTOGRAM_BINS];
int above,
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index b9125ea57ae..4ed712bcec6 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.23 2003/08/04 02:40:03 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.24 2003/08/08 21:42:00 momjian Exp $
*
* Interface:
*
@@ -61,7 +61,7 @@ typedef struct
LOCKTAG locktag; /* ID of awaited lock object */
LOCKMODE lockmode; /* type of lock we're waiting for */
int pid; /* PID of blocked backend */
-} DEADLOCK_INFO;
+} DEADLOCK_INFO;
static bool DeadLockCheckRecurse(PGPROC *proc);
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index de6b355aace..3124feb798e 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/s_lock.c,v 1.15 2003/08/06 16:43:43 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/s_lock.c,v 1.16 2003/08/08 21:42:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -54,26 +54,27 @@ s_lock(volatile slock_t *lock, const char *file, int line)
* finish 100 iterations. However, on a uniprocessor, the tight loop
* is just a waste of cycles, so don't iterate thousands of times.
*
- * Once we do decide to block, we use randomly increasing select() delays.
- * The first delay is 10 msec, then the delay randomly increases to about
- * one second, after which we reset to 10 msec and start again. The idea
- * here is that in the presence of heavy contention we need to increase
- * the delay, else the spinlock holder may never get to run and release
- * the lock. (Consider situation where spinlock holder has been nice'd
- * down in priority by the scheduler --- it will not get scheduled until
- * all would-be acquirers are sleeping, so if we always use a 10-msec
- * sleep, there is a real possibility of starvation.) But we can't just
- * clamp the delay to an upper bound, else it would take a long time to
- * make a reasonable number of tries.
+ * Once we do decide to block, we use randomly increasing select()
+ * delays. The first delay is 10 msec, then the delay randomly
+ * increases to about one second, after which we reset to 10 msec and
+ * start again. The idea here is that in the presence of heavy
+ * contention we need to increase the delay, else the spinlock holder
+ * may never get to run and release the lock. (Consider situation
+ * where spinlock holder has been nice'd down in priority by the
+ * scheduler --- it will not get scheduled until all would-be
+ * acquirers are sleeping, so if we always use a 10-msec sleep, there
+ * is a real possibility of starvation.) But we can't just clamp the
+ * delay to an upper bound, else it would take a long time to make a
+ * reasonable number of tries.
*
* We time out and declare error after NUM_DELAYS delays (thus, exactly
* that many tries). With the given settings, this will usually take
- * 3 or so minutes. It seems better to fix the total number of tries (and
- * thus the probability of unintended failure) than to fix the total time
- * spent.
+ * 3 or so minutes. It seems better to fix the total number of tries
+ * (and thus the probability of unintended failure) than to fix the
+ * total time spent.
*
- * The select() delays are measured in centiseconds (0.01 sec) because
- * 10 msec is a common resolution limit at the OS level.
+ * The select() delays are measured in centiseconds (0.01 sec) because 10
+ * msec is a common resolution limit at the OS level.
*/
#define SPINS_PER_DELAY 100
#define NUM_DELAYS 1000
@@ -97,12 +98,13 @@ s_lock(volatile slock_t *lock, const char *file, int line)
(void) select(0, NULL, NULL, NULL, &delay);
#if defined(S_LOCK_TEST)
- fprintf(stdout, "*"); fflush(stdout);
+ fprintf(stdout, "*");
+ fflush(stdout);
#endif
/* increase delay by a random fraction between 1X and 2X */
cur_delay += (int) (cur_delay *
- (((double) random()) / ((double) MAX_RANDOM_VALUE)) + 0.5);
+ (((double) random()) / ((double) MAX_RANDOM_VALUE)) + 0.5);
/* wrap back to minimum delay when max is exceeded */
if (cur_delay > MAX_DELAY_CSEC)
cur_delay = MIN_DELAY_CSEC;
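
To make the backoff scheme in the s_lock() comment above concrete, here is a
minimal, self-contained sketch of the loop it describes. This is an
illustration under stated assumptions, not PostgreSQL's implementation:
try_lock() stands in for the platform TAS() primitive, rand()/RAND_MAX
replaces the patch's random()/MAX_RANDOM_VALUE, and the MIN_DELAY_CSEC /
MAX_DELAY_CSEC values are inferred from the comment's "10 msec ... about one
second" description (only the names appear in the hunk above).

/*
 * Illustrative sketch only (not the real s_lock): spin briefly, then
 * block with randomly increasing select() delays.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/select.h>
#include <sys/time.h>

#define SPINS_PER_DELAY 100     /* tight-loop tries before each sleep */
#define NUM_DELAYS      1000    /* give up after this many sleeps */
#define MIN_DELAY_CSEC  1       /* assumed value: first sleep = 10 msec */
#define MAX_DELAY_CSEC  100     /* assumed value: reset above ~1 second */

extern int try_lock(volatile int *lock);    /* stand-in for TAS() */

static void
spin_with_backoff(volatile int *lock)
{
    int         spins = 0;
    int         delays = 0;
    int         cur_delay = MIN_DELAY_CSEC;
    struct timeval delay;

    while (!try_lock(lock))
    {
        /* spin cheaply for a while before deciding to block */
        if (++spins < SPINS_PER_DELAY)
            continue;

        /* fixed number of tries => bounded probability of false failure */
        if (++delays > NUM_DELAYS)
        {
            fprintf(stderr, "stuck spinlock detected\n");
            abort();
        }

        /* sleep for cur_delay centiseconds */
        delay.tv_sec = cur_delay / 100;
        delay.tv_usec = (cur_delay % 100) * 10000;
        (void) select(0, NULL, NULL, NULL, &delay);

        /* increase delay by a random fraction between 1X and 2X */
        cur_delay += (int) (cur_delay *
                            ((double) rand() / (double) RAND_MAX) + 0.5);

        /* wrap back to minimum delay when max is exceeded */
        if (cur_delay > MAX_DELAY_CSEC)
            cur_delay = MIN_DELAY_CSEC;

        spins = 0;
    }
}

The randomized 1X-2X growth keeps contending backends from waking in
lockstep, and capping the loop at NUM_DELAYS sleeps fixes the total number of
tries rather than the total time spent, which is exactly the trade-off the
comment argues for.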