Diffstat (limited to 'src/backend/storage/buffer')
-rw-r--r--  src/backend/storage/buffer/buf_init.c    15
-rw-r--r--  src/backend/storage/buffer/buf_table.c    6
-rw-r--r--  src/backend/storage/buffer/bufmgr.c     116
-rw-r--r--  src/backend/storage/buffer/localbuf.c    16
-rw-r--r--  src/backend/storage/buffer/s_lock.c      47
5 files changed, 100 insertions(+), 100 deletions(-)
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index ff3d43fe8c0..03d6504db86 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_init.c,v 1.41 2001/01/24 19:43:05 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_init.c,v 1.42 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,8 +63,8 @@ long *PrivateRefCount; /* also used in freelist.c */
bits8 *BufferLocks; /* flag bits showing locks I have set */
BufferTag *BufferTagLastDirtied; /* tag buffer had when last
* dirtied by me */
-BufferBlindId *BufferBlindLastDirtied;
-bool *BufferDirtiedByMe; /* T if buf has been dirtied in cur xact */
+BufferBlindId *BufferBlindLastDirtied;
+bool *BufferDirtiedByMe; /* T if buf has been dirtied in cur xact */
/*
@@ -149,7 +149,8 @@ InitBufferPool(void)
/*
* It's probably not really necessary to grab the lock --- if there's
- * anyone else attached to the shmem at this point, we've got problems.
+ * anyone else attached to the shmem at this point, we've got
+ * problems.
*/
SpinAcquire(BufMgrLock);
@@ -240,13 +241,11 @@ InitBufferPoolAccess(void)
BufferDirtiedByMe = (bool *) calloc(NBuffers, sizeof(bool));
/*
- * Convert shmem offsets into addresses as seen by this process.
- * This is just to speed up the BufferGetBlock() macro.
+ * Convert shmem offsets into addresses as seen by this process. This
+ * is just to speed up the BufferGetBlock() macro.
*/
for (i = 0; i < NBuffers; i++)
- {
BufferBlockPointers[i] = (Block) MAKE_PTR(BufferDescriptors[i].data);
- }
/*
* Now that buffer access is initialized, set up a callback to shut it
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index f1512e0563e..9e8f1647786 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_table.c,v 1.20 2001/01/24 19:43:05 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_table.c,v 1.21 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -123,8 +123,8 @@ BufTableDelete(BufferDesc *buf)
/*
* Clear the buffer's tag. This doesn't matter for the hash table,
* since the buffer is already removed from it, but it ensures that
- * sequential searches through the buffer table won't think the
- * buffer is still valid for its old page.
+ * sequential searches through the buffer table won't think the buffer
+ * is still valid for its old page.
*/
buf->tag.rnode.relNode = InvalidOid;
buf->tag.rnode.tblNode = InvalidOid;
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index b247248a80c..45dcdaed6a9 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.108 2001/03/21 10:13:29 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.109 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -401,7 +401,7 @@ BufferAlloc(Relation reln,
bool smok;
/*
- * skip write error buffers
+ * skip write error buffers
*/
if ((buf->flags & BM_IO_ERROR) != 0)
{
@@ -409,6 +409,7 @@ BufferAlloc(Relation reln,
buf = (BufferDesc *) NULL;
continue;
}
+
/*
* Set BM_IO_IN_PROGRESS to keep anyone from doing anything
* with the contents of the buffer while we write it out. We
@@ -453,6 +454,7 @@ BufferAlloc(Relation reln,
}
else
{
+
/*
* BM_JUST_DIRTIED cleared by BufferReplace and shouldn't
* be setted by anyone. - vadim 01/17/97
@@ -689,9 +691,7 @@ ReleaseAndReadBuffer(Buffer buffer,
bufHdr = &BufferDescriptors[buffer - 1];
Assert(PrivateRefCount[buffer - 1] > 0);
if (PrivateRefCount[buffer - 1] > 1)
- {
PrivateRefCount[buffer - 1]--;
- }
else
{
SpinAcquire(BufMgrLock);
@@ -724,7 +724,7 @@ BufferSync()
BufferDesc *bufHdr;
Buffer buffer;
int status;
- RelFileNode rnode;
+ RelFileNode rnode;
XLogRecPtr recptr;
Relation reln = NULL;
@@ -754,8 +754,8 @@ BufferSync()
}
/*
- * IO synchronization. Note that we do it with unpinned buffer
- * to avoid conflicts with FlushRelationBuffers.
+ * IO synchronization. Note that we do it with unpinned buffer to
+ * avoid conflicts with FlushRelationBuffers.
*/
if (bufHdr->flags & BM_IO_IN_PROGRESS)
{
@@ -769,12 +769,12 @@ BufferSync()
}
/*
- * Here: no one doing IO for this buffer and it's dirty.
- * Pin buffer now and set IO state for it *before* acquiring
- * shlock to avoid conflicts with FlushRelationBuffers.
+ * Here: no one doing IO for this buffer and it's dirty. Pin
+ * buffer now and set IO state for it *before* acquiring shlock to
+ * avoid conflicts with FlushRelationBuffers.
*/
PinBuffer(bufHdr);
- StartBufferIO(bufHdr, false); /* output IO start */
+ StartBufferIO(bufHdr, false); /* output IO start */
buffer = BufferDescriptorGetBuffer(bufHdr);
rnode = bufHdr->tag.rnode;
@@ -810,16 +810,16 @@ BufferSync()
if (reln == (Relation) NULL)
{
status = smgrblindwrt(DEFAULT_SMGR,
- bufHdr->tag.rnode,
- bufHdr->tag.blockNum,
- (char *) MAKE_PTR(bufHdr->data),
- true); /* must fsync */
+ bufHdr->tag.rnode,
+ bufHdr->tag.blockNum,
+ (char *) MAKE_PTR(bufHdr->data),
+ true); /* must fsync */
}
else
{
status = smgrwrite(DEFAULT_SMGR, reln,
- bufHdr->tag.blockNum,
- (char *) MAKE_PTR(bufHdr->data));
+ bufHdr->tag.blockNum,
+ (char *) MAKE_PTR(bufHdr->data));
}
if (status == SM_FAIL) /* disk failure ?! */
@@ -827,9 +827,9 @@ BufferSync()
bufHdr->tag.blockNum, bufHdr->blind.relname);
/*
- * Note that it's safe to change cntxDirty here because of
- * we protect it from upper writers by share lock and from
- * other bufmgr routines by BM_IO_IN_PROGRESS
+ * Note that it's safe to change cntxDirty here because of we
+ * protect it from upper writers by share lock and from other
+ * bufmgr routines by BM_IO_IN_PROGRESS
*/
bufHdr->cntxDirty = false;
@@ -842,12 +842,11 @@ BufferSync()
SpinAcquire(BufMgrLock);
bufHdr->flags &= ~BM_IO_IN_PROGRESS; /* mark IO finished */
- TerminateBufferIO(bufHdr); /* Sync IO finished */
+ TerminateBufferIO(bufHdr); /* Sync IO finished */
/*
- * If this buffer was marked by someone as DIRTY while
- * we were flushing it out we must not clear DIRTY
- * flag - vadim 01/17/97
+ * If this buffer was marked by someone as DIRTY while we were
+ * flushing it out we must not clear DIRTY flag - vadim 01/17/97
*/
if (!(bufHdr->flags & BM_JUST_DIRTIED))
bufHdr->flags &= ~BM_DIRTY;
@@ -1020,6 +1019,7 @@ void
BufmgrCommit(void)
{
LocalBufferSync();
+
/*
* All files created in current transaction will be fsync-ed
*/
@@ -1065,8 +1065,8 @@ BufferReplace(BufferDesc *bufHdr)
SpinRelease(BufMgrLock);
/*
- * No need to lock buffer context - no one should be able to
- * end ReadBuffer
+ * No need to lock buffer context - no one should be able to end
+ * ReadBuffer
*/
recptr = BufferGetLSN(bufHdr);
XLogFlush(recptr);
@@ -1113,8 +1113,8 @@ BlockNumber
RelationGetNumberOfBlocks(Relation relation)
{
return ((relation->rd_myxactonly) ? relation->rd_nblocks :
- ((relation->rd_rel->relkind == RELKIND_VIEW) ? 0 :
- smgrnblocks(DEFAULT_SMGR, relation)));
+ ((relation->rd_rel->relkind == RELKIND_VIEW) ? 0 :
+ smgrnblocks(DEFAULT_SMGR, relation)));
}
/* ---------------------------------------------------------------------
@@ -1122,7 +1122,7 @@ RelationGetNumberOfBlocks(Relation relation)
*
* This function removes all the buffered pages for a relation
* from the buffer pool. Dirty pages are simply dropped, without
- * bothering to write them out first. This is NOT rollback-able,
+ * bothering to write them out first. This is NOT rollback-able,
* and so should be used only with extreme caution!
*
* We assume that the caller holds an exclusive lock on the relation,
@@ -1196,6 +1196,7 @@ recheck:
bufHdr->refcount == 1);
ReleaseBufferWithBufferLock(i);
}
+
/*
* And mark the buffer as no longer occupied by this rel.
*/
@@ -1212,7 +1213,7 @@ recheck:
* This is the same as DropRelationBuffers, except that the target
* relation is specified by RelFileNode.
*
- * This is NOT rollback-able. One legitimate use is to clear the
+ * This is NOT rollback-able. One legitimate use is to clear the
* buffer cache of buffers for a relation that is being deleted
* during transaction abort.
* --------------------------------------------------------------------
@@ -1278,6 +1279,7 @@ recheck:
bufHdr->refcount == 1);
ReleaseBufferWithBufferLock(i);
}
+
/*
* And mark the buffer as no longer occupied by this rel.
*/
@@ -1293,7 +1295,7 @@ recheck:
*
* This function removes all the buffers in the buffer cache for a
* particular database. Dirty pages are simply dropped, without
- * bothering to write them out first. This is used when we destroy a
+ * bothering to write them out first. This is used when we destroy a
* database, to avoid trying to flush data to disk when the directory
* tree no longer exists. Implementation is pretty similar to
* DropRelationBuffers() which is for destroying just one relation.
@@ -1310,10 +1312,11 @@ DropBuffers(Oid dbid)
{
bufHdr = &BufferDescriptors[i - 1];
recheck:
+
/*
- * We know that currently database OID is tblNode but
- * this probably will be changed in future and this
- * func will be used to drop tablespace buffers.
+ * We know that currently database OID is tblNode but this
+ * probably will be changed in future and this func will be used
+ * to drop tablespace buffers.
*/
if (bufHdr->tag.rnode.tblNode == dbid)
{
@@ -1342,6 +1345,7 @@ recheck:
* backends are running in that database.
*/
Assert(bufHdr->flags & BM_FREE);
+
/*
* And mark the buffer as no longer occupied by this page.
*/
@@ -1383,8 +1387,8 @@ blockNum=%d, flags=0x%x, refcount=%d %ld)",
for (i = 0; i < NBuffers; ++i, ++buf)
{
printf("[%-2d] (%s, %d) flags=0x%x, refcnt=%d %ld)\n",
- i, buf->blind.relname, buf->tag.blockNum,
- buf->flags, buf->refcount, PrivateRefCount[i]);
+ i, buf->blind.relname, buf->tag.blockNum,
+ buf->flags, buf->refcount, PrivateRefCount[i]);
}
}
}
@@ -1441,7 +1445,7 @@ BufferPoolBlowaway()
*
* This function writes all dirty pages of a relation out to disk.
* Furthermore, pages that have blocknumber >= firstDelBlock are
- * actually removed from the buffer pool. An error code is returned
+ * actually removed from the buffer pool. An error code is returned
* if we fail to dump a dirty buffer or if we find one of
* the target pages is pinned into the cache.
*
@@ -1495,15 +1499,15 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
{
if (bufHdr->flags & BM_DIRTY || bufHdr->cntxDirty)
{
- status = smgrwrite(DEFAULT_SMGR, rel,
- bufHdr->tag.blockNum,
- (char *) MAKE_PTR(bufHdr->data));
+ status = smgrwrite(DEFAULT_SMGR, rel,
+ bufHdr->tag.blockNum,
+ (char *) MAKE_PTR(bufHdr->data));
if (status == SM_FAIL)
{
elog(NOTICE, "FlushRelationBuffers(%s (local), %u): block %u is dirty, could not flush it",
RelationGetRelationName(rel), firstDelBlock,
bufHdr->tag.blockNum);
- return(-1);
+ return (-1);
}
bufHdr->flags &= ~(BM_DIRTY | BM_JUST_DIRTIED);
bufHdr->cntxDirty = false;
@@ -1513,12 +1517,10 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
elog(NOTICE, "FlushRelationBuffers(%s (local), %u): block %u is referenced (%ld)",
RelationGetRelationName(rel), firstDelBlock,
bufHdr->tag.blockNum, LocalRefCount[i]);
- return(-2);
+ return (-2);
}
if (bufHdr->tag.blockNum >= firstDelBlock)
- {
bufHdr->tag.rnode.relNode = InvalidOid;
- }
}
}
return 0;
@@ -1559,10 +1561,10 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
SpinRelease(BufMgrLock);
status = smgrwrite(DEFAULT_SMGR, rel,
- bufHdr->tag.blockNum,
- (char *) MAKE_PTR(bufHdr->data));
+ bufHdr->tag.blockNum,
+ (char *) MAKE_PTR(bufHdr->data));
- if (status == SM_FAIL) /* disk failure ?! */
+ if (status == SM_FAIL) /* disk failure ?! */
elog(STOP, "FlushRelationBuffers: cannot write %u for %s",
bufHdr->tag.blockNum, bufHdr->blind.relname);
@@ -1573,9 +1575,10 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
TerminateBufferIO(bufHdr);
Assert(!(bufHdr->flags & BM_JUST_DIRTIED));
bufHdr->flags &= ~BM_DIRTY;
+
/*
- * Note that it's safe to change cntxDirty here because
- * of we protect it from upper writers by
+ * Note that it's safe to change cntxDirty here
+ * because of we protect it from upper writers by
* AccessExclusiveLock and from other bufmgr routines
* by BM_IO_IN_PROGRESS
*/
@@ -1593,9 +1596,7 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
return -2;
}
if (bufHdr->tag.blockNum >= firstDelBlock)
- {
BufTableDelete(bufHdr);
- }
}
}
SpinRelease(BufMgrLock);
@@ -1628,9 +1629,7 @@ ReleaseBuffer(Buffer buffer)
Assert(PrivateRefCount[buffer - 1] > 0);
if (PrivateRefCount[buffer - 1] > 1)
- {
PrivateRefCount[buffer - 1]--;
- }
else
{
SpinAcquire(BufMgrLock);
@@ -1671,9 +1670,7 @@ ReleaseBufferWithBufferLock(Buffer buffer)
Assert(PrivateRefCount[buffer - 1] > 0);
if (PrivateRefCount[buffer - 1] > 1)
- {
PrivateRefCount[buffer - 1]--;
- }
else
{
PrivateRefCount[buffer - 1] = 0;
@@ -2084,8 +2081,8 @@ LockBuffer(Buffer buffer, int mode)
*buflock |= BL_W_LOCK;
/*
- * This is not the best place to set cntxDirty flag (eg indices
- * do not always change buffer they lock in excl mode). But please
+ * This is not the best place to set cntxDirty flag (eg indices do
+ * not always change buffer they lock in excl mode). But please
* remember that it's critical to set cntxDirty *before* logging
* changes with XLogInsert() - see comments in BufferSync().
*/
@@ -2200,6 +2197,7 @@ InitBufferIO(void)
{
InProgressBuf = (BufferDesc *) 0;
}
+
#endif
/*
@@ -2245,7 +2243,7 @@ AbortBufferIO(void)
* NOTE: buffer must be excl locked.
*/
void
-MarkBufferForCleanup(Buffer buffer, void (*CleanupFunc)(Buffer))
+MarkBufferForCleanup(Buffer buffer, void (*CleanupFunc) (Buffer))
{
BufferDesc *bufHdr = &BufferDescriptors[buffer - 1];
@@ -2301,5 +2299,5 @@ BufferGetFileNode(Buffer buffer)
else
bufHdr = &BufferDescriptors[buffer - 1];
- return(bufHdr->tag.rnode);
+ return (bufHdr->tag.rnode);
}
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index a6d1b95f4fa..6e3cd756411 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/localbuf.c,v 1.39 2001/01/24 19:43:06 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/localbuf.c,v 1.40 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
/* a low tech search for now -- not optimized for scans */
for (i = 0; i < NLocBuffer; i++)
{
- if (LocalBufferDescriptors[i].tag.rnode.relNode ==
+ if (LocalBufferDescriptors[i].tag.rnode.relNode ==
reln->rd_node.relNode &&
LocalBufferDescriptors[i].tag.blockNum == blockNum)
{
@@ -125,8 +125,8 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
/*
* it's all ours now.
*
- * We need not in tblNode currently but will in future I think,
- * when we'll give up rel->rd_fd to fmgr cache.
+ * We need not in tblNode currently but will in future I think, when
+ * we'll give up rel->rd_fd to fmgr cache.
*/
bufHdr->tag.rnode = reln->rd_node;
bufHdr->tag.blockNum = blockNum;
@@ -142,12 +142,14 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
if (data == NULL)
elog(FATAL, "Out of memory in LocalBufferAlloc");
+
/*
- * This is a bit of a hack: bufHdr->data needs to be a shmem offset
- * for consistency with the shared-buffer case, so make it one
- * even though it's not really a valid shmem offset.
+ * This is a bit of a hack: bufHdr->data needs to be a shmem
+ * offset for consistency with the shared-buffer case, so make it
+ * one even though it's not really a valid shmem offset.
*/
bufHdr->data = MAKE_OFFSET(data);
+
/*
* Set pointer for use by BufferGetBlock() macro.
*/
diff --git a/src/backend/storage/buffer/s_lock.c b/src/backend/storage/buffer/s_lock.c
index 6bb76c2cda8..647802a19ea 100644
--- a/src/backend/storage/buffer/s_lock.c
+++ b/src/backend/storage/buffer/s_lock.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/Attic/s_lock.c,v 1.34 2001/02/24 22:42:45 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/Attic/s_lock.c,v 1.35 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,16 +43,16 @@
#define S_NSPINCYCLE 20
int s_spincycle[S_NSPINCYCLE] =
-{ 1, 10, 100, 1000,
- 10000, 1000, 1000, 1000,
- 10000, 1000, 1000, 10000,
- 1000, 1000, 10000, 1000,
- 10000, 1000, 10000, 30000
+{1, 10, 100, 1000,
+ 10000, 1000, 1000, 1000,
+ 10000, 1000, 1000, 10000,
+ 1000, 1000, 10000, 1000,
+ 10000, 1000, 10000, 30000
};
#define AVG_SPINCYCLE 5000 /* average entry in microsec: 100ms / 20 */
-#define DEFAULT_TIMEOUT (100*1000000) /* default timeout: 100 sec */
+#define DEFAULT_TIMEOUT (100*1000000) /* default timeout: 100 sec */
/*
@@ -74,10 +74,10 @@ s_lock_stuck(volatile slock_t *lock, const char *file, const int line)
/*
* s_lock_sleep() - sleep a pseudo-random amount of time, check for timeout
*
- * The 'timeout' is given in microsec, or may be 0 for "infinity". Note that
+ * The 'timeout' is given in microsec, or may be 0 for "infinity". Note that
* this will be a lower bound (a fairly loose lower bound, on most platforms).
*
- * 'microsec' is the number of microsec to delay per loop. Normally
+ * 'microsec' is the number of microsec to delay per loop. Normally
* 'microsec' is 0, specifying to use the next s_spincycle[] value.
* Some callers may pass a nonzero interval, specifying to use exactly that
* delay value rather than a pseudo-random delay.
@@ -98,7 +98,7 @@ s_lock_sleep(unsigned spins, int timeout, int microsec,
{
delay.tv_sec = 0;
delay.tv_usec = s_spincycle[spins % S_NSPINCYCLE];
- microsec = AVG_SPINCYCLE; /* use average to figure timeout */
+ microsec = AVG_SPINCYCLE; /* use average to figure timeout */
}
if (timeout > 0)
@@ -125,10 +125,11 @@ s_lock(volatile slock_t *lock, const char *file, const int line)
* If you are thinking of changing this code, be careful. This same
* loop logic is used in other places that call TAS() directly.
*
- * While waiting for a lock, we check for cancel/die interrupts (which
- * is a no-op if we are inside a critical section). The interrupt check
- * can be omitted in places that know they are inside a critical section.
- * Note that an interrupt must NOT be accepted after acquiring the lock.
+ * While waiting for a lock, we check for cancel/die interrupts (which is
+ * a no-op if we are inside a critical section). The interrupt check
+ * can be omitted in places that know they are inside a critical
+ * section. Note that an interrupt must NOT be accepted after
+ * acquiring the lock.
*/
while (TAS(lock))
{
@@ -155,8 +156,8 @@ static void
tas_dummy() /* really means: extern int tas(slock_t
* **lock); */
{
- __asm__ __volatile__(
-"\
+ __asm__ __volatile__(
+ "\
.global _tas \n\
_tas: \n\
movel sp@(0x4),a0 \n\
@@ -180,8 +181,8 @@ _success: \n\
static void
tas_dummy()
{
- __asm__ __volatile__(
-"\
+ __asm__ __volatile__(
+ "\
.globl tas \n\
.globl _tas \n\
_tas: \n\
@@ -200,15 +201,15 @@ success: \n\
");
}
-#endif /* __APPLE__ && __ppc__ */
+#endif /* __APPLE__ && __ppc__ */
#if defined(__powerpc__)
/* Note: need a nice gcc constrained asm version so it can be inlined */
static void
tas_dummy()
{
- __asm__ __volatile__(
-"\
+ __asm__ __volatile__(
+ "\
.global tas \n\
tas: \n\
lwarx 5,0,3 \n\
@@ -231,8 +232,8 @@ success: \n\
static void
tas_dummy()
{
- __asm__ _volatile__(
-"\
+ __asm__ _volatile__(
+ "\
.global tas \n\
tas: \n\
.frame $sp, 0, $31 \n\