for (i = 0; i < NBuffers; i++)
{
BufferDesc *bufHdr;
- uint32 buf_state;
+ uint64 buf_state;
CHECK_FOR_INTERRUPTS();
for (int i = 0; i < NBuffers; i++)
{
BufferDesc *bufHdr;
- uint32 buf_state;
+ uint64 buf_state;
CHECK_FOR_INTERRUPTS();
* noticeably increase the cost of the function.
*/
bufHdr = GetBufferDescriptor(i);
- buf_state = pg_atomic_read_u32(&bufHdr->state);
+ buf_state = pg_atomic_read_u64(&bufHdr->state);
if (buf_state & BM_VALID)
{
for (int i = 0; i < NBuffers; i++)
{
BufferDesc *bufHdr = GetBufferDescriptor(i);
- uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
+ uint64 buf_state = pg_atomic_read_u64(&bufHdr->state);
int usage_count;
CHECK_FOR_INTERRUPTS();
for (num_blocks = 0, i = 0; i < NBuffers; i++)
{
- uint32 buf_state;
+ uint64 buf_state;
CHECK_FOR_INTERRUPTS();
ClearBufferTag(&buf->tag);
- pg_atomic_init_u32(&buf->state, 0);
+ pg_atomic_init_u64(&buf->state, 0);
buf->wait_backend_pgprocno = INVALID_PROC_NUMBER;
buf->buf_id = i;
{
BufferDesc *bufHdr;
BufferTag tag;
- uint32 buf_state;
+ uint64 buf_state;
Assert(BufferIsValid(recent_buffer));
int b = -recent_buffer - 1;
bufHdr = GetLocalBufferDescriptor(b);
- buf_state = pg_atomic_read_u32(&bufHdr->state);
+ buf_state = pg_atomic_read_u64(&bufHdr->state);
/* Is it still valid and holding the right tag? */
if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
bufHdr = GetLocalBufferDescriptor(-buffers[i] - 1);
else
bufHdr = GetBufferDescriptor(buffers[i] - 1);
- Assert(pg_atomic_read_u32(&bufHdr->state) & BM_TAG_VALID);
- found = pg_atomic_read_u32(&bufHdr->state) & BM_VALID;
+ Assert(pg_atomic_read_u64(&bufHdr->state) & BM_TAG_VALID);
+ found = pg_atomic_read_u64(&bufHdr->state) & BM_VALID;
}
else
{
GetBufferDescriptor(buffer - 1);
Assert(BufferGetBlockNumber(buffer) == operation->blocknum + i);
- Assert(pg_atomic_read_u32(&buf_hdr->state) & BM_TAG_VALID);
+ Assert(pg_atomic_read_u64(&buf_hdr->state) & BM_TAG_VALID);
if (i < operation->nblocks_done)
- Assert(pg_atomic_read_u32(&buf_hdr->state) & BM_VALID);
+ Assert(pg_atomic_read_u64(&buf_hdr->state) & BM_VALID);
}
#endif
}
int existing_buf_id;
Buffer victim_buffer;
BufferDesc *victim_buf_hdr;
- uint32 victim_buf_state;
- uint32 set_bits = 0;
+ uint64 victim_buf_state;
+ uint64 set_bits = 0;
/* Make sure we will have room to remember the buffer pin */
ResourceOwnerEnlarge(CurrentResourceOwner);
uint32 oldHash; /* hash value for oldTag */
LWLock *oldPartitionLock; /* buffer partition lock for it */
uint32 oldFlags;
- uint32 buf_state;
+ uint64 buf_state;
/* Save the original buffer tag before dropping the spinlock */
oldTag = buf->tag;
static bool
InvalidateVictimBuffer(BufferDesc *buf_hdr)
{
- uint32 buf_state;
+ uint64 buf_state;
uint32 hash;
LWLock *partition_lock;
BufferTag tag;
LWLockRelease(partition_lock);
- buf_state = pg_atomic_read_u32(&buf_hdr->state);
+ buf_state = pg_atomic_read_u64(&buf_hdr->state);
Assert(!(buf_state & (BM_DIRTY | BM_VALID | BM_TAG_VALID)));
Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
- Assert(BUF_STATE_GET_REFCOUNT(pg_atomic_read_u32(&buf_hdr->state)) > 0);
+ Assert(BUF_STATE_GET_REFCOUNT(pg_atomic_read_u64(&buf_hdr->state)) > 0);
return true;
}
{
BufferDesc *buf_hdr;
Buffer buf;
- uint32 buf_state;
+ uint64 buf_state;
bool from_ring;
/*
/* a final set of sanity checks */
#ifdef USE_ASSERT_CHECKING
- buf_state = pg_atomic_read_u32(&buf_hdr->state);
+ buf_state = pg_atomic_read_u64(&buf_hdr->state);
Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
Assert(!(buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY)));
*/
do
{
- pg_atomic_fetch_and_u32(&existing_hdr->state, ~BM_VALID);
+ pg_atomic_fetch_and_u64(&existing_hdr->state, ~BM_VALID);
} while (!StartBufferIO(existing_hdr, true, false));
}
else
{
- uint32 buf_state;
- uint32 set_bits = 0;
+ uint64 buf_state;
+ uint64 set_bits = 0;
buf_state = LockBufHdr(victim_buf_hdr);
Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE));
}
- return pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY;
+ return pg_atomic_read_u64(&bufHdr->state) & BM_DIRTY;
}
/*
MarkBufferDirty(Buffer buffer)
{
BufferDesc *bufHdr;
- uint32 buf_state;
- uint32 old_buf_state;
+ uint64 buf_state;
+ uint64 old_buf_state;
if (!BufferIsValid(buffer))
elog(ERROR, "bad buffer ID: %d", buffer);
 * NB: We have to wait for the buffer header spinlock to be released, as
* TerminateBufferIO() relies on the spinlock.
*/
- old_buf_state = pg_atomic_read_u32(&bufHdr->state);
+ old_buf_state = pg_atomic_read_u64(&bufHdr->state);
for (;;)
{
if (old_buf_state & BM_LOCKED)
Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
- if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
+ if (pg_atomic_compare_exchange_u64(&bufHdr->state, &old_buf_state,
buf_state))
break;
}
if (ref == NULL)
{
- uint32 buf_state;
- uint32 old_buf_state;
+ uint64 buf_state;
+ uint64 old_buf_state;
- old_buf_state = pg_atomic_read_u32(&buf->state);
+ old_buf_state = pg_atomic_read_u64(&buf->state);
for (;;)
{
if (unlikely(skip_if_not_valid && !(old_buf_state & BM_VALID)))
buf_state += BUF_USAGECOUNT_ONE;
}
- if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
+ if (pg_atomic_compare_exchange_u64(&buf->state, &old_buf_state,
buf_state))
{
result = (buf_state & BM_VALID) != 0;
* that the buffer page is legitimately non-accessible here. We
* cannot meddle with that.
*/
- result = (pg_atomic_read_u32(&buf->state) & BM_VALID) != 0;
+ result = (pg_atomic_read_u64(&buf->state) & BM_VALID) != 0;
Assert(ref->data.refcount > 0);
ref->data.refcount++;
static void
PinBuffer_Locked(BufferDesc *buf)
{
- uint32 old_buf_state;
+ uint64 old_buf_state;
/*
 * As explained, we don't expect any preexisting pins. That allows us to
* Since we hold the buffer spinlock, we can update the buffer state and
* release the lock in one operation.
*/
- old_buf_state = pg_atomic_read_u32(&buf->state);
+ old_buf_state = pg_atomic_read_u64(&buf->state);
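	/*
	 * set_bits = 0, unset_bits = 0, refcount_change = +1: take the pin and
	 * release the header spinlock in a single CAS.
	 */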
UnlockBufHdrExt(buf, old_buf_state,
0, 0, 1);
* BM_PIN_COUNT_WAITER if it stops waiting for a reason other than this
* backend waking it up.
*/
- uint32 buf_state = LockBufHdr(buf);
+ uint64 buf_state = LockBufHdr(buf);
if ((buf_state & BM_PIN_COUNT_WAITER) &&
BUF_STATE_GET_REFCOUNT(buf_state) == 1)
ref->data.refcount--;
if (ref->data.refcount == 0)
{
- uint32 old_buf_state;
+ uint64 old_buf_state;
/*
* Mark buffer non-accessible to Valgrind.
Assert(!LWLockHeldByMe(BufferDescriptorGetContentLock(buf)));
/* decrement the shared reference count */
- old_buf_state = pg_atomic_fetch_sub_u32(&buf->state, BUF_REFCOUNT_ONE);
+ old_buf_state = pg_atomic_fetch_sub_u64(&buf->state, BUF_REFCOUNT_ONE);
/* Support LockBufferForCleanup() */
if (old_buf_state & BM_PIN_COUNT_WAITER)
static void
BufferSync(int flags)
{
- uint32 buf_state;
+ uint64 buf_state;
int buf_id;
int num_to_scan;
int num_spaces;
Oid last_tsid;
binaryheap *ts_heap;
int i;
- uint32 mask = BM_DIRTY;
+ uint64 mask = BM_DIRTY;
WritebackContext wb_context;
/*
for (buf_id = 0; buf_id < NBuffers; buf_id++)
{
BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
- uint32 set_bits = 0;
+ uint64 set_bits = 0;
/*
* Header spinlock is enough to examine BM_DIRTY, see comment in
* write the buffer though we didn't need to. It doesn't seem worth
* guarding against this, though.
*/
- if (pg_atomic_read_u32(&bufHdr->state) & BM_CHECKPOINT_NEEDED)
+ if (pg_atomic_read_u64(&bufHdr->state) & BM_CHECKPOINT_NEEDED)
{
if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
{
{
BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
int result = 0;
- uint32 buf_state;
+ uint64 buf_state;
BufferTag tag;
/* Make sure we can handle the pin */
int32 loccount;
char *result;
ProcNumber backend;
- uint32 buf_state;
+ uint64 buf_state;
Assert(BufferIsValid(buffer));
if (BufferIsLocal(buffer))
}
/* theoretically we should lock the bufHdr here */
- buf_state = pg_atomic_read_u32(&buf->state);
+ buf_state = pg_atomic_read_u64(&buf->state);
- result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
+ result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%" PRIx64 ", refcount=%u %d)",
buffer,
relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
BufTagGetForkNum(&buf->tag)).str,
instr_time io_start;
Block bufBlock;
char *bufToWrite;
- uint32 buf_state;
+ uint64 buf_state;
/*
* Try to start an I/O operation. If StartBufferIO returns false, then
* not random garbage.
*/
bufHdr = GetBufferDescriptor(buffer - 1);
- return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
+ return (pg_atomic_read_u64(&bufHdr->state) & BM_PERMANENT) != 0;
}
/*
{
for (i = 0; i < NLocBuffer; i++)
{
- uint32 buf_state;
+ uint64 buf_state;
bufHdr = GetLocalBufferDescriptor(i);
if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
- ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
+ ((buf_state = pg_atomic_read_u64(&bufHdr->state)) &
(BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
ErrorContextCallback errcallback;
for (i = 0; i < NBuffers; i++)
{
- uint32 buf_state;
+ uint64 buf_state;
bufHdr = GetBufferDescriptor(i);
{
SMgrSortArray *srelent = NULL;
BufferDesc *bufHdr = GetBufferDescriptor(i);
- uint32 buf_state;
+ uint64 buf_state;
/*
* As in DropRelationBuffers, an unlocked precheck should be safe and
for (i = 0; i < NBuffers; i++)
{
- uint32 buf_state;
+ uint64 buf_state;
bufHdr = GetBufferDescriptor(i);
* is only intended to be used in cases where failing to write out the
* data would be harmless anyway, it doesn't really matter.
*/
- if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
+ if ((pg_atomic_read_u64(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
(BM_DIRTY | BM_JUST_DIRTIED))
{
XLogRecPtr lsn = InvalidXLogRecPtr;
bool dirtied = false;
bool delayChkptFlags = false;
- uint32 buf_state;
+ uint64 buf_state;
/*
* If we need to protect hint bit updates from torn writes, WAL-log a
* when we call XLogInsert() since the value changes dynamically.
*/
if (XLogHintBitIsNeeded() &&
- (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
+ (pg_atomic_read_u64(&bufHdr->state) & BM_PERMANENT))
{
/*
* If we must not write WAL, due to a relfilelocator-specific
if (buf)
{
- uint32 buf_state;
- uint32 unset_bits = 0;
+ uint64 buf_state;
+ uint64 unset_bits = 0;
buf_state = LockBufHdr(buf);
for (;;)
{
- uint32 buf_state;
- uint32 unset_bits = 0;
+ uint64 buf_state;
+ uint64 unset_bits = 0;
/* Try to acquire lock */
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
ConditionalLockBufferForCleanup(Buffer buffer)
{
BufferDesc *bufHdr;
- uint32 buf_state,
+ uint64 buf_state,
refcount;
Assert(BufferIsValid(buffer));
IsBufferCleanupOK(Buffer buffer)
{
BufferDesc *bufHdr;
- uint32 buf_state;
+ uint64 buf_state;
Assert(BufferIsValid(buffer));
ConditionVariablePrepareToSleep(cv);
for (;;)
{
- uint32 buf_state;
+ uint64 buf_state;
PgAioWaitRef iow;
/*
bool
StartBufferIO(BufferDesc *buf, bool forInput, bool nowait)
{
- uint32 buf_state;
+ uint64 buf_state;
ResourceOwnerEnlarge(CurrentResourceOwner);
* is being released)
*/
void
-TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits,
+TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint64 set_flag_bits,
bool forget_owner, bool release_aio)
{
- uint32 buf_state;
- uint32 unset_flag_bits = 0;
+ uint64 buf_state;
+ uint64 unset_flag_bits = 0;
int refcount_change = 0;
buf_state = LockBufHdr(buf);
AbortBufferIO(Buffer buffer)
{
BufferDesc *buf_hdr = GetBufferDescriptor(buffer - 1);
- uint32 buf_state;
+ uint64 buf_state;
buf_state = LockBufHdr(buf_hdr);
Assert(buf_state & (BM_IO_IN_PROGRESS | BM_TAG_VALID));
/*
* Lock buffer header - set BM_LOCKED in buffer state.
*/
-uint32
+uint64
LockBufHdr(BufferDesc *desc)
{
- uint32 old_buf_state;
+ uint64 old_buf_state;
Assert(!BufferIsLocal(BufferDescriptorGetBuffer(desc)));
* the spin-delay infrastructure. The work necessary for that shows up
* in profiles and is rarely necessary.
*/
- old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
+ old_buf_state = pg_atomic_fetch_or_u64(&desc->state, BM_LOCKED);
if (likely(!(old_buf_state & BM_LOCKED)))
break; /* got lock */
while (old_buf_state & BM_LOCKED)
{
perform_spin_delay(&delayStatus);
- old_buf_state = pg_atomic_read_u32(&desc->state);
+ old_buf_state = pg_atomic_read_u64(&desc->state);
}
finish_spin_delay(&delayStatus);
}
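/*
 * A minimal usage sketch (hypothetical helper, not part of this patch):
 * LockBufHdr() pairs with UnlockBufHdr(), or with UnlockBufHdrExt() when
 * flag bits or the refcount must change while the lock is released.
 */
static bool
BufferIsDirtyLocked(BufferDesc *desc)
{
	uint64		buf_state = LockBufHdr(desc);
	bool		dirty = (buf_state & BM_DIRTY) != 0;

	UnlockBufHdr(desc);
	return dirty;
}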
* Obviously the buffer could be locked by the time the value is returned, so
* this is primarily useful in CAS style loops.
*/
-pg_noinline uint32
+pg_noinline uint64
WaitBufHdrUnlocked(BufferDesc *buf)
{
SpinDelayStatus delayStatus;
- uint32 buf_state;
+ uint64 buf_state;
init_local_spin_delay(&delayStatus);
- buf_state = pg_atomic_read_u32(&buf->state);
+ buf_state = pg_atomic_read_u64(&buf->state);
while (buf_state & BM_LOCKED)
{
perform_spin_delay(&delayStatus);
- buf_state = pg_atomic_read_u32(&buf->state);
+ buf_state = pg_atomic_read_u64(&buf->state);
}
finish_spin_delay(&delayStatus);
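/*
 * A minimal sketch of such a CAS-style loop (hypothetical helper, not part
 * of this patch), following the same pattern as MarkBufferDirty() above:
 * never modify the state while the header spinlock (BM_LOCKED) is held.
 */
static void
SetStateBitsUnlocked(BufferDesc *buf, uint64 bits)
{
	uint64		old_buf_state = pg_atomic_read_u64(&buf->state);

	for (;;)
	{
		if (old_buf_state & BM_LOCKED)
			old_buf_state = WaitBufHdrUnlocked(buf);

		/* on failure, the CAS refreshes old_buf_state with the current value */
		if (pg_atomic_compare_exchange_u64(&buf->state, &old_buf_state,
										   old_buf_state | bits))
			break;
	}
}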
static bool
EvictUnpinnedBufferInternal(BufferDesc *desc, bool *buffer_flushed)
{
- uint32 buf_state;
+ uint64 buf_state;
bool result;
*buffer_flushed = false;
- buf_state = pg_atomic_read_u32(&(desc->state));
+ buf_state = pg_atomic_read_u64(&(desc->state));
Assert(buf_state & BM_LOCKED);
if ((buf_state & BM_VALID) == 0)
for (int buf = 1; buf <= NBuffers; buf++)
{
BufferDesc *desc = GetBufferDescriptor(buf - 1);
- uint32 buf_state;
+ uint64 buf_state;
bool buffer_flushed;
CHECK_FOR_INTERRUPTS();
- buf_state = pg_atomic_read_u32(&desc->state);
+ buf_state = pg_atomic_read_u64(&desc->state);
if (!(buf_state & BM_VALID))
continue;
for (int buf = 1; buf <= NBuffers; buf++)
{
BufferDesc *desc = GetBufferDescriptor(buf - 1);
- uint32 buf_state = pg_atomic_read_u32(&(desc->state));
+ uint64 buf_state = pg_atomic_read_u64(&(desc->state));
bool buffer_flushed;
CHECK_FOR_INTERRUPTS();
MarkDirtyUnpinnedBufferInternal(Buffer buf, BufferDesc *desc,
bool *buffer_already_dirty)
{
- uint32 buf_state;
+ uint64 buf_state;
bool result = false;
*buffer_already_dirty = false;
- buf_state = pg_atomic_read_u32(&(desc->state));
+ buf_state = pg_atomic_read_u64(&(desc->state));
Assert(buf_state & BM_LOCKED);
if ((buf_state & BM_VALID) == 0)
for (int buf = 1; buf <= NBuffers; buf++)
{
BufferDesc *desc = GetBufferDescriptor(buf - 1);
- uint32 buf_state = pg_atomic_read_u32(&(desc->state));
+ uint64 buf_state = pg_atomic_read_u64(&(desc->state));
bool buffer_already_dirty;
CHECK_FOR_INTERRUPTS();
for (int buf = 1; buf <= NBuffers; buf++)
{
BufferDesc *desc = GetBufferDescriptor(buf - 1);
- uint32 buf_state;
+ uint64 buf_state;
bool buffer_already_dirty;
CHECK_FOR_INTERRUPTS();
- buf_state = pg_atomic_read_u32(&desc->state);
+ buf_state = pg_atomic_read_u64(&desc->state);
if (!(buf_state & BM_VALID))
continue;
BufferDesc *buf_hdr = is_temp ?
GetLocalBufferDescriptor(-buffer - 1)
: GetBufferDescriptor(buffer - 1);
- uint32 buf_state;
+ uint64 buf_state;
/*
* Check that all the buffers are actually ones that could conceivably
}
if (is_temp)
- buf_state = pg_atomic_read_u32(&buf_hdr->state);
+ buf_state = pg_atomic_read_u64(&buf_hdr->state);
else
buf_state = LockBufHdr(buf_hdr);
if (is_temp)
{
buf_state += BUF_REFCOUNT_ONE;
- pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
+ pg_atomic_unlocked_write_u64(&buf_hdr->state, buf_state);
}
else
UnlockBufHdrExt(buf_hdr, buf_state, 0, 0, 1);
: GetBufferDescriptor(buffer - 1);
BufferTag tag = buf_hdr->tag;
char *bufdata = BufferGetBlock(buffer);
- uint32 set_flag_bits;
+ uint64 set_flag_bits;
int piv_flags;
/* check that the buffer is in the expected state for a read */
#ifdef USE_ASSERT_CHECKING
{
- uint32 buf_state = pg_atomic_read_u32(&buf_hdr->state);
+ uint64 buf_state = pg_atomic_read_u64(&buf_hdr->state);
Assert(buf_state & BM_TAG_VALID);
Assert(!(buf_state & BM_VALID));
/* Prototypes for internal functions */
static BufferDesc *GetBufferFromRing(BufferAccessStrategy strategy,
- uint32 *buf_state);
+ uint64 *buf_state);
static void AddBufferToRing(BufferAccessStrategy strategy,
BufferDesc *buf);
* before returning.
*/
BufferDesc *
-StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_ring)
+StrategyGetBuffer(BufferAccessStrategy strategy, uint64 *buf_state, bool *from_ring)
{
BufferDesc *buf;
int bgwprocno;
trycounter = NBuffers;
for (;;)
{
- uint32 old_buf_state;
- uint32 local_buf_state;
+ uint64 old_buf_state;
+ uint64 local_buf_state;
buf = GetBufferDescriptor(ClockSweepTick());
* Check whether the buffer can be used and pin it if so. Do this
* using a CAS loop, to avoid having to lock the buffer header.
*/
- old_buf_state = pg_atomic_read_u32(&buf->state);
+ old_buf_state = pg_atomic_read_u64(&buf->state);
for (;;)
{
local_buf_state = old_buf_state;
{
local_buf_state -= BUF_USAGECOUNT_ONE;
- if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
+ if (pg_atomic_compare_exchange_u64(&buf->state, &old_buf_state,
local_buf_state))
{
trycounter = NBuffers;
/* pin the buffer if the CAS succeeds */
local_buf_state += BUF_REFCOUNT_ONE;
- if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
+ if (pg_atomic_compare_exchange_u64(&buf->state, &old_buf_state,
local_buf_state))
{
/* Found a usable buffer */
* returning.
*/
static BufferDesc *
-GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state)
+GetBufferFromRing(BufferAccessStrategy strategy, uint64 *buf_state)
{
BufferDesc *buf;
Buffer bufnum;
- uint32 old_buf_state;
- uint32 local_buf_state; /* to avoid repeated (de-)referencing */
+ uint64 old_buf_state;
+ uint64 local_buf_state; /* to avoid repeated (de-)referencing */
/* Advance to next ring slot */
* Check whether the buffer can be used and pin it if so. Do this using a
* CAS loop, to avoid having to lock the buffer header.
*/
- old_buf_state = pg_atomic_read_u32(&buf->state);
+ old_buf_state = pg_atomic_read_u64(&buf->state);
for (;;)
{
local_buf_state = old_buf_state;
/* pin the buffer if the CAS succeeds */
local_buf_state += BUF_REFCOUNT_ONE;
- if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
+ if (pg_atomic_compare_exchange_u64(&buf->state, &old_buf_state,
local_buf_state))
{
*buf_state = local_buf_state;
}
else
{
- uint32 buf_state;
+ uint64 buf_state;
victim_buffer = GetLocalVictimBuffer();
bufid = -victim_buffer - 1;
*/
bufHdr->tag = newTag;
- buf_state = pg_atomic_read_u32(&bufHdr->state);
+ buf_state = pg_atomic_read_u64(&bufHdr->state);
buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
- pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
+ pg_atomic_unlocked_write_u64(&bufHdr->state, buf_state);
*foundPtr = false;
}
if (LocalRefCount[victim_bufid] == 0)
{
- uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
+ uint64 buf_state = pg_atomic_read_u64(&bufHdr->state);
if (BUF_STATE_GET_USAGECOUNT(buf_state) > 0)
{
buf_state -= BUF_USAGECOUNT_ONE;
- pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
+ pg_atomic_unlocked_write_u64(&bufHdr->state, buf_state);
trycounter = NLocBuffer;
}
else if (BUF_STATE_GET_REFCOUNT(buf_state) > 0)
 * this buffer is not referenced but it might still be dirty. If that's
* the case, write it out before reusing it!
*/
- if (pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY)
+ if (pg_atomic_read_u64(&bufHdr->state) & BM_DIRTY)
FlushLocalBuffer(bufHdr, NULL);
/*
* Remove the victim buffer from the hashtable and mark as invalid.
*/
- if (pg_atomic_read_u32(&bufHdr->state) & BM_TAG_VALID)
+ if (pg_atomic_read_u64(&bufHdr->state) & BM_TAG_VALID)
{
InvalidateLocalBuffer(bufHdr, false);
if (found)
{
BufferDesc *existing_hdr;
- uint32 buf_state;
+ uint64 buf_state;
UnpinLocalBuffer(BufferDescriptorGetBuffer(victim_buf_hdr));
/*
* Clear the BM_VALID bit, do StartLocalBufferIO() and proceed.
*/
- buf_state = pg_atomic_read_u32(&existing_hdr->state);
+ buf_state = pg_atomic_read_u64(&existing_hdr->state);
Assert(buf_state & BM_TAG_VALID);
Assert(!(buf_state & BM_DIRTY));
buf_state &= ~BM_VALID;
- pg_atomic_unlocked_write_u32(&existing_hdr->state, buf_state);
+ pg_atomic_unlocked_write_u64(&existing_hdr->state, buf_state);
/* no need to loop for local buffers */
StartLocalBufferIO(existing_hdr, true, false);
}
else
{
- uint32 buf_state = pg_atomic_read_u32(&victim_buf_hdr->state);
+ uint64 buf_state = pg_atomic_read_u64(&victim_buf_hdr->state);
Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));
buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
- pg_atomic_unlocked_write_u32(&victim_buf_hdr->state, buf_state);
+ pg_atomic_unlocked_write_u64(&victim_buf_hdr->state, buf_state);
hresult->id = victim_buf_id;
{
Buffer buf = buffers[i];
BufferDesc *buf_hdr;
- uint32 buf_state;
+ uint64 buf_state;
buf_hdr = GetLocalBufferDescriptor(-buf - 1);
- buf_state = pg_atomic_read_u32(&buf_hdr->state);
+ buf_state = pg_atomic_read_u64(&buf_hdr->state);
buf_state |= BM_VALID;
- pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
+ pg_atomic_unlocked_write_u64(&buf_hdr->state, buf_state);
}
*extended_by = extend_by;
{
int bufid;
BufferDesc *bufHdr;
- uint32 buf_state;
+ uint64 buf_state;
Assert(BufferIsLocal(buffer));
bufHdr = GetLocalBufferDescriptor(bufid);
- buf_state = pg_atomic_read_u32(&bufHdr->state);
+ buf_state = pg_atomic_read_u64(&bufHdr->state);
if (!(buf_state & BM_DIRTY))
pgBufferUsage.local_blks_dirtied++;
buf_state |= BM_DIRTY;
- pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
+ pg_atomic_unlocked_write_u64(&bufHdr->state, buf_state);
}
/*
bool
StartLocalBufferIO(BufferDesc *bufHdr, bool forInput, bool nowait)
{
- uint32 buf_state;
+ uint64 buf_state;
/*
* With AIO the buffer could have IO in progress, e.g. when there are two
/* Once we get here, there is definitely no I/O active on this buffer */
/* Check if someone else already did the I/O */
- buf_state = pg_atomic_read_u32(&bufHdr->state);
+ buf_state = pg_atomic_read_u64(&bufHdr->state);
if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
{
return false;
* Like TerminateBufferIO, but for local buffers
*/
void
-TerminateLocalBufferIO(BufferDesc *bufHdr, bool clear_dirty, uint32 set_flag_bits,
+TerminateLocalBufferIO(BufferDesc *bufHdr, bool clear_dirty, uint64 set_flag_bits,
bool release_aio)
{
/* Only need to adjust flags */
- uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
+ uint64 buf_state = pg_atomic_read_u64(&bufHdr->state);
/* BM_IO_IN_PROGRESS isn't currently used for local buffers */
}
buf_state |= set_flag_bits;
- pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
+ pg_atomic_unlocked_write_u64(&bufHdr->state, buf_state);
/* local buffers don't track IO using resowners */
{
Buffer buffer = BufferDescriptorGetBuffer(bufHdr);
int bufid = -buffer - 1;
- uint32 buf_state;
+ uint64 buf_state;
LocalBufferLookupEnt *hresult;
/*
Assert(!pgaio_wref_valid(&bufHdr->io_wref));
}
- buf_state = pg_atomic_read_u32(&bufHdr->state);
+ buf_state = pg_atomic_read_u64(&bufHdr->state);
/*
* We need to test not just LocalRefCount[bufid] but also the BufferDesc
ClearBufferTag(&bufHdr->tag);
buf_state &= ~BUF_FLAG_MASK;
buf_state &= ~BUF_USAGECOUNT_MASK;
- pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
+ pg_atomic_unlocked_write_u64(&bufHdr->state, buf_state);
}
/*
for (i = 0; i < NLocBuffer; i++)
{
BufferDesc *bufHdr = GetLocalBufferDescriptor(i);
- uint32 buf_state;
+ uint64 buf_state;
- buf_state = pg_atomic_read_u32(&bufHdr->state);
+ buf_state = pg_atomic_read_u64(&bufHdr->state);
if (!(buf_state & BM_TAG_VALID) ||
!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator))
for (i = 0; i < NLocBuffer; i++)
{
BufferDesc *bufHdr = GetLocalBufferDescriptor(i);
- uint32 buf_state;
+ uint64 buf_state;
- buf_state = pg_atomic_read_u32(&bufHdr->state);
+ buf_state = pg_atomic_read_u64(&bufHdr->state);
if ((buf_state & BM_TAG_VALID) &&
BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator))
bool
PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
{
- uint32 buf_state;
+ uint64 buf_state;
Buffer buffer = BufferDescriptorGetBuffer(buf_hdr);
int bufid = -buffer - 1;
- buf_state = pg_atomic_read_u32(&buf_hdr->state);
+ buf_state = pg_atomic_read_u64(&buf_hdr->state);
if (LocalRefCount[bufid] == 0)
{
{
buf_state += BUF_USAGECOUNT_ONE;
}
- pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
+ pg_atomic_unlocked_write_u64(&buf_hdr->state, buf_state);
/*
* See comment in PinBuffer().
if (--LocalRefCount[buffid] == 0)
{
BufferDesc *buf_hdr = GetLocalBufferDescriptor(buffid);
- uint32 buf_state;
+ uint64 buf_state;
NLocalPinnedBuffers--;
- buf_state = pg_atomic_read_u32(&buf_hdr->state);
+ buf_state = pg_atomic_read_u64(&buf_hdr->state);
Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
buf_state -= BUF_REFCOUNT_ONE;
- pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
+ pg_atomic_unlocked_write_u64(&buf_hdr->state, buf_state);
/* see comment in UnpinBufferNoOwner */
VALGRIND_MAKE_MEM_NOACCESS(LocalBufHdrGetBlock(buf_hdr), BLCKSZ);
#include "utils/resowner.h"
/*
- * Buffer state is a single 32-bit variable where following data is combined.
+ * Buffer state is a single 64-bit variable where the following data is combined.
*
* State of the buffer itself (in order):
* - 18 bits refcount
 * Combining these values allows performing some operations without locking
* the buffer header, by modifying them together with a CAS loop.
*
+ * NB: A future commit will use a significant portion of the remaining bits to
+ * implement buffer locking as part of the state variable.
+ *
* The definition of buffer state components is below.
*/
#define BUF_REFCOUNT_BITS 18
/* refcount related definitions */
#define BUF_REFCOUNT_ONE 1
#define BUF_REFCOUNT_MASK \
- ((1U << BUF_REFCOUNT_BITS) - 1)
+ ((UINT64CONST(1) << BUF_REFCOUNT_BITS) - 1)
/* usage count related definitions */
#define BUF_USAGECOUNT_SHIFT \
BUF_REFCOUNT_BITS
#define BUF_USAGECOUNT_MASK \
- (((1U << BUF_USAGECOUNT_BITS) - 1) << (BUF_USAGECOUNT_SHIFT))
+ (((UINT64CONST(1) << BUF_USAGECOUNT_BITS) - 1) << (BUF_USAGECOUNT_SHIFT))
#define BUF_USAGECOUNT_ONE \
- (1U << BUF_REFCOUNT_BITS)
+ (UINT64CONST(1) << BUF_REFCOUNT_BITS)
/* flags related definitions */
#define BUF_FLAG_SHIFT \
(BUF_REFCOUNT_BITS + BUF_USAGECOUNT_BITS)
#define BUF_FLAG_MASK \
- (((1U << BUF_FLAG_BITS) - 1) << BUF_FLAG_SHIFT)
+ (((UINT64CONST(1) << BUF_FLAG_BITS) - 1) << BUF_FLAG_SHIFT)
/* Get refcount and usagecount from buffer state */
#define BUF_STATE_GET_REFCOUNT(state) \
- ((state) & BUF_REFCOUNT_MASK)
+ ((uint32)((state) & BUF_REFCOUNT_MASK))
#define BUF_STATE_GET_USAGECOUNT(state) \
- (((state) & BUF_USAGECOUNT_MASK) >> BUF_USAGECOUNT_SHIFT)
+ ((uint32)(((state) & BUF_USAGECOUNT_MASK) >> BUF_USAGECOUNT_SHIFT))
/*
* Flags for buffer descriptors
*/
#define BUF_DEFINE_FLAG(flagno) \
- (1U << (BUF_REFCOUNT_BITS + BUF_USAGECOUNT_BITS + (flagno)))
+ (UINT64CONST(1) << (BUF_FLAG_SHIFT + (flagno)))
/* buffer header is locked */
#define BM_LOCKED BUF_DEFINE_FLAG( 0)
*/
#define BM_MAX_USAGE_COUNT 5
-StaticAssertDecl(BM_MAX_USAGE_COUNT < (1 << BUF_USAGECOUNT_BITS),
+StaticAssertDecl(BM_MAX_USAGE_COUNT < (UINT64CONST(1) << BUF_USAGECOUNT_BITS),
"BM_MAX_USAGE_COUNT doesn't fit in BUF_USAGECOUNT_BITS bits");
StaticAssertDecl(MAX_BACKENDS_BITS <= BUF_REFCOUNT_BITS,
"MAX_BACKENDS_BITS needs to be <= BUF_REFCOUNT_BITS");
* We use this same struct for local buffer headers, but the locks are not
* used and not all of the flag bits are useful either. To avoid unnecessary
* overhead, manipulations of the state field should be done without actual
- * atomic operations (i.e. only pg_atomic_read_u32() and
- * pg_atomic_unlocked_write_u32()).
+ * atomic operations (i.e. only pg_atomic_read_u64() and
+ * pg_atomic_unlocked_write_u64()).
*
* Be careful to avoid increasing the size of the struct when adding or
* reordering members. Keeping it below 64 bytes (the most common CPU
* State of the buffer, containing flags, refcount and usagecount. See
* BUF_* and BM_* defines at the top of this file.
*/
- pg_atomic_uint32 state;
+ pg_atomic_uint64 state;
/*
* Backend of pin-count waiter. The buffer header spinlock needs to be
* Functions for acquiring/releasing a shared buffer header's spinlock. Do
* not apply these to local buffers!
*/
-extern uint32 LockBufHdr(BufferDesc *desc);
+extern uint64 LockBufHdr(BufferDesc *desc);
/*
* Unlock the buffer header.
static inline void
UnlockBufHdr(BufferDesc *desc)
{
- Assert(pg_atomic_read_u32(&desc->state) & BM_LOCKED);
+ Assert(pg_atomic_read_u64(&desc->state) & BM_LOCKED);
- pg_atomic_fetch_sub_u32(&desc->state, BM_LOCKED);
+ pg_atomic_fetch_sub_u64(&desc->state, BM_LOCKED);
}
/*
* Note that this approach would not work for usagecount, since we need to cap
* the usagecount at BM_MAX_USAGE_COUNT.
*/
-static inline uint32
-UnlockBufHdrExt(BufferDesc *desc, uint32 old_buf_state,
- uint32 set_bits, uint32 unset_bits,
+static inline uint64
+UnlockBufHdrExt(BufferDesc *desc, uint64 old_buf_state,
+ uint64 set_bits, uint64 unset_bits,
int refcount_change)
{
for (;;)
{
- uint32 buf_state = old_buf_state;
+ uint64 buf_state = old_buf_state;
Assert(buf_state & BM_LOCKED);
if (refcount_change != 0)
buf_state += BUF_REFCOUNT_ONE * refcount_change;
- if (pg_atomic_compare_exchange_u32(&desc->state, &old_buf_state,
+ if (pg_atomic_compare_exchange_u64(&desc->state, &old_buf_state,
buf_state))
{
return old_buf_state;
}
}
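/*
 * A minimal usage sketch (hypothetical, not part of this patch): drop one
 * pin and clear BM_VALID while releasing the header spinlock, in a single
 * CAS instead of a separate unlock step.
 */
static inline void
InvalidateAndUnpinLocked(BufferDesc *desc)
{
	uint64		buf_state = LockBufHdr(desc);

	Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
	UnlockBufHdrExt(desc, buf_state,
					0,			/* set_bits */
					BM_VALID,	/* unset_bits */
					-1);		/* refcount_change */
}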
-extern uint32 WaitBufHdrUnlocked(BufferDesc *buf);
+extern uint64 WaitBufHdrUnlocked(BufferDesc *buf);
/* in bufmgr.c */
/* solely to make it easier to write tests */
extern bool StartBufferIO(BufferDesc *buf, bool forInput, bool nowait);
-extern void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits,
+extern void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint64 set_flag_bits,
bool forget_owner, bool release_aio);
/* freelist.c */
extern IOContext IOContextForStrategy(BufferAccessStrategy strategy);
extern BufferDesc *StrategyGetBuffer(BufferAccessStrategy strategy,
- uint32 *buf_state, bool *from_ring);
+ uint64 *buf_state, bool *from_ring);
extern bool StrategyRejectBuffer(BufferAccessStrategy strategy,
BufferDesc *buf, bool from_ring);
uint32 *extended_by);
extern void MarkLocalBufferDirty(Buffer buffer);
extern void TerminateLocalBufferIO(BufferDesc *bufHdr, bool clear_dirty,
- uint32 set_flag_bits, bool release_aio);
+ uint64 set_flag_bits, bool release_aio);
extern bool StartLocalBufferIO(BufferDesc *bufHdr, bool forInput, bool nowait);
extern void FlushLocalBuffer(BufferDesc *bufHdr, SMgrRelation reln);
extern void InvalidateLocalBuffer(BufferDesc *bufHdr, bool check_unreferenced);
/*
* Note: MAX_BACKENDS_BITS is 18 as that is the space available for buffer
- * refcounts in buf_internals.h. This limitation could be lifted by using a
- * 64bit state; but it's unlikely to be worthwhile as 2^18-1 backends exceed
- * currently realistic configurations. Even if that limitation were removed,
- * we still could not a) exceed 2^23-1 because inval.c stores the ProcNumber
- * as a 3-byte signed integer, b) INT_MAX/4 because some places compute
- * 4*MaxBackends without any overflow check. We check that the configured
- * number of backends does not exceed MAX_BACKENDS in InitializeMaxBackends().
+ * refcounts in buf_internals.h. This limitation could be lifted, but it's
+ * unlikely to be worthwhile as 2^18-1 backends exceed currently realistic
+ * configurations. Even if that limitation were removed, we still could not a)
+ * exceed 2^23-1 because inval.c stores the ProcNumber as a 3-byte signed
+ * integer, b) INT_MAX/4 because some places compute 4*MaxBackends without any
+ * overflow check. We check that the configured number of backends does not
+ * exceed MAX_BACKENDS in InitializeMaxBackends().
*/
#define MAX_BACKENDS_BITS 18
#define MAX_BACKENDS ((1U << MAX_BACKENDS_BITS)-1)
{
Buffer buf;
BufferDesc *buf_hdr;
- uint32 buf_state;
+ uint64 buf_state;
bool was_pinned = false;
- uint32 unset_bits = 0;
+ uint64 unset_bits = 0;
/* place buffer in shared buffers without erroring out */
buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK, NULL);
if (RelationUsesLocalBuffers(rel))
{
buf_hdr = GetLocalBufferDescriptor(-buf - 1);
- buf_state = pg_atomic_read_u32(&buf_hdr->state);
+ buf_state = pg_atomic_read_u64(&buf_hdr->state);
}
else
{
if (RelationUsesLocalBuffers(rel))
{
buf_state &= ~unset_bits;
- pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
+ pg_atomic_unlocked_write_u64(&buf_hdr->state, buf_state);
}
else
{
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- if (pg_atomic_read_u32(&buf_hdr->state) & BM_DIRTY)
+ if (pg_atomic_read_u64(&buf_hdr->state) & BM_DIRTY)
{
if (BufferIsLocal(buf))
FlushLocalBuffer(buf_hdr, NULL);
bool io_error = PG_GETARG_BOOL(3);
bool release_aio = PG_GETARG_BOOL(4);
bool clear_dirty = false;
- uint32 set_flag_bits = 0;
+ uint64 set_flag_bits = 0;
if (io_error)
set_flag_bits |= BM_IO_ERROR;