On platforms where we can read or write the whole LSN atomically, we do
not need to lock the buffer header to prevent torn LSNs. We can do this
only on platforms with PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY, and when the
pd_lsn field is properly aligned.
For historical reasons the PageXLogRecPtr was defined as a struct with
two uint32 fields. This replaces it with a single uint64 value, to make
the intent clearer. To prevent issues with weak typedefs the value is
still wrapped in a struct.
This also adjusts heap_page_items() in pageinspect's heapfuncs.c, to ensure
proper alignment when reading the LSN from a page on alignment-sensitive
hardware.
Idea by Andres Freund. Initial patch by Andreas Karlsson, improved by
Peter Geoghegan. Minor tweaks by me.
Author: Andreas Karlsson <andreas@proxel.se>
Author: Peter Geoghegan <pg@bowt.ie>
Reviewed-by: Andres Freund <andres@anarazel.de>
Reviewed-by: Tomas Vondra <tomas@vondra.me>
Discussion: https://postgr.es/m/b6610c3b-3f59-465a-bdbb-8e9259f0abc4@proxel.se
#include "funcapi.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
+#include "pageinspect.h"
#include "port/pg_bitutils.h"
#include "utils/array.h"
#include "utils/builtins.h"
bytea *raw_page = PG_GETARG_BYTEA_P(0);
heap_page_items_state *inter_call_data = NULL;
FuncCallContext *fctx;
- int raw_page_size;
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to use raw page functions")));
- raw_page_size = VARSIZE(raw_page) - VARHDRSZ;
-
if (SRF_IS_FIRSTCALL())
{
TupleDesc tupdesc;
MemoryContext mctx;
- if (raw_page_size < SizeOfPageHeaderData)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page too small (%d bytes)", raw_page_size)));
-
fctx = SRF_FIRSTCALL_INIT();
mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx);
inter_call_data->tupd = tupdesc;
inter_call_data->offset = FirstOffsetNumber;
- inter_call_data->page = VARDATA(raw_page);
+ inter_call_data->page = get_page_from_raw(raw_page);
fctx->max_calls = PageGetMaxOffsetNumber(inter_call_data->page);
fctx->user_fctx = inter_call_data;
if (ItemIdHasStorage(id) &&
lp_len >= MinHeapTupleSize &&
lp_offset == MAXALIGN(lp_offset) &&
- lp_offset + lp_len <= raw_page_size)
+ lp_offset + lp_len <= BLCKSZ)
{
HeapTupleHeader tuphdr;
* Get a palloc'd, maxalign'ed page image from the result of get_raw_page()
*
* On machines with MAXALIGN = 8, the payload of a bytea is not maxaligned,
- * since it will start 4 bytes into a palloc'd value. On alignment-picky
- * machines, this will cause failures in accesses to 8-byte-wide values
- * within the page. We don't need to worry if accessing only 4-byte or
- * smaller fields, but when examining a struct that contains 8-byte fields,
- * use this function for safety.
+ * since it will start 4 bytes into a palloc'd value. PageHeaderData requires
+ * 8 byte alignment, so always use this function when accessing page header
+ * fields from a raw page bytea.
*/
Page
get_page_from_raw(bytea *raw_page)
{
PageHeader phdr = (PageHeader) page;
- PageXLogRecPtrSet(phdr->pd_lsn, (uint64) MASK_MARKER);
+ PageXLogRecPtrSet(&phdr->pd_lsn, (uint64) MASK_MARKER);
phdr->pd_checksum = MASK_MARKER;
}
/*
* BufferGetLSNAtomic
- * Retrieves the LSN of the buffer atomically using a buffer header lock.
- * This is necessary for some callers who may only hold a share lock on
- * the buffer. A share lock allows a concurrent backend to set hint bits
- * on the page, which in turn may require a WAL record to be emitted.
+ * Retrieves the LSN of the buffer atomically.
+ *
+ * This is necessary for some callers who may only hold a share lock on
+ * the buffer. A share lock allows a concurrent backend to set hint bits
+ * on the page, which in turn may require a WAL record to be emitted.
+ *
+ * On platforms with 8 byte atomic reads/writes, we don't need to do any
+ * additional locking. On platforms not supporting such 8 byte atomic
+ * reads/writes, we need to actually take the header lock.
*/
XLogRecPtr
BufferGetLSNAtomic(Buffer buffer)
{
- char *page = BufferGetPage(buffer);
- BufferDesc *bufHdr;
- XLogRecPtr lsn;
-
- /*
- * If we don't need locking for correctness, fastpath out.
- */
- if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
- return PageGetLSN(page);
-
/* Make sure we've got a real buffer, and that we hold a pin on it. */
Assert(BufferIsValid(buffer));
Assert(BufferIsPinned(buffer));
- bufHdr = GetBufferDescriptor(buffer - 1);
- LockBufHdr(bufHdr);
- lsn = PageGetLSN(page);
- UnlockBufHdr(bufHdr);
+#ifdef PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY
+ /* The whole 8-byte LSN is read atomically; no buffer header lock needed. */
+ return PageGetLSN(BufferGetPage(buffer));
+#else
+ {
+ char *page = BufferGetPage(buffer);
+ BufferDesc *bufHdr;
+ XLogRecPtr lsn;
+
+ /*
+ * If we don't need locking for correctness, fastpath out.
+ */
+ if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
+ return PageGetLSN(page);
+
+ /* Hold the buffer header lock to prevent reading a torn LSN. */
+ bufHdr = GetBufferDescriptor(buffer - 1);
+ LockBufHdr(bufHdr);
+ lsn = PageGetLSN(page);
+ UnlockBufHdr(bufHdr);
- return lsn;
+ return lsn;
+ }
+#endif
}
/* ---------------------------------------------------------------------
#define GistMarkFollowRight(page) ( GistPageGetOpaque(page)->flags |= F_FOLLOW_RIGHT)
#define GistClearFollowRight(page) ( GistPageGetOpaque(page)->flags &= ~F_FOLLOW_RIGHT)
+/* NSN accessors now pass the PageXLogRecPtr by address, per the new API. */
-#define GistPageGetNSN(page) ( PageXLogRecPtrGet(GistPageGetOpaque(page)->nsn))
-#define GistPageSetNSN(page, val) ( PageXLogRecPtrSet(GistPageGetOpaque(page)->nsn, val))
+#define GistPageGetNSN(page) ( PageXLogRecPtrGet(&GistPageGetOpaque(page)->nsn))
+#define GistPageSetNSN(page, val) ( PageXLogRecPtrSet(&GistPageGetOpaque(page)->nsn, val))
/*
/*
- * For historical reasons, the 64-bit LSN value is stored as two 32-bit
- * values.
+ * Store the LSN as a single 64-bit value, to allow atomic loads/stores.
+ *
+ * For historical reasons, the storage of 64-bit LSN values depends on CPU
+ * endianness; PageXLogRecPtr used to be a struct consisting of two 32-bit
+ * values. When reading (and writing) the pd_lsn field from page headers, the
+ * caller must convert from (and convert to) the platform's native endianness.
*/
typedef struct
{
- uint32 xlogid; /* high bits */
- uint32 xrecoff; /* low bits */
+ uint64 lsn;
} PageXLogRecPtr;
+#ifdef WORDS_BIGENDIAN
+
+/*
+ * Big-endian: the historical on-page layout (high 32 bits first, then low
+ * 32 bits) already matches a native big-endian uint64, so loads and stores
+ * need no conversion.
+ */
static inline XLogRecPtr
-PageXLogRecPtrGet(PageXLogRecPtr val)
+PageXLogRecPtrGet(const volatile PageXLogRecPtr *val)
{
- return (uint64) val.xlogid << 32 | val.xrecoff;
+ return val->lsn;
}
-#define PageXLogRecPtrSet(ptr, lsn) \
- ((ptr).xlogid = (uint32) ((lsn) >> 32), (ptr).xrecoff = (uint32) (lsn))
+static inline void
+PageXLogRecPtrSet(volatile PageXLogRecPtr *ptr, XLogRecPtr lsn)
+{
+ ptr->lsn = lsn;
+}
+
+#else
+
+/*
+ * Little-endian: a native uint64 stores its low half first, the opposite of
+ * the historical layout, so swap the two 32-bit halves on load and store.
+ * The local copy ensures the volatile field is read only once.
+ */
+static inline XLogRecPtr
+PageXLogRecPtrGet(const volatile PageXLogRecPtr *val)
+{
+ PageXLogRecPtr tmp = {val->lsn};
+
+ return (tmp.lsn << 32) | (tmp.lsn >> 32);
+}
+
+static inline void
+PageXLogRecPtrSet(volatile PageXLogRecPtr *ptr, XLogRecPtr lsn)
+{
+ ptr->lsn = (lsn << 32) | (lsn >> 32);
+}
+
+#endif
/*
* disk page organization
static inline XLogRecPtr
PageGetLSN(const PageData *page)
{
+ /* Pass pd_lsn by address so the accessor performs a single 64-bit read. */
- return PageXLogRecPtrGet(((const PageHeaderData *) page)->pd_lsn);
+ return PageXLogRecPtrGet(&((const PageHeaderData *) page)->pd_lsn);
}
+
static inline void
PageSetLSN(Page page, XLogRecPtr lsn)
{
+ /* Setter converts from native endianness to the on-page representation. */
- PageXLogRecPtrSet(((PageHeader) page)->pd_lsn, lsn);
+ PageXLogRecPtrSet(&((PageHeader) page)->pd_lsn, lsn);
}
static inline bool