Fix assorted defects in 09adc9a8c09c9640de05c7023b27fb83c761e91c.
author: Robert Haas
Thu, 21 Apr 2016 17:24:09 +0000 (13:24 -0400)
committer: Robert Haas
Thu, 21 Apr 2016 17:27:41 +0000 (13:27 -0400)
That commit increased all shared memory allocations to the next higher
multiple of PG_CACHE_LINE_SIZE, but it didn't ensure that allocation
started on a cache line boundary.  It also failed to remove a couple
other pieces of now-useless code.

BUFFERALIGN() is perhaps obsolete at this point, and likely should be
removed at some point, too, but that seems like it can be left to a
future cleanup.

Mistakes all pointed out by Andres Freund.  The patch is mine, with
a few extra assertions which I adopted from his version of this fix.

src/backend/storage/buffer/buf_init.c
src/backend/storage/ipc/shmem.c

index a5cffc7896828291b8e8d46b70496622ef9a450c..5804870ad48b9448348cc1ec0e490a91d0d98572 100644 (file)
@@ -76,11 +76,9 @@ InitBufferPool(void)
 
    /* Align descriptors to a cacheline boundary. */
    BufferDescriptors = (BufferDescPadded *)
-       CACHELINEALIGN(
-                      ShmemInitStruct("Buffer Descriptors",
-                                      NBuffers * sizeof(BufferDescPadded)
-                                      + PG_CACHE_LINE_SIZE,
-                                      &foundDescs));
+       ShmemInitStruct("Buffer Descriptors",
+                       NBuffers * sizeof(BufferDescPadded),
+                       &foundDescs);
 
    BufferBlocks = (char *)
        ShmemInitStruct("Buffer Blocks",
@@ -88,10 +86,9 @@ InitBufferPool(void)
 
    /* Align lwlocks to cacheline boundary */
    BufferIOLWLockArray = (LWLockMinimallyPadded *)
-       CACHELINEALIGN(ShmemInitStruct("Buffer IO Locks",
-                             NBuffers * (Size) sizeof(LWLockMinimallyPadded)
-                                      + PG_CACHE_LINE_SIZE,
-                                      &foundIOLocks));
+       ShmemInitStruct("Buffer IO Locks",
+                       NBuffers * (Size) sizeof(LWLockMinimallyPadded),
+                       &foundIOLocks);
 
    BufferIOLWLockTranche.name = "buffer_io";
    BufferIOLWLockTranche.array_base = BufferIOLWLockArray;
index 1ad68cda4e517983f0837083197d2d312ce90c4a..1efe0201a795a315e158d47bf41786d9d17c85ad 100644 (file)
@@ -112,6 +112,7 @@ void
 InitShmemAllocation(void)
 {
    PGShmemHeader *shmhdr = ShmemSegHdr;
+   char       *aligned;
 
    Assert(shmhdr != NULL);
 
@@ -139,6 +140,11 @@ InitShmemAllocation(void)
    shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
    Assert(shmhdr->freeoffset <= shmhdr->totalsize);
 
+   /* Make sure the first allocation begins on a cache line boundary. */
+   aligned = (char *)
+       (CACHELINEALIGN((((char *) shmhdr) + shmhdr->freeoffset)));
+   shmhdr->freeoffset = aligned - (char *) shmhdr;
+
    SpinLockInit(ShmemLock);
 
    /* ShmemIndex can't be set up yet (need LWLocks first) */
@@ -189,10 +195,6 @@ ShmemAlloc(Size size)
 
    newStart = ShmemSegHdr->freeoffset;
 
-   /* extra alignment for large requests, since they are probably buffers */
-   if (size >= BLCKSZ)
-       newStart = BUFFERALIGN(newStart);
-
    newFree = newStart + size;
    if (newFree <= ShmemSegHdr->totalsize)
    {
@@ -209,6 +211,8 @@ ShmemAlloc(Size size)
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of shared memory")));
 
+   Assert(newSpace == (void *) CACHELINEALIGN(newSpace));
+
    return newSpace;
 }
 
@@ -425,6 +429,9 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
    LWLockRelease(ShmemIndexLock);
 
    Assert(ShmemAddrIsValid(structPtr));
+
+   Assert(structPtr == (void *) CACHELINEALIGN(structPtr));
+
    return structPtr;
 }