Fix some minor errors in new PHJ code.
author     Tom Lane
           Wed, 3 Jan 2018 17:53:49 +0000 (12:53 -0500)
committer  Tom Lane
           Wed, 3 Jan 2018 17:53:49 +0000 (12:53 -0500)
Correct ExecParallelHashTuplePrealloc's estimate of whether the
space_allowed limit is exceeded.  Be more consistent about tuples that
are exactly HASH_CHUNK_THRESHOLD in size (they're "small", not "large").
Neither of these things explains the current buildfarm unhappiness, but
they're still bugs.

Thomas Munro, per gripe by me

Discussion: https://postgr.es/m/CAEepm=34PDuR69kfYVhmZPgMdy8pSA-MYbpesEN1SR+2oj3Y+w@mail.gmail.com
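
For context, the "small"-versus-"large" distinction that the second fix makes
consistent can be sketched as standalone code.  This is a minimal sketch, not
code from the patch: the two constants match nodeHash.c's definitions, but
tuple_is_small is a hypothetical helper written only for illustration.

    #include <stdbool.h>
    #include <stddef.h>

    /* As defined in nodeHash.c: chunks are 32 kB, and the "large tuple"
     * threshold is a quarter of a chunk. */
    #define HASH_CHUNK_SIZE       (32 * 1024L)
    #define HASH_CHUNK_THRESHOLD  (HASH_CHUNK_SIZE / 4)

    /* Hypothetical helper, for illustration only.  A tuple of exactly
     * HASH_CHUNK_THRESHOLD bytes is "small": it is packed into the current
     * shared chunk, and only strictly larger tuples get a dedicated chunk.
     * The parallel allocation path below used "<" for "small", wrongly
     * treating a threshold-sized tuple as large. */
    static bool
    tuple_is_small(size_t size)
    {
        return size <= HASH_CHUNK_THRESHOLD;
    }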

src/backend/executor/nodeHash.c

index 52f5c0c26e049ff613bbd45a668d9e48091aad06..a9149ef81ced38ab33c6a9d03b8971a7921eb31e 100644
@@ -2740,7 +2740,7 @@ ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
     */
    chunk = hashtable->current_chunk;
    if (chunk != NULL &&
-       size < HASH_CHUNK_THRESHOLD &&
+       size <= HASH_CHUNK_THRESHOLD &&
        chunk->maxlen - chunk->used >= size)
    {
 
@@ -3260,6 +3260,7 @@ ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
 
    Assert(batchno > 0);
    Assert(batchno < hashtable->nbatch);
+   Assert(size == MAXALIGN(size));
 
    LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
 
@@ -3280,7 +3281,8 @@ ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
 
    if (pstate->growth != PHJ_GROWTH_DISABLED &&
        batch->at_least_one_chunk &&
-       (batch->shared->estimated_size + size > pstate->space_allowed))
+       (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
+        > pstate->space_allowed))
    {
        /*
         * We have determined that this batch would exceed the space budget if
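
The corrected budget test in the second hunk can be restated as standalone
arithmetic.  Again a sketch under stated assumptions: would_exceed_space_allowed
is a hypothetical helper, "want" is assumed to be at least "size" (it is
computed earlier in ExecParallelHashTuplePrealloc as the amount the function
will actually reserve), and the header-size value here is only a stand-in for
the real definition derived from the chunk header struct.

    #include <stdbool.h>
    #include <stddef.h>

    /* Stand-in value; nodeHash.c derives this from the size of the
     * chunk header struct. */
    #define HASH_CHUNK_HEADER_SIZE 16

    /* Reserving space for a tuple costs "want" bytes plus a chunk header,
     * so the old test, estimated_size + size > space_allowed, undercounted
     * and could let a batch slip past its space budget. */
    static bool
    would_exceed_space_allowed(size_t estimated_size, size_t want,
                               size_t space_allowed)
    {
        return estimated_size + want + HASH_CHUNK_HEADER_SIZE > space_allowed;
    }

The Assert added in the first part of this hunk records the related
precondition that callers pass MAXALIGN'ed sizes, so this arithmetic is done
in aligned units.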