Avoid running out of memory during hash_create, by not passing a
number-of-buckets that exceeds the size we actually plan to allow
the hash table to grow to.  Per trouble report from Sean Shanny.

author     Tom Lane    Tue, 30 Dec 2003 20:05:05 +0000 (20:05 +0000)
committer  Tom Lane    Tue, 30 Dec 2003 20:05:05 +0000 (20:05 +0000)
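The fix computes the table's growth ceiling (iss_MaxHash) *before* calling
hash_create, then clamps the requested bucket count to it, so a wildly
overestimated plan_rows can no longer force an over-large initial bucket
allocation.  A minimal standalone sketch of the same clamping idea; the
names work_mem_kb and entry_size are hypothetical stand-ins for SortMem
and the MAXALIGN'd HASHELEMENT/DupHashTabEntry sizes used in the patch:

    #include <stddef.h>
    #include <math.h>

    static long
    clamp_bucket_estimate(double planned_rows, long work_mem_kb,
                          size_t entry_size)
    {
        /* Most entries we are willing to let the table grow to. */
        long max_entries = (work_mem_kb * 1024L) / (long) entry_size;
        long nbuckets = (long) ceil(planned_rows);

        if (nbuckets < 1)
            nbuckets = 1;           /* need at least one bucket */
        if (nbuckets > max_entries)
            nbuckets = max_entries; /* don't pre-size past the growth cap */
        return nbuckets;
    }

Moving the cap computation ahead of hash_create (the old code computed
iss_MaxHash only afterward) is the essential part: hash_create allocates
the bucket array up front, so the request itself must already be bounded.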

src/backend/executor/nodeIndexscan.c

index 37fca4c666cce212f8af7f1d8d0aca44814ca2a4..0ece6c6702f43d218f20b9e4cc27c6579624d66e 100644 (file)
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *   $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.87 2003/11/29 19:51:48 pgsql Exp $
+ *   $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.88 2003/12/30 20:05:05 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -953,22 +953,28 @@ static void
 create_duphash(IndexScanState *node)
 {
    HASHCTL     hash_ctl;
+   long        nbuckets;
 
+   node->iss_MaxHash = (SortMem * 1024L) /
+       (MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(DupHashTabEntry)));
    MemSet(&hash_ctl, 0, sizeof(hash_ctl));
    hash_ctl.keysize = SizeOfIptrData;
    hash_ctl.entrysize = sizeof(DupHashTabEntry);
    hash_ctl.hash = tag_hash;
    hash_ctl.hcxt = CurrentMemoryContext;
+   nbuckets = (long) ceil(node->ss.ps.plan->plan_rows);
+   if (nbuckets < 1)
+       nbuckets = 1;
+   if (nbuckets > node->iss_MaxHash)
+       nbuckets = node->iss_MaxHash;
    node->iss_DupHash = hash_create("DupHashTable",
-                                   (long) ceil(node->ss.ps.plan->plan_rows),
+                                   nbuckets,
                                    &hash_ctl,
                                    HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
    if (node->iss_DupHash == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of memory")));
-   node->iss_MaxHash = (SortMem * 1024L) /
-       (MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(DupHashTabEntry)));
 }
 
 int