server will try to request huge pages, but fall back to the default if
that fails. With on, failure to request huge pages
will prevent the server from starting up. With off,
- huge pages will not be requested.
+ huge pages will not be requested. The actual state of huge pages is
+ indicated by the server variable
+ <xref linkend="guc-huge-pages-status"/>.
+
+ <varlistentry id="guc-huge-pages-status" xreflabel="huge_pages_status">
+ <term><varname>huge_pages_status</varname> (<type>enum</type>)
+ <indexterm>
+ <primary><varname>huge_pages_status</varname> configuration parameter</primary>
+ </indexterm>
+ </term>
+ <listitem>
+ <para>
+ Reports the state of huge pages in the current instance:
+ <literal>on</literal>, <literal>off</literal>, or
+ <literal>unknown</literal> (if displayed with
+ <command>postgres -C</command>).
+ This parameter is useful to determine whether allocation of huge pages
+ was successful under <literal>huge_pages=try</literal>.
+ See <xref linkend="guc-huge-pages"/> for more information.
+ </para>
+ </listitem>
+ </varlistentry>
integer_datetimes (boolean)
}
#endif
+ /*
+ * Report whether huge pages are in use. This needs to be tracked before
+ * the second mmap() call if attempting to use huge pages failed
+ * previously.
+ */
+ SetConfigOption("huge_pages_status", (ptr == MAP_FAILED) ? "off" : "on",
+ PGC_INTERNAL, PGC_S_DYNAMIC_DEFAULT);
+
if (ptr == MAP_FAILED && huge_pages != HUGE_PAGES_ON)
{
/*
sysvsize = sizeof(PGShmemHeader);
}
else
+ {
sysvsize = size;
+ /* huge pages are only available with mmap */
+ SetConfigOption("huge_pages_status", "off",
+ PGC_INTERNAL, PGC_S_DYNAMIC_DEFAULT);
+ }
+
/*
* Loop till we find a free IPC key. Trust CreateDataDirLockFile() to
* ensure no more than one postmaster per data directory can enter this
on_shmem_exit(pgwin32_SharedMemoryDelete, PointerGetDatum(hmap2));
*shim = hdr;
+
+ /* Report whether huge pages are in use */
+ SetConfigOption("huge_pages_status", (flProtect & SEC_LARGE_PAGES) ?
+ "on" : "off", PGC_INTERNAL, PGC_S_DYNAMIC_DEFAULT);
+
return hdr;
}
*/
seghdr = PGSharedMemoryCreate(size, &shim);
+ /*
+ * Make sure that huge pages are never reported as "unknown" while the
+ * server is running.
+ */
+ Assert(strcmp("unknown",
+ GetConfigOption("huge_pages_status", false, false)) != 0);
+
InitShmemAccess(seghdr);
/*
{NULL, 0, false}
};
+/*
+ * Allowed values for the read-only huge_pages_status GUC.  "unknown" is
+ * reported only before shared memory has been set up (e.g. when the value
+ * is shown via postgres -C); once the server is running the status is
+ * always "on" or "off".
+ */
+static const struct config_enum_entry huge_pages_status_options[] = {
+ {"off", HUGE_PAGES_OFF, false},
+ {"on", HUGE_PAGES_ON, false},
+ {"unknown", HUGE_PAGES_UNKNOWN, false},
+ {NULL, 0, false}
+};
+
static const struct config_enum_entry recovery_prefetch_options[] = {
{"off", RECOVERY_PREFETCH_OFF, false},
{"on", RECOVERY_PREFETCH_ON, false},
*/
int huge_pages = HUGE_PAGES_TRY;
int huge_page_size;
+int huge_pages_status = HUGE_PAGES_UNKNOWN;
/*
* These variables are all dummies that don't do anything, except in some
NULL, NULL, NULL
},
+ {
+ {"huge_pages_status", PGC_INTERNAL, PRESET_OPTIONS,
+ gettext_noop("Indicates the status of huge pages."),
+ NULL,
+ GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
+ },
+ &huge_pages_status,
+ HUGE_PAGES_UNKNOWN, huge_pages_status_options,
+ NULL, NULL, NULL
+ },
+
{
{"recovery_prefetch", PGC_SIGHUP, WAL_RECOVERY,
gettext_noop("Prefetch referenced blocks during recovery."),
extern PGDLLIMPORT int huge_pages;
extern PGDLLIMPORT int huge_page_size;
-/* Possible values for huge_pages */
+/* Possible values for huge_pages and huge_pages_status */
typedef enum
{
HUGE_PAGES_OFF,
HUGE_PAGES_ON,
- HUGE_PAGES_TRY
+ HUGE_PAGES_TRY, /* only for huge_pages */
+ HUGE_PAGES_UNKNOWN /* only for huge_pages_status */
} HugePagesType;
/* Possible values for shared_memory_type */
$node->safe_psql('postgres',
q(select (string_to_array(SYSTEM_USER, ':'))[2]));
+# While on it, check the status of huge pages, that can be either on
+# or off, but never unknown.
+my $huge_pages_status =
+ $node->safe_psql('postgres', q(SHOW huge_pages_status;));
+isnt($huge_pages_status, 'unknown', "check huge_pages_status");
+
# Tests without the user name map.
# Failure as connection is attempted with a database role not mapping
# to an authorized system user.
$node->append_conf('postgresql.conf', "log_connections = on\n");
$node->start;
+my $huge_pages_status =
+ $node->safe_psql('postgres', q(SHOW huge_pages_status;));
+isnt($huge_pages_status, 'unknown', "check huge_pages_status");
+
# SSPI is set up by default. Make sure it interacts correctly with
# require_auth.
$node->connect_ok("require_auth=sspi",