</listitem>
</varlistentry>
- <varlistentry id="guc-huge-tlb-pages" xreflabel="huge_tlb_pages">
- <term><varname>huge_tlb_pages</varname> (<type>enum</type>)</term>
+ <varlistentry id="guc-huge-pages" xreflabel="huge_pages">
+ <term><varname>huge_pages</varname> (<type>enum</type>)</term>
<indexterm>
- <primary><varname>huge_tlb_pages</> configuration parameter</primary>
+ <primary><varname>huge_pages</> configuration parameter</primary>
</indexterm>
<listitem>
<para>
- Enables/disables the use of huge TLB pages. Valid values are
+ Enables/disables the use of huge memory pages. Valid values are
<literal>try</literal> (the default), <literal>on</literal>,
and <literal>off</literal>.
</para>
<para>
- At present, this feature is supported only on Linux. The setting
- is ignored on other systems.
+ At present, this feature is supported only on Linux. The setting is
+ ignored on other systems when set to <literal>try</literal>.
</para>
<para>
- The use of huge TLB pages results in smaller page tables and
- less CPU time spent on memory management, increasing performance. For
- more details, see
- <ulink url="https://wiki.debian.org/Hugepages">the Debian wiki</ulink>.
- Remember that you will need at least shared_buffers / huge page size +
- 1 huge TLB pages. So for example for a system with 6GB shared buffers
- and a hugepage size of 2kb of you will need at least 3156 huge pages.
+ The use of huge pages results in smaller page tables and less CPU time
+ spent on memory management, increasing performance. For more details,
+ see <xref linkend="linux-huge-pages">.
</para>
<para>
- With <varname>huge_tlb_pages</varname> set to <literal>try</literal>,
+ With <varname>huge_pages</varname> set to <literal>try</literal>,
the server will try to use huge pages, but fall back to using
normal allocation if that fails. With <literal>on</literal>, failure
to use huge pages will prevent the server from starting up. With
</para>
</note>
</sect2>
+
+ <sect2 id="linux-huge-pages">
+ <title>Linux Huge Pages</title>
+
+ <para>
+ Using huge pages reduces overhead when using large contiguous chunks of
+ memory, as <productname>PostgreSQL</productname> does. To enable this
+ feature in <productname>PostgreSQL</productname> you need a kernel
+ with <varname>CONFIG_HUGETLBFS=y</varname> and
+ <varname>CONFIG_HUGETLB_PAGE=y</varname>. You will also have to adjust
+ the system setting <varname>vm.nr_hugepages</varname>. To estimate the
+ number of huge pages needed, start <productname>PostgreSQL</productname>
+ without huge pages enabled and check the postmaster's
+ <varname>VmPeak</varname> value in the <filename>/proc</filename>
+ file system:
+<programlisting>
+$ <userinput>head -1 /path/to/data/directory/postmaster.pid</userinput>
+4170
+$ <userinput>grep ^VmPeak /proc/4170/status</userinput>
+VmPeak: 6490428 kB
+</programlisting>
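+ The default huge page size on the system can be checked in a similar way;
+ this example assumes <literal>2MB</literal> (<literal>2048 kB</literal>)
+ pages:
+<programlisting>
+$ <userinput>grep ^Hugepagesize /proc/meminfo</userinput>
+Hugepagesize:       2048 kB
+</programlisting>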
+ <literal>6490428</literal> / <literal>2048</literal>
+ (dividing by the huge page size of <literal>2048 kB</literal>) is roughly
+ <literal>3169.154</literal>, so you will need at least
+ <literal>3170</literal> huge pages:
+<programlisting>
+$ <userinput>sysctl -w vm.nr_hugepages=3170</userinput>
+</programlisting>
+ Sometimes the kernel is not able to allocate the desired number of huge
+ pages, so it might be necessary to repeat the command or to reboot. Do not
+ forget to add an entry to <filename>/etc/sysctl.conf</filename> so that the
+ setting persists across reboots.
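+ A persistent entry in that file might look like this, reusing the value
+ computed above:
+<programlisting>
+vm.nr_hugepages = 3170
+</programlisting>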
+ </para>
+
+ <para>
+ The default behavior for huge pages in
+ <productname>PostgreSQL</productname> is to use them when possible and
+ to fall back to normal pages when that fails. To enforce the use of huge
+ pages, you can set
+ <link linkend="guc-huge-pages"><varname>huge_pages</varname></link>
+ to <literal>on</literal>. Note that in this case
+ <productname>PostgreSQL</productname> will fail to start if not enough huge
+ pages are available.
+ </para>
+
+ <para>
+ For a detailed description of the <productname>Linux</productname> huge
+ pages feature, see
+ <ulink url="https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt">https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt</ulink>.
+ </para>
+
+ </sect2>
</sect1>
int mmap_errno = 0;
#ifndef MAP_HUGETLB
- if (huge_tlb_pages == HUGE_TLB_ON)
+ if (huge_pages == HUGE_PAGES_ON)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("huge TLB pages not supported on this platform")));
#else
- if (huge_tlb_pages == HUGE_TLB_ON || huge_tlb_pages == HUGE_TLB_TRY)
+ if (huge_pages == HUGE_PAGES_ON || huge_pages == HUGE_PAGES_TRY)
{
/*
* Round up the request size to a suitable large value.
ptr = mmap(NULL, allocsize, PROT_READ | PROT_WRITE,
PG_MMAP_FLAGS | MAP_HUGETLB, -1, 0);
mmap_errno = errno;
- if (huge_tlb_pages == HUGE_TLB_TRY && ptr == MAP_FAILED)
+ if (huge_pages == HUGE_PAGES_TRY && ptr == MAP_FAILED)
elog(DEBUG1, "mmap with MAP_HUGETLB failed, huge pages disabled: %m");
}
#endif
- if (huge_tlb_pages == HUGE_TLB_OFF ||
- (huge_tlb_pages == HUGE_TLB_TRY && ptr == MAP_FAILED))
+ if (huge_pages == HUGE_PAGES_OFF ||
+ (huge_pages == HUGE_PAGES_TRY && ptr == MAP_FAILED))
{
/*
* use the original size, not the rounded up value, when falling
Size sysvsize;
#if defined(EXEC_BACKEND) || !defined(MAP_HUGETLB)
- if (huge_tlb_pages == HUGE_TLB_ON)
+ if (huge_pages == HUGE_PAGES_ON)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("huge TLB pages not supported on this platform")));
+ errmsg("huge pages not supported on this platform")));
#endif
/* Room for a header? */
DWORD size_high;
DWORD size_low;
- if (huge_tlb_pages == HUGE_TLB_ON)
+ if (huge_pages == HUGE_PAGES_ON)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("huge TLB pages not supported on this platform")));
+ errmsg("huge pages not supported on this platform")));
/* Room for a header? */
Assert(size > MAXALIGN(sizeof(PGShmemHeader)));
* Although only "on", "off", "try" are documented, we accept all the likely
* variants of "on" and "off".
*/
-static const struct config_enum_entry huge_tlb_options[] = {
- {"off", HUGE_TLB_OFF, false},
- {"on", HUGE_TLB_ON, false},
- {"try", HUGE_TLB_TRY, false},
- {"true", HUGE_TLB_ON, true},
- {"false", HUGE_TLB_OFF, true},
- {"yes", HUGE_TLB_ON, true},
- {"no", HUGE_TLB_OFF, true},
- {"1", HUGE_TLB_ON, true},
- {"0", HUGE_TLB_OFF, true},
+static const struct config_enum_entry huge_pages_options[] = {
+ {"off", HUGE_PAGES_OFF, false},
+ {"on", HUGE_PAGES_ON, false},
+ {"try", HUGE_PAGES_TRY, false},
+ {"true", HUGE_PAGES_ON, true},
+ {"false", HUGE_PAGES_OFF, true},
+ {"yes", HUGE_PAGES_ON, true},
+ {"no", HUGE_PAGES_OFF, true},
+ {"1", HUGE_PAGES_ON, true},
+ {"0", HUGE_PAGES_OFF, true},
{NULL, 0, false}
};
* This really belongs in pg_shmem.c, but is defined here so that it doesn't
* need to be duplicated in all the different implementations of pg_shmem.c.
*/
-int huge_tlb_pages;
+int huge_pages;
/*
* These variables are all dummies that don't do anything, except in some
},
{
- {"huge_tlb_pages", PGC_POSTMASTER, RESOURCES_MEM,
- gettext_noop("Use of huge TLB pages on Linux"),
+ {"huge_pages", PGC_POSTMASTER, RESOURCES_MEM,
+ gettext_noop("Use of huge pages on Linux"),
NULL
},
- &huge_tlb_pages,
- HUGE_TLB_TRY, huge_tlb_options,
+ &huge_pages,
+ HUGE_PAGES_TRY, huge_pages_options,
NULL, NULL, NULL
},
#shared_buffers = 32MB # min 128kB
# (change requires restart)
-#huge_tlb_pages = try # on, off, or try
+#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
} PGShmemHeader;
/* GUC variable */
-extern int huge_tlb_pages;
+extern int huge_pages;
-/* Possible values for huge_tlb_pages */
+/* Possible values for huge_pages */
typedef enum
{
- HUGE_TLB_OFF,
- HUGE_TLB_ON,
- HUGE_TLB_TRY
-} HugeTlbType;
+ HUGE_PAGES_OFF,
+ HUGE_PAGES_ON,
+ HUGE_PAGES_TRY
+} HugePagesType;
#ifndef WIN32
extern unsigned long UsedShmemSegID;