svn commit: r328965 - stable/11/lib/libc/gen
Michal Meloun
mmel at FreeBSD.org
Wed Feb 7 06:24:35 UTC 2018
Author: mmel
Date: Wed Feb 7 06:24:34 2018
New Revision: 328965
URL: https://svnweb.freebsd.org/changeset/base/328965
Log:
MFC r325364,r326794:
r325364:
Add alignment support to __libc_allocate_tls().
r326794:
Rework alignment handling in __libc_allocate_tls() for Variant I of TLS
layout.
Modified:
stable/11/lib/libc/gen/tls.c
Directory Properties:
stable/11/ (props changed)
Modified: stable/11/lib/libc/gen/tls.c
==============================================================================
--- stable/11/lib/libc/gen/tls.c Wed Feb 7 01:54:13 2018 (r328964)
+++ stable/11/lib/libc/gen/tls.c Wed Feb 7 06:24:34 2018 (r328965)
@@ -37,9 +37,15 @@
#include <stdlib.h>
#include <string.h>
#include <elf.h>
+#include <unistd.h>
#include "libc_private.h"
+#define tls_assert(cond) ((cond) ? (void) 0 : \
+ (tls_msg(#cond ": assert failed: " __FILE__ ":" \
+ __XSTRING(__LINE__) "\n"), abort()))
+#define tls_msg(s) write(STDOUT_FILENO, s, strlen(s))
+
/* Provided by jemalloc to avoid bootstrapping issues. */
void *__je_bootstrap_malloc(size_t size);
void *__je_bootstrap_calloc(size_t num, size_t size);
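The tls_assert() and tls_msg() macros added above give the bootstrap path a way to report failures without pulling in stdio or the allocator. Below is a standalone sketch of how they expand; it is not part of the patch, and MY_XSTRING is a local stand-in for FreeBSD's __XSTRING from <sys/cdefs.h> so the example builds anywhere.

/*
 * Standalone sketch (not part of the patch): the condition and source
 * location are turned into a string at compile time and written with
 * write(2), so no stdio or malloc is needed during libc bootstrap.
 */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define MY_STRING(x)  #x
#define MY_XSTRING(x) MY_STRING(x)      /* expand, then stringify */

#define tls_msg(s)    write(STDOUT_FILENO, s, strlen(s))
#define tls_assert(cond) ((cond) ? (void)0 : \
    (tls_msg(#cond ": assert failed: " __FILE__ ":" \
    MY_XSTRING(__LINE__) "\n"), abort()))

int
main(void)
{
    size_t tcbalign = 16;

    tls_msg("checking TCB alignment\n");
    tls_assert(tcbalign >= sizeof(void *));  /* passes, prints nothing */
    return (0);
}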
@@ -81,10 +87,17 @@ void __libc_free_tls(void *tls, size_t tcbsize, size_t
#define TLS_VARIANT_II
#endif
+#if defined(__mips__) || defined(__powerpc__) || defined(__riscv)
+#define DTV_OFFSET 0x8000
+#else
+#define DTV_OFFSET 0
+#endif
+
#ifndef PIC
static size_t tls_static_space;
static size_t tls_init_size;
+static size_t tls_init_align;
static void *tls_init;
#endif
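DTV_OFFSET encodes the per-architecture bias described in the large comment added further down in the patch. For illustration only (not part of the patch), this is the value that ends up in dtv[2] for a TLS segment at a given address:

#include <stdint.h>
#include <stdio.h>

#if defined(__mips__) || defined(__powerpc__) || defined(__riscv)
#define DTV_OFFSET 0x8000
#else
#define DTV_OFFSET 0
#endif

/* The value __libc_allocate_tls() stores into dtv[2] for a segment at `tls`. */
static uintptr_t
dtv_entry_for(const char *tls)
{
    return ((uintptr_t)tls + DTV_OFFSET);
}

int
main(void)
{
    static char segment[64];    /* stand-in for a static TLS segment */

    printf("dtv[2] = %#jx for TLS at %p\n",
        (uintmax_t)dtv_entry_for(segment), (void *)segment);
    return (0);
}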
@@ -109,60 +122,190 @@ __libc_tls_get_addr(void *ti __unused)
#ifndef PIC
+static void *
+malloc_aligned(size_t size, size_t align)
+{
+ void *mem, *res;
+
+ if (align < sizeof(void *))
+ align = sizeof(void *);
+
+ mem = __je_bootstrap_malloc(size + sizeof(void *) + align - 1);
+ res = (void *)roundup2((uintptr_t)mem + sizeof(void *), align);
+ *(void **)((uintptr_t)res - sizeof(void *)) = mem;
+ return (res);
+}
+
+static void
+free_aligned(void *ptr)
+{
+ void *mem;
+ uintptr_t x;
+
+ if (ptr == NULL)
+ return;
+
+ x = (uintptr_t)ptr;
+ x -= sizeof(void *);
+ mem = *(void **)x;
+ __je_bootstrap_free(mem);
+}
+
#ifdef TLS_VARIANT_I
+/*
+ * There are two versions of variant I of TLS:
+ *
+ * - ARM and aarch64 use the original variant I as described in [1] and [2],
+ * where TP points to the start of the TCB, followed by the aligned TLS
+ * segment. Both the TCB and the TLS segment must be aligned to the
+ * alignment of the TLS section. TCB[0] points to the DTV vector and the
+ * DTV values are real addresses (without bias).
+ * Note: for the Local Exec TLS model, the offsets from TP (the TCB in this
+ * case) to TLS variables are computed by the linker, so we cannot overalign
+ * the TLS section.
+ *
+ * - MIPS, PowerPC and RISC-V use a modified version of variant I, described
+ * in [3], where TP points (with bias) to the TLS segment and the TCB
+ * immediately precedes it without any alignment gap [4]. Only the TLS
+ * segment needs to be aligned. TCB[0] points to the DTV vector and the
+ * DTV values are biased by a constant (0x8000) from the real addresses [5].
+ *
+ * [1] Ulrich Drepper: ELF Handling for Thread-Local Storage
+ * www.akkadia.org/drepper/tls.pdf
+ *
+ * [2] ARM IHI 0045E: Addenda to, and Errata in, the ABI for the ARM(r)
+ * Architecture
+ * infocenter.arm.com/help/topic/com.arm.doc.ihi0045e/IHI0045E_ABI_addenda.pdf
+ *
+ * [3] OpenPOWER: Power Architecture 64-Bit ELF V2 ABI Specification
+ * https://members.openpowerfoundation.org/document/dl/576
+ *
+ * [4] It's unclear whether "without any alignment gap" is a hard ABI
+ * requirement, but we must follow this rule due to the suboptimal _set_tp()
+ * (aka <ARCH>_SET_TP) implementation, which expects the TCB, not the TP, as
+ * its argument.
+ *
+ * [5] I have not been able to validate the "values are biased" assertion.
+ */
+
#define TLS_TCB_SIZE (2 * sizeof(void *))
/*
- * Free Static TLS using the Variant I method.
+ * Return a pointer to the allocated TLS block.
*/
+static void *
+get_tls_block_ptr(void *tcb, size_t tcbsize)
+{
+ size_t extra_size, post_size, pre_size, tls_block_size;
+
+ /* Compute fragment sizes. */
+ extra_size = tcbsize - TLS_TCB_SIZE;
+#if defined(__aarch64__) || defined(__arm__)
+ post_size = roundup2(TLS_TCB_SIZE, tls_init_align) - TLS_TCB_SIZE;
+#else
+ post_size = 0;
+#endif
+ tls_block_size = tcbsize + post_size;
+ pre_size = roundup2(tls_block_size, tls_init_align) - tls_block_size;
+
+ return ((char *)tcb - pre_size - extra_size);
+}
+
+/*
+ * Free Static TLS using the Variant I method. The tcbsize
+ * and tcbalign parameters must be the same as those used to allocate
+ * the block.
+ */
void
__libc_free_tls(void *tcb, size_t tcbsize, size_t tcbalign __unused)
{
Elf_Addr *dtv;
Elf_Addr **tls;
- tls = (Elf_Addr **)((Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE);
+ tls = (Elf_Addr **)tcb;
dtv = tls[0];
__je_bootstrap_free(dtv);
- __je_bootstrap_free(tcb);
+ free_aligned(get_tls_block_ptr(tcb, tcbsize));
}
/*
* Allocate Static TLS using the Variant I method.
+ *
+ * To handle all of the above requirements, we set up the following layout
+ * for the TLS block:
+ * (the whole memory block is aligned to MAX(TLS_TCB_ALIGN, tls_init_align))
+ *
+ * +----------+--------------+--------------+-----------+------------------+
+ * | pre gap | extended TCB | TCB | post gap | TLS segment |
+ * | pre_size | extra_size | TLS_TCB_SIZE | post_size | tls_static_space |
+ * +----------+--------------+--------------+-----------+------------------+
+ *
+ * where:
+ * extra_size is tcbsize - TLS_TCB_SIZE
+ * post_size is used to adjust the TCB to the TLS alignment for the first
+ * version of the layout, and is always 0 for the second version.
+ * pre_size is used to adjust the TCB alignment for the first version and to
+ * adjust the TLS alignment for the second version.
+ *
*/
void *
-__libc_allocate_tls(void *oldtcb, size_t tcbsize, size_t tcbalign __unused)
+__libc_allocate_tls(void *oldtcb, size_t tcbsize, size_t tcbalign)
{
- Elf_Addr *dtv;
- Elf_Addr **tls;
- char *tcb;
+ Elf_Addr *dtv, **tcb;
+ char *tls_block, *tls;
+ size_t extra_size, maxalign, post_size, pre_size, tls_block_size;
if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE)
return (oldtcb);
- tcb = __je_bootstrap_calloc(1, tls_static_space + tcbsize - TLS_TCB_SIZE);
- tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE);
+ tls_assert(tcbalign >= TLS_TCB_ALIGN);
+ maxalign = MAX(tcbalign, tls_init_align);
+ /* Compute fragment sizes. */
+ extra_size = tcbsize - TLS_TCB_SIZE;
+#if defined(__aarch64__) || defined(__arm__)
+ post_size = roundup2(TLS_TCB_SIZE, tls_init_align) - TLS_TCB_SIZE;
+#else
+ post_size = 0;
+#endif
+ tls_block_size = tcbsize + post_size;
+ pre_size = roundup2(tls_block_size, tls_init_align) - tls_block_size;
+ tls_block_size += pre_size + tls_static_space;
+
+ /* Allocate the whole TLS block. */
+ tls_block = malloc_aligned(tls_block_size, maxalign);
+ if (tls_block == NULL) {
+ tls_msg("__libc_allocate_tls: Out of memory.\n");
+ abort();
+ }
+ memset(tls_block, 0, tls_block_size);
+ tcb = (Elf_Addr **)(tls_block + pre_size + extra_size);
+ tls = (char *)tcb + TLS_TCB_SIZE + post_size;
+
if (oldtcb != NULL) {
- memcpy(tls, oldtcb, tls_static_space);
- __je_bootstrap_free(oldtcb);
+ memcpy(tls_block, get_tls_block_ptr(oldtcb, tcbsize),
+ tls_block_size);
+ free_aligned(oldtcb);
/* Adjust the DTV. */
- dtv = tls[0];
- dtv[2] = (Elf_Addr)tls + TLS_TCB_SIZE;
+ dtv = tcb[0];
+ dtv[2] = (Elf_Addr)(tls + DTV_OFFSET);
} else {
dtv = __je_bootstrap_malloc(3 * sizeof(Elf_Addr));
- tls[0] = dtv;
- dtv[0] = 1;
- dtv[1] = 1;
- dtv[2] = (Elf_Addr)tls + TLS_TCB_SIZE;
+ if (dtv == NULL) {
+ tls_msg("__libc_allocate_tls: Out of memory.\n");
+ abort();
+ }
+ /* Build the DTV. */
+ tcb[0] = dtv;
+ dtv[0] = 1; /* Generation. */
+ dtv[1] = 1; /* Segments count. */
+ dtv[2] = (Elf_Addr)(tls + DTV_OFFSET);
if (tls_init_size > 0)
- memcpy((void*)dtv[2], tls_init, tls_init_size);
+ memcpy(tls, tls_init, tls_init_size);
}
- return(tcb);
+ return (tcb);
}
#endif
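The pre_size/post_size arithmetic above is easiest to follow with concrete numbers. The following standalone sketch is not part of the patch; the tcbsize and tls_init_align values are made-up examples, chosen only to show how the gaps fall out of the same roundup2() computations for both flavours of variant I:

#include <stdio.h>
#include <stddef.h>

#define TLS_TCB_SIZE (2 * sizeof(void *))
#define roundup2(x, y) (((x) + ((y) - 1)) & ~((y) - 1))  /* y is a power of 2 */

static void
show_layout(const char *flavor, size_t tcbsize, size_t tls_init_align,
    int align_tcb_to_tls)
{
    size_t extra_size, post_size, pre_size, tls_block_size;

    extra_size = tcbsize - TLS_TCB_SIZE;
    /* ARM/aarch64 pad between the TCB and the TLS segment ... */
    post_size = align_tcb_to_tls ?
        roundup2(TLS_TCB_SIZE, tls_init_align) - TLS_TCB_SIZE : 0;
    tls_block_size = tcbsize + post_size;
    /* ... while the pre gap brings the TCB (or TLS) to the right offset. */
    pre_size = roundup2(tls_block_size, tls_init_align) - tls_block_size;

    printf("%s: extra=%zu post=%zu pre=%zu (TCB at offset %zu)\n",
        flavor, extra_size, post_size, pre_size, pre_size + extra_size);
}

int
main(void)
{
    /* Hypothetical inputs: a 3-pointer TCB and a 32-byte-aligned TLS section. */
    show_layout("ARM-style ", 3 * sizeof(void *), 32, 1);
    show_layout("MIPS-style", 3 * sizeof(void *), 32, 0);
    return (0);
}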
@@ -185,12 +328,13 @@ __libc_free_tls(void *tcb, size_t tcbsize __unused, si
* Figure out the size of the initial TLS block so that we can
* find stuff which ___tls_get_addr() allocated dynamically.
*/
+ tcbalign = MAX(tcbalign, tls_init_align);
size = roundup2(tls_static_space, tcbalign);
dtv = ((Elf_Addr**)tcb)[1];
tlsend = (Elf_Addr) tcb;
tlsstart = tlsend - size;
- __je_bootstrap_free((void*) tlsstart);
+ free_aligned((void*)tlsstart);
__je_bootstrap_free(dtv);
}
@@ -205,12 +349,22 @@ __libc_allocate_tls(void *oldtls, size_t tcbsize, size
Elf_Addr *dtv;
Elf_Addr segbase, oldsegbase;
+ tcbalign = MAX(tcbalign, tls_init_align);
size = roundup2(tls_static_space, tcbalign);
if (tcbsize < 2 * sizeof(Elf_Addr))
tcbsize = 2 * sizeof(Elf_Addr);
- tls = __je_bootstrap_calloc(1, size + tcbsize);
+ tls = malloc_aligned(size + tcbsize, tcbalign);
+ if (tls == NULL) {
+ tls_msg("__libc_allocate_tls: Out of memory.\n");
+ abort();
+ }
+ memset(tls, 0, size + tcbsize);
dtv = __je_bootstrap_malloc(3 * sizeof(Elf_Addr));
+ if (dtv == NULL) {
+ tls_msg("__libc_allocate_tls: Out of memory.\n");
+ abort();
+ }
segbase = (Elf_Addr)(tls + size);
((Elf_Addr*)segbase)[0] = segbase;
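In the Variant II path the TCB (segbase) sits at the top of the block and the static TLS area immediately below it, so the free path can recompute the block start from the TCB address alone. A small sketch of that round trip; the sizes are illustrative, not taken from a real binary, and plain malloc() stands in for malloc_aligned():

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define roundup2(x, y) (((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
    size_t tls_static_space = 200;          /* hypothetical PT_TLS size */
    size_t tcbalign = 16, tcbsize = 2 * sizeof(void *);
    size_t size = roundup2(tls_static_space, tcbalign);

    char *tls = malloc(size + tcbsize);     /* stand-in for malloc_aligned() */
    if (tls == NULL)
        return (1);
    char *segbase = tls + size;             /* where the TCB lives */

    /* The free path only sees the TCB address and must get back to `tls`. */
    char *tlsstart = segbase - size;
    assert(tlsstart == tls);
    printf("block %p, segbase %p, recovered start %p\n",
        (void *)tls, (void *)segbase, (void *)tlsstart);
    free(tls);
    return (0);
}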
@@ -305,17 +459,11 @@ _init_tls(void)
tls_static_space = roundup2(phdr[i].p_memsz,
phdr[i].p_align);
tls_init_size = phdr[i].p_filesz;
+ tls_init_align = phdr[i].p_align;
tls_init = (void*) phdr[i].p_vaddr;
+ break;
}
}
-
-#ifdef TLS_VARIANT_I
- /*
- * tls_static_space should include space for TLS structure
- */
- tls_static_space += TLS_TCB_SIZE;
-#endif
-
tls = _rtld_allocate_tls(NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN);
_set_tp(tls);
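The patch as a whole rests on the over-allocate-and-stash trick in malloc_aligned()/free_aligned() near the top. A standalone sketch of the same technique, built on plain malloc()/free() instead of the jemalloc bootstrap hooks so it compiles outside libc:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define roundup2(x, y) (((x) + ((y) - 1)) & ~((y) - 1))

static void *
xmalloc_aligned(size_t size, size_t align)
{
    void *mem, *res;

    if (align < sizeof(void *))
        align = sizeof(void *);
    /* Room for the payload, the stashed pointer, and worst-case padding. */
    mem = malloc(size + sizeof(void *) + align - 1);
    if (mem == NULL)
        return (NULL);
    res = (void *)roundup2((uintptr_t)mem + sizeof(void *), align);
    ((void **)res)[-1] = mem;       /* stash the raw pointer just below res */
    return (res);
}

static void
xfree_aligned(void *ptr)
{
    if (ptr != NULL)
        free(((void **)ptr)[-1]);   /* recover and free the raw pointer */
}

int
main(void)
{
    void *p = xmalloc_aligned(100, 64);

    assert(p != NULL && ((uintptr_t)p & 63) == 0);  /* 64-byte aligned */
    xfree_aligned(p);
    return (0);
}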