Diffstat (limited to 'sysdeps/mach/hurd/i386/tls.h')
-rw-r--r--  sysdeps/mach/hurd/i386/tls.h  53
1 file changed, 48 insertions, 5 deletions
diff --git a/sysdeps/mach/hurd/i386/tls.h b/sysdeps/mach/hurd/i386/tls.h
index b194a49e34..4c78685045 100644
--- a/sysdeps/mach/hurd/i386/tls.h
+++ b/sysdeps/mach/hurd/i386/tls.h
@@ -52,6 +52,15 @@ typedef struct
#define TLS_TCB_AT_TP 1
#define TLS_DTV_AT_TP 0
+/* Alignment requirement for TCB.
+
+ Some processors such as Intel Atom pay a big penalty on every
+ access using a segment override if that segment's base is not
+ aligned to the size of a cache line. (See Intel 64 and IA-32
+ Architectures Optimization Reference Manual, section 13.3.3.3,
+ "Segment Base".) On such machines, a cache line is 64 bytes. */
+#define TCB_ALIGNMENT 64
+
#ifndef __ASSEMBLER__
/* Use i386-specific RPCs to arrange that %gs segment register prefix
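
The TCB_ALIGNMENT comment above motivates the 64-byte figure. As a point of reference only (not part of the patch), the sketch below shows one way a TCB-like object could be kept on a cache-line boundary; the struct name fake_tcb, its fields, and the allocate_fake_tcb helper are invented for illustration, and glibc's real TCB allocation is handled elsewhere.

    /* Sketch only: keep a made-up TCB-like structure on a 64-byte
       boundary so a %gs segment base pointing at it is cache-line
       aligned.  */
    #include <stdlib.h>

    #define TCB_ALIGNMENT 64

    struct fake_tcb
    {
      void *tcb;	/* Points back at the structure itself.  */
      void *dtv;
      void *self;
    } __attribute__ ((aligned (TCB_ALIGNMENT)));

    /* Heap allocation with the same alignment guarantee.  */
    static struct fake_tcb *
    allocate_fake_tcb (void)
    {
      void *mem;
      if (posix_memalign (&mem, TCB_ALIGNMENT, sizeof (struct fake_tcb)) != 0)
        return NULL;
      return mem;
    }
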
@@ -78,6 +87,7 @@ typedef struct
| (((unsigned int) (tcb)) & 0xff000000) /* base 24..31 */ \
}
+# define HURD_SEL_LDT(sel) (__builtin_expect((sel) & 4, 0))
static inline const char * __attribute__ ((unused))
_hurd_tls_init (tcbhead_t *tcb)
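
For readers unfamiliar with x86 selectors: the HURD_SEL_LDT macro added above tests bit 2 of the selector, the Table Indicator (TI) flag; when it is set the selector refers to the LDT rather than the GDT. A tiny standalone check follows (the selector values are chosen only for illustration, not taken from the patch):

    #include <stdio.h>

    #define HURD_SEL_LDT(sel) (__builtin_expect ((sel) & 4, 0))

    int
    main (void)
    {
      int gdt_sel = 0x48;	/* 0b1001000: index 9, TI clear -> GDT.  */
      int ldt_sel = 0x4f;	/* 0b1001111: index 9, TI set -> LDT, RPL 3.  */

      /* Prints "0 1".  */
      printf ("%d %d\n", !!HURD_SEL_LDT (gdt_sel), !!HURD_SEL_LDT (ldt_sel));
      return 0;
    }
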
@@ -141,9 +151,40 @@ _hurd_tls_init (tcbhead_t *tcb)
# include <mach/machine/thread_status.h>
-/* Set up TLS in the new thread of a fork child, copying from our own. */
-static inline error_t __attribute__ ((unused))
-_hurd_tls_fork (thread_t child, struct i386_thread_state *state)
+/* Set up TLS in the new thread of a fork child, copying from the original. */
+static inline kern_return_t __attribute__ ((unused))
+_hurd_tls_fork (thread_t child, thread_t orig, struct i386_thread_state *state)
+{
+ /* Fetch the selector set by _hurd_tls_init. */
+ int sel;
+ asm ("mov %%gs, %w0" : "=q" (sel) : "0" (0));
+ if (sel == state->ds) /* _hurd_tls_init was never called. */
+ return 0;
+
+ struct descriptor desc, *_desc = &desc;
+ error_t err;
+ unsigned int count = 1;
+
+ if (HURD_SEL_LDT(sel))
+ err = __i386_get_ldt (orig, sel, 1, &_desc, &count);
+ else
+ err = __i386_get_gdt (orig, sel, &desc);
+
+ assert_perror (err);
+ if (err)
+ return err;
+
+ if (HURD_SEL_LDT(sel))
+ err = __i386_set_ldt (child, sel, &desc, 1);
+ else
+ err = __i386_set_gdt (child, &sel, desc);
+
+ state->gs = sel;
+ return err;
+}
+
+static inline kern_return_t __attribute__ ((unused))
+_hurd_tls_new (thread_t child, struct i386_thread_state *state, tcbhead_t *tcb)
{
/* Fetch the selector set by _hurd_tls_init. */
int sel;
@@ -151,11 +192,13 @@ _hurd_tls_fork (thread_t child, struct i386_thread_state *state)
if (sel == state->ds) /* _hurd_tls_init was never called. */
return 0;
- tcbhead_t *const tcb = THREAD_SELF;
HURD_TLS_DESC_DECL (desc, tcb);
error_t err;
- if (__builtin_expect (sel, 0x50) & 4) /* LDT selector */
+ tcb->tcb = tcb;
+ tcb->self = child;
+
+ if (HURD_SEL_LDT(sel))
err = __i386_set_ldt (child, sel, &desc, 1);
else
err = __i386_set_gdt (child, &sel, desc);
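
For orientation, a hypothetical caller of the new _hurd_tls_fork is sketched below. It is not taken from glibc: copy_tls_to_child and the two thread-port parameters are invented names, and real fork code would also adjust the child's instruction and stack pointers. thread_get_state and thread_set_state are the standard Mach RPCs; the sketch compiles only on GNU/Hurd with this header's declarations in scope.

    #include <mach.h>
    #include <mach/machine/thread_status.h>

    static kern_return_t
    copy_tls_to_child (thread_t parent_thread, thread_t child_thread)
    {
      struct i386_thread_state state;
      mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
      kern_return_t err;

      /* Capture the parent's register state so state.ds and state.gs
         describe the running thread.  */
      err = thread_get_state (parent_thread, i386_THREAD_STATE,
                              (thread_state_t) &state, &count);
      if (err)
        return err;

      /* Copy the parent's TLS descriptor into the child and record the
         resulting selector in state.gs.  */
      err = _hurd_tls_fork (child_thread, parent_thread, &state);
      if (err)
        return err;

      /* Install the state, including the new %gs, in the child thread.  */
      return thread_set_state (child_thread, i386_THREAD_STATE,
                               (thread_state_t) &state,
                               i386_THREAD_STATE_COUNT);
    }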