commit     a334319f6530564d22e775935d9c91663623a1b4
tree       b5877475619e4c938e98757d518bb1e9cbead751
parent     0ecb606cb6cf65de1d9fc8a919bceb4be476c602
author     Ulrich Drepper <drepper@redhat.com>  2004-12-22 20:10:10 +0000
committer  Ulrich Drepper <drepper@redhat.com>  2004-12-22 20:10:10 +0000
(CFLAGS-tst-align.c): Add -mpreferred-stack-boundary=4.
Diffstat (limited to 'linuxthreads/sysdeps/i386')
-rw-r--r--  linuxthreads/sysdeps/i386/Makefile           |  23
-rw-r--r--  linuxthreads/sysdeps/i386/i586/Versions      |   5
-rw-r--r--  linuxthreads/sysdeps/i386/i686/Versions      |   5
-rw-r--r--  linuxthreads/sysdeps/i386/i686/pt-machine.h  |  79
-rw-r--r--  linuxthreads/sysdeps/i386/pspinlock.c        | 103
-rw-r--r--  linuxthreads/sysdeps/i386/pt-machine.h       | 108
-rw-r--r--  linuxthreads/sysdeps/i386/tcb-offsets.sym    |   7
-rw-r--r--  linuxthreads/sysdeps/i386/tls.h              | 225
-rw-r--r--  linuxthreads/sysdeps/i386/useldt.h           | 314
9 files changed, 869 insertions(+), 0 deletions(-)
diff --git a/linuxthreads/sysdeps/i386/Makefile b/linuxthreads/sysdeps/i386/Makefile
new file mode 100644
index 0000000000..418fa5c6ef
--- /dev/null
+++ b/linuxthreads/sysdeps/i386/Makefile
@@ -0,0 +1,23 @@
+ifeq ($(subdir),linuxthreads)
+# On i686 we must avoid the trampoline functions that would otherwise
+# be generated to get the GOT pointer.
+CFLAGS-pt-initfini.s += -march=i386 -mcpu=i386
+
+# Most of these files must be compiled with a frame pointer since we
+# need the frame base address, which is kept in %ebp unless the frame
+# pointer is optimized out.
+CFLAGS-cancel.c += -fno-omit-frame-pointer -mpreferred-stack-boundary=4
+CFLAGS-condvar.c += -fno-omit-frame-pointer
+CFLAGS-join.c += -fno-omit-frame-pointer
+CFLAGS-manager.c += -fno-omit-frame-pointer -mpreferred-stack-boundary=4
+CFLAGS-oldsemaphore.c += -fno-omit-frame-pointer
+CFLAGS-pthread.c += -fno-omit-frame-pointer -mpreferred-stack-boundary=4
+CFLAGS-ptlongjmp.c += -fno-omit-frame-pointer
+CFLAGS-semaphore.c += -fno-omit-frame-pointer
+CFLAGS-sighandler.c += -fno-omit-frame-pointer -mpreferred-stack-boundary=4
+CFLAGS-tst-align.c += -mpreferred-stack-boundary=4
+endif
+
+ifeq ($(subdir),csu)
+gen-as-const-headers += tcb-offsets.sym
+endif
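Background note: the frame-pointer flags above matter because the library takes addresses inside the current frame, e.g. CURRENT_STACK_FRAME in pt-machine.h later in this patch, which only works reliably while %ebp holds the frame base. The following is a minimal illustrative sketch (not part of the patch) of that use:

/* Illustrative only: CURRENT_STACK_FRAME is defined exactly like this in
   pt-machine.h below; with -fno-omit-frame-pointer it yields an address
   somewhere inside the current frame.  */
#define CURRENT_STACK_FRAME  __builtin_frame_address (0)

void
example (void)
{
  void *somewhere_in_this_frame = CURRENT_STACK_FRAME;
  (void) somewhere_in_this_frame;
}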
diff --git a/linuxthreads/sysdeps/i386/i586/Versions b/linuxthreads/sysdeps/i386/i586/Versions
new file mode 100644
index 0000000000..32da57080d
--- /dev/null
+++ b/linuxthreads/sysdeps/i386/i586/Versions
@@ -0,0 +1,5 @@
+libpthread {
+ GLIBC_PRIVATE {
+ __pthread_clock_gettime; __pthread_clock_settime;
+ }
+}
diff --git a/linuxthreads/sysdeps/i386/i686/Versions b/linuxthreads/sysdeps/i386/i686/Versions
new file mode 100644
index 0000000000..32da57080d
--- /dev/null
+++ b/linuxthreads/sysdeps/i386/i686/Versions
@@ -0,0 +1,5 @@
+libpthread {
+ GLIBC_PRIVATE {
+ __pthread_clock_gettime; __pthread_clock_settime;
+ }
+}
diff --git a/linuxthreads/sysdeps/i386/i686/pt-machine.h b/linuxthreads/sysdeps/i386/i686/pt-machine.h
new file mode 100644
index 0000000000..1c75bf9807
--- /dev/null
+++ b/linuxthreads/sysdeps/i386/i686/pt-machine.h
@@ -0,0 +1,79 @@
+/* Machine-dependent pthreads configuration and inline functions.
+ i686 version.
+ Copyright (C) 1996-2001, 2002, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson <rth@tamu.edu>.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
+#ifndef PT_EI
+# define PT_EI extern inline __attribute__ ((always_inline))
+#endif
+#include "kernel-features.h"
+
+#ifndef __ASSEMBLER__
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
+
+/* Get some notion of the current stack. Need not be exactly the top
+ of the stack, just something somewhere in the current frame. */
+#define CURRENT_STACK_FRAME __builtin_frame_address (0)
+
+
+/* Spinlock implementation; required. */
+PT_EI long int
+testandset (int *spinlock)
+{
+ long int ret;
+
+ __asm__ __volatile__ (
+ "xchgl %0, %1"
+ : "=r" (ret), "=m" (*spinlock)
+ : "0" (1), "m" (*spinlock)
+ : "memory");
+
+ return ret;
+}
+
+
+/* Compare-and-swap for semaphores. It's always available on i686. */
+#define HAS_COMPARE_AND_SWAP
+
+PT_EI int
+__compare_and_swap (long int *p, long int oldval, long int newval)
+{
+ char ret;
+ long int readval;
+
+ __asm__ __volatile__ ("lock; cmpxchgl %3, %1; sete %0"
+ : "=q" (ret), "=m" (*p), "=a" (readval)
+ : "r" (newval), "m" (*p), "a" (oldval)
+ : "memory");
+ return ret;
+}
+#endif
+
+#if __ASSUME_LDT_WORKS > 0
+#include "../useldt.h"
+#endif
+
+/* The P4 and above really want some help to prevent overheating. */
+#define BUSY_WAIT_NOP __asm__ ("rep; nop")
+
+#endif /* pt-machine.h */
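The testandset/BUSY_WAIT_NOP pair above is what higher-level locking code spins on. A minimal, hypothetical sketch of that pattern in plain C, with a GCC __sync builtin standing in for the xchgl-based inline assembly (this is not the library's actual acquire routine):

/* Hypothetical sketch of a spin-acquire loop built on a testandset-style
   primitive.  */
static long int
testandset_stub (int *spinlock)
{
  /* Atomically store 1 and return the previous value, mirroring the
     xchgl-based testandset above (builtin used only as a stand-in).  */
  return __sync_lock_test_and_set (spinlock, 1);
}

static void
spin_acquire (int *spinlock)
{
  while (testandset_stub (spinlock) != 0)   /* Old value 1: still held.  */
    __asm__ __volatile__ ("rep; nop");      /* Same hint as BUSY_WAIT_NOP.  */
}

static void
spin_release (int *spinlock)
{
  __sync_lock_release (spinlock);           /* Store 0 with release semantics.  */
}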
diff --git a/linuxthreads/sysdeps/i386/pspinlock.c b/linuxthreads/sysdeps/i386/pspinlock.c
new file mode 100644
index 0000000000..6a70093957
--- /dev/null
+++ b/linuxthreads/sysdeps/i386/pspinlock.c
@@ -0,0 +1,103 @@
+/* POSIX spinlock implementation. x86 version.
+ Copyright (C) 2000, 2002 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <errno.h>
+#include <pthread.h>
+#include "internals.h"
+#include "kernel-features.h"
+
+
+/* This implementation is similar to the one used in the Linux kernel.
+   But the kernel uses byte instructions for the memory access.  This is
+   faster but unusable here.  The problem is that only 128
+   threads/processes could then use the spinlock at the same time.  If (by
+   a design error in the program) a thread/process held the
+   spinlock long enough to accumulate 128 waiting
+   processes, the next one would find a positive value in the spinlock
+   and assume it is unlocked.  We cannot accept that.  */
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ asm volatile
+ ("\n"
+ "1:\n\t"
+ "lock; decl %0\n\t"
+ "js 2f\n\t"
+ ".section .text.spinlock,\"ax\"\n"
+ "2:\n\t"
+ "cmpl $0,%0\n\t"
+ "rep; nop\n\t"
+ "jle 2b\n\t"
+ "jmp 1b\n\t"
+ ".previous"
+ : "=m" (*lock));
+ return 0;
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ int oldval;
+
+ asm volatile
+ ("xchgl %0,%1"
+ : "=r" (oldval), "=m" (*lock)
+ : "0" (0));
+ return oldval > 0 ? 0 : EBUSY;
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ asm volatile
+ ("movl $1,%0"
+ : "=m" (*lock));
+ return 0;
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+ /* We can ignore the `pshared' parameter. Since we are busy-waiting
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ *lock = 1;
+ return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
+
+#ifndef __ASSUME_SET_THREAD_AREA_SYSCALL
+int __have_no_set_thread_area;
+#endif
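The weak aliases above expose these functions as the standard POSIX spin-lock interface. A small, self-contained usage example of that public API (not part of the patch):

#include <pthread.h>
#include <stdio.h>

int
main (void)
{
  pthread_spinlock_t lock;

  if (pthread_spin_init (&lock, PTHREAD_PROCESS_PRIVATE) != 0)
    return 1;

  pthread_spin_lock (&lock);        /* Reaches __pthread_spin_lock above.  */
  puts ("in the critical section");
  pthread_spin_unlock (&lock);      /* Stores 1, the unlocked state.  */

  pthread_spin_destroy (&lock);
  return 0;
}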
diff --git a/linuxthreads/sysdeps/i386/pt-machine.h b/linuxthreads/sysdeps/i386/pt-machine.h
new file mode 100644
index 0000000000..0df096d152
--- /dev/null
+++ b/linuxthreads/sysdeps/i386/pt-machine.h
@@ -0,0 +1,108 @@
+/* Machine-dependent pthreads configuration and inline functions.
+ i386 version.
+ Copyright (C) 1996-2001, 2002, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson <rth@tamu.edu>.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
+#ifndef __ASSEMBLER__
+#ifndef PT_EI
+# define PT_EI extern inline __attribute__ ((always_inline))
+#endif
+
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
+
+/* Get some notion of the current stack. Need not be exactly the top
+ of the stack, just something somewhere in the current frame. */
+#define CURRENT_STACK_FRAME __builtin_frame_address (0)
+
+
+/* Spinlock implementation; required. */
+PT_EI long int
+testandset (int *spinlock)
+{
+ long int ret;
+
+ __asm__ __volatile__(
+ "xchgl %0, %1"
+ : "=r"(ret), "=m"(*spinlock)
+ : "0"(1), "m"(*spinlock)
+ : "memory");
+
+ return ret;
+}
+
+
+/* Compare-and-swap for semaphores.
+ Available on the 486 and above, but not on the 386.
+ We test dynamically whether it's available or not. */
+
+#define HAS_COMPARE_AND_SWAP
+#define TEST_FOR_COMPARE_AND_SWAP
+
+PT_EI int
+__compare_and_swap (long int *p, long int oldval, long int newval)
+{
+ char ret;
+ long int readval;
+
+ __asm__ __volatile__ ("lock; cmpxchgl %3, %1; sete %0"
+ : "=q" (ret), "=m" (*p), "=a" (readval)
+ : "r" (newval), "m" (*p), "a" (oldval)
+ : "memory");
+ return ret;
+}
+
+
+PT_EI int
+get_eflags (void)
+{
+ int res;
+ __asm__ __volatile__ ("pushfl; popl %0" : "=r" (res) : );
+ return res;
+}
+
+
+PT_EI void
+set_eflags (int newflags)
+{
+ __asm__ __volatile__ ("pushl %0; popfl" : : "r" (newflags) : "cc");
+}
+
+
+PT_EI int
+compare_and_swap_is_available (void)
+{
+ int oldflags = get_eflags ();
+ int changed;
+ /* Flip AC bit in EFLAGS. */
+ set_eflags (oldflags ^ 0x40000);
+ /* See if bit changed. */
+ changed = (get_eflags () ^ oldflags) & 0x40000;
+ /* Restore EFLAGS. */
+ set_eflags (oldflags);
+ /* If the AC flag did not change, it's a 386 and it lacks cmpxchg.
+ Otherwise, it's a 486 or above and it has cmpxchg. */
+ return changed != 0;
+}
+#endif /* __ASSEMBLER__ */
+
+#endif /* pt-machine.h */
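Because this header also defines TEST_FOR_COMPARE_AND_SWAP, generic code is expected to call the AC-flag probe above before relying on cmpxchgl. A hypothetical caller, shown only to illustrate the probe's contract; it compiles on its own but must be linked against code that includes pt-machine.h so the extern inline has a definition:

#include <stdio.h>

extern int compare_and_swap_is_available (void);   /* From pt-machine.h.  */

int
main (void)
{
  if (compare_and_swap_is_available ())
    puts ("i486 or later: lock; cmpxchgl can back __compare_and_swap");
  else
    puts ("i386: fall back to testandset-based locking");
  return 0;
}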
diff --git a/linuxthreads/sysdeps/i386/tcb-offsets.sym b/linuxthreads/sysdeps/i386/tcb-offsets.sym
new file mode 100644
index 0000000000..69a5018d88
--- /dev/null
+++ b/linuxthreads/sysdeps/i386/tcb-offsets.sym
@@ -0,0 +1,7 @@
+#include <sysdep.h>
+#include <tls.h>
+
+MULTIPLE_THREADS_OFFSET offsetof (tcbhead_t, multiple_threads)
+#ifdef NEED_DL_SYSINFO
+SYSINFO_OFFSET offsetof (tcbhead_t, sysinfo)
+#endif
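This .sym file is processed by glibc's gen-as-const machinery (enabled by the gen-as-const-headers line in the Makefile above), which evaluates each offsetof expression at build time and emits a plain numeric macro into a generated tcb-offsets.h. A hypothetical consumer of that header, illustrative only and not part of this patch:

#include <tcb-offsets.h>   /* Generated from tcb-offsets.sym at build time.  */

/* Sketch of a single-thread check: read the multiple_threads flag of the
   current tcbhead_t through the %gs segment, without hard-coding the
   struct layout in assembly.  */
static int
single_threaded_p (void)
{
  int multiple;
  __asm__ ("movl %%gs:%P1,%0"
           : "=r" (multiple)
           : "i" (MULTIPLE_THREADS_OFFSET));
  return multiple == 0;
}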
diff --git a/linuxthreads/sysdeps/i386/tls.h b/linuxthreads/sysdeps/i386/tls.h
new file mode 100644
index 0000000000..5306d082bb
--- /dev/null
+++ b/linuxthreads/sysdeps/i386/tls.h
@@ -0,0 +1,225 @@
+/* Definition for thread-local data handling. linuxthreads/i386 version.
+ Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _TLS_H
+#define _TLS_H
+
+# include <dl-sysdep.h>
+# include <pt-machine.h>
+
+#ifndef __ASSEMBLER__
+# include <stddef.h>
+# include <stdint.h>
+
+/* Type for the dtv. */
+typedef union dtv
+{
+ size_t counter;
+ void *pointer;
+} dtv_t;
+
+
+typedef struct
+{
+  void *tcb;		/* Pointer to the TCB.  Not necessarily the
+			   thread descriptor used by libpthread.  */
+ dtv_t *dtv;
+ void *self; /* Pointer to the thread descriptor. */
+ int multiple_threads;
+#ifdef NEED_DL_SYSINFO
+ uintptr_t sysinfo;
+#endif
+} tcbhead_t;
+
+#else /* __ASSEMBLER__ */
+# include <tcb-offsets.h>
+#endif
+
+/* We can support TLS only if the floating-stack support is available.
+ However, we want to compile in the support and test at runtime whether
+ the running kernel can support it or not. To avoid bothering with the
+ TLS support code at all, use configure --without-tls.
+
+ We need USE_TLS to be consistently defined, for ldsodefs.h conditionals.
+ But some of the code below can cause problems in building libpthread
+   (e.g. useldt.h will define FLOATING_STACKS when it shouldn't).  */
+
+#if defined HAVE_TLS_SUPPORT \
+ && (defined FLOATING_STACKS || !defined IS_IN_libpthread)
+
+/* Signal that TLS support is available. */
+# define USE_TLS 1
+
+# ifndef __ASSEMBLER__
+/* Get system call information. */
+# include <sysdep.h>
+
+
+/* Get the thread descriptor definition. */
+# include <linuxthreads/descr.h>
+
+/* This is the size of the initial TCB. */
+# define TLS_INIT_TCB_SIZE sizeof (tcbhead_t)
+
+/* Alignment requirements for the initial TCB. */
+# define TLS_INIT_TCB_ALIGN __alignof__ (tcbhead_t)
+
+/* This is the size of the TCB. */
+# define TLS_TCB_SIZE sizeof (struct _pthread_descr_struct)
+
+/* Alignment requirements for the TCB. */
+# define TLS_TCB_ALIGN __alignof__ (struct _pthread_descr_struct)
+
+/* The TCB can have any size and the memory following the address the
+ thread pointer points to is unspecified. Allocate the TCB there. */
+# define TLS_TCB_AT_TP 1
+
+
+/* Install the dtv pointer.  The pointer passed points to the element
+   with index -1, which contains the length.  */
+# define INSTALL_DTV(descr, dtvp) \
+ ((tcbhead_t *) (descr))->dtv = (dtvp) + 1
+
+/* Install new dtv for current thread. */
+# define INSTALL_NEW_DTV(dtv) \
+ ({ struct _pthread_descr_struct *__descr; \
+ THREAD_SETMEM (__descr, p_header.data.dtvp, (dtv)); })
+
+/* Return dtv of given thread descriptor. */
+# define GET_DTV(descr) \
+ (((tcbhead_t *) (descr))->dtv)
+
+# ifdef __PIC__
+# define TLS_EBX_ARG "r"
+# define TLS_LOAD_EBX "xchgl %3, %%ebx\n\t"
+# else
+# define TLS_EBX_ARG "b"
+# define TLS_LOAD_EBX
+# endif
+
+# if !defined IS_IN_linuxthreads && !defined DO_MODIFY_LDT
+# include "useldt.h" /* For the structure. */
+# endif
+# if __ASSUME_LDT_WORKS > 0
+# define TLS_DO_MODIFY_LDT_KERNEL_CHECK(doit) (doit) /* Nothing to check. */
+# else
+# define TLS_DO_MODIFY_LDT_KERNEL_CHECK(doit) \
+ (__builtin_expect (GLRO(dl_osversion) < 131939, 0) \
+ ? "kernel too old for thread-local storage support\n" \
+ : (doit))
+# endif
+
+# define TLS_DO_MODIFY_LDT(descr, nr) \
+TLS_DO_MODIFY_LDT_KERNEL_CHECK( \
+({ \
+ struct modify_ldt_ldt_s ldt_entry = \
+ { nr, (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
+ 1, 0, 0, 1, 0, 1, 0 }; \
+ int result; \
+ asm volatile (TLS_LOAD_EBX \
+ "int $0x80\n\t" \
+ TLS_LOAD_EBX \
+ : "=a" (result) \
+ : "0" (__NR_modify_ldt), \
+ /* The extra argument with the "m" constraint is necessary \
+ to let the compiler know that we are accessing LDT_ENTRY \
+ here. */ \
+ "m" (ldt_entry), TLS_EBX_ARG (1), "c" (&ldt_entry), \
+ "d" (sizeof (ldt_entry))); \
+ __builtin_expect (result, 0) == 0 \
+ ? ({ asm ("movw %w0, %%gs" : : "q" ((nr) * 8 + 7)); NULL; }) \
+ : "cannot set up LDT for thread-local storage\n"; \
+}))
+
+# define TLS_DO_SET_THREAD_AREA(descr, secondcall) \
+({ \
+ struct modify_ldt_ldt_s ldt_entry = \
+ { -1, (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
+ 1, 0, 0, 1, 0, 1, 0 }; \
+ int result; \
+ if (secondcall) \
+ ldt_entry.entry_number = ({ int _gs; \
+ asm ("movw %%gs, %w0" : "=q" (_gs)); \
+ (_gs & 0xffff) >> 3; }); \
+ asm volatile (TLS_LOAD_EBX \
+ "int $0x80\n\t" \
+ TLS_LOAD_EBX \
+ : "=a" (result), "=m" (ldt_entry.entry_number) \
+ : "0" (__NR_set_thread_area), \
+ /* The extra argument with the "m" constraint is necessary \
+ to let the compiler know that we are accessing LDT_ENTRY \
+ here. */ \
+ TLS_EBX_ARG (&ldt_entry), "m" (ldt_entry)); \
+ if (__builtin_expect (result, 0) == 0) \
+ asm ("movw %w0, %%gs" : : "q" (ldt_entry.entry_number * 8 + 3)); \
+ result; \
+})
+
+# ifdef __ASSUME_SET_THREAD_AREA_SYSCALL
+# define TLS_SETUP_GS_SEGMENT(descr, secondcall) \
+ (TLS_DO_SET_THREAD_AREA (descr, secondcall) \
+ ? "set_thread_area failed when setting up thread-local storage\n" : NULL)
+# elif defined __NR_set_thread_area
+# define TLS_SETUP_GS_SEGMENT(descr, secondcall) \
+ (TLS_DO_SET_THREAD_AREA (descr, secondcall) \
+ ? TLS_DO_MODIFY_LDT (descr, 0) : NULL)
+# else
+# define TLS_SETUP_GS_SEGMENT(descr, secondcall) \
+ TLS_DO_MODIFY_LDT ((descr), 0)
+# endif
+
+#if defined NEED_DL_SYSINFO
+# define INIT_SYSINFO \
+ head->sysinfo = GLRO(dl_sysinfo)
+#else
+# define INIT_SYSINFO
+#endif
+
+/* Code to do the initial initialization of the thread pointer.  This
+   might need special attention since 'errno' is not yet available and,
+   if the operation can fail, 'errno' must not be touched.
+
+ The value of this macro is null if successful, or an error string. */
+# define TLS_INIT_TP(descr, secondcall) \
+ ({ \
+ void *_descr = (descr); \
+ tcbhead_t *head = _descr; \
+ \
+ head->tcb = _descr; \
+ /* For now the thread descriptor is at the same address. */ \
+ head->self = _descr; \
+ \
+ INIT_SYSINFO; \
+ TLS_SETUP_GS_SEGMENT (_descr, secondcall); \
+ })
+
+/* Indicate that the dynamic linker shouldn't try to initialize TLS even
+ when no PT_TLS segments are found in the program and libraries
+ it is linked against. */
+# define TLS_INIT_TP_EXPENSIVE 1
+
+/* Return the address of the dtv for the current thread. */
+# define THREAD_DTV() \
+ ({ struct _pthread_descr_struct *__descr; \
+ THREAD_GETMEM (__descr, p_header.data.dtvp); })
+
+# endif /* HAVE_TLS_SUPPORT && (FLOATING_STACKS || !IS_IN_libpthread) */
+#endif /* __ASSEMBLER__ */
+
+#endif /* tls.h */
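As the comment before TLS_INIT_TP states, the macro yields NULL on success or a static error string, since errno cannot be used that early. A minimal fragment showing how startup code might check the result; `tcbp' and the use of the glibc-internal _dl_fatal_printf are assumptions for illustration:

/* Sketch only: assumes `tcbp' points to an already-allocated TCB with
   its dtv installed, as the macros above require.  */
const char *lossage = TLS_INIT_TP (tcbp, 0);
if (lossage != NULL)
  /* No errno this early; report the static error string and stop.  */
  _dl_fatal_printf ("cannot set up thread-local storage: %s\n", lossage);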
diff --git a/linuxthreads/sysdeps/i386/useldt.h b/linuxthreads/sysdeps/i386/useldt.h
new file mode 100644
index 0000000000..4ac82f1ab0
--- /dev/null
+++ b/linuxthreads/sysdeps/i386/useldt.h
@@ -0,0 +1,314 @@
+/* Special definitions for ix86 machine using segment register based
+ thread descriptor.
+ Copyright (C) 1998, 2000, 2001, 2002 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#ifndef __ASSEMBLER__
+#include <stddef.h> /* For offsetof. */
+#include <stdlib.h> /* For abort(). */
+#include <sysdep.h>
+
+
+/* We don't want to include the kernel header. So duplicate the
+ information. */
+
+/* Structure passed on `modify_ldt' call. */
+struct modify_ldt_ldt_s
+{
+ unsigned int entry_number;
+ unsigned long int base_addr;
+ unsigned int limit;
+ unsigned int seg_32bit:1;
+ unsigned int contents:2;
+ unsigned int read_exec_only:1;
+ unsigned int limit_in_pages:1;
+ unsigned int seg_not_present:1;
+ unsigned int useable:1;
+ unsigned int empty:25;
+};
+
+/* System call to set LDT entry. */
+extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
+
+
+/* Return the thread descriptor for the current thread.
+
+ The contained asm must *not* be marked volatile since otherwise
+ assignments like
+ pthread_descr self = thread_self();
+ do not get optimized away. */
+#define THREAD_SELF \
+({ \
+ register pthread_descr __self; \
+ __asm__ ("movl %%gs:%c1,%0" : "=r" (__self) \
+ : "i" (offsetof (struct _pthread_descr_struct, \
+ p_header.data.self))); \
+ __self; \
+})
+
+
+/* Initialize the thread-unique value. Two possible ways to do it. */
+
+#define DO_MODIFY_LDT(descr, nr) \
+({ \
+ struct modify_ldt_ldt_s ldt_entry = \
+ { nr, (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
+ 1, 0, 0, 1, 0, 1, 0 }; \
+ if (__modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) \
+ abort (); \
+ asm ("movw %w0, %%gs" : : "q" (nr * 8 + 7)); \
+})
+
+#ifdef __PIC__
+# define USETLS_EBX_ARG "r"
+# define USETLS_LOAD_EBX "xchgl %1, %%ebx\n\t"
+#else
+# define USETLS_EBX_ARG "b"
+# define USETLS_LOAD_EBX
+#endif
+
+/* When using the new set_thread_area call, we don't need to change %gs
+ because we inherited the value set up in the main thread by TLS setup.
+ We need to extract that value and set up the same segment in this
+ thread. */
+#if USE_TLS
+# define DO_SET_THREAD_AREA_REUSE(nr) 1
+#else
+/* Without TLS, we do the initialization of the main thread, where NR == 0. */
+# define DO_SET_THREAD_AREA_REUSE(nr) (!__builtin_constant_p (nr) || (nr))
+#endif
+#define DO_SET_THREAD_AREA(descr, nr) \
+({ \
+ int __gs; \
+ if (DO_SET_THREAD_AREA_REUSE (nr)) \
+ { \
+ asm ("movw %%gs, %w0" : "=q" (__gs)); \
+ struct modify_ldt_ldt_s ldt_entry = \
+ { (__gs & 0xffff) >> 3, \
+ (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
+ 1, 0, 0, 1, 0, 1, 0 }; \
+ \
+ int __result; \
+ __asm (USETLS_LOAD_EBX \
+ "movl %2, %%eax\n\t" \
+ "int $0x80\n\t" \
+ USETLS_LOAD_EBX \
+ : "=&a" (__result) \
+ : USETLS_EBX_ARG (&ldt_entry), "i" (__NR_set_thread_area), \
+ "m" (ldt_entry) \
+ : "memory"); \
+ if (__result == 0) \
+ asm ("movw %w0, %%gs" :: "q" (__gs)); \
+ else \
+ __gs = -1; \
+ } \
+ else \
+ { \
+ struct modify_ldt_ldt_s ldt_entry = \
+ { -1, \
+ (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
+ 1, 0, 0, 1, 0, 1, 0 }; \
+ int __result; \
+ __asm (USETLS_LOAD_EBX \
+ "movl %2, %%eax\n\t" \
+ "int $0x80\n\t" \
+ USETLS_LOAD_EBX \
+ : "=&a" (__result) \
+ : USETLS_EBX_ARG (&ldt_entry), "i" (__NR_set_thread_area), \
+ "m" (ldt_entry) \
+ : "memory"); \
+ if (__result == 0) \
+ { \
+ __gs = (ldt_entry.entry_number << 3) + 3; \
+ asm ("movw %w0, %%gs" : : "q" (__gs)); \
+ } \
+ else \
+ __gs = -1; \
+ } \
+ __gs; \
+})
+
+#if defined __ASSUME_SET_THREAD_AREA_SYSCALL
+# define INIT_THREAD_SELF(descr, nr) DO_SET_THREAD_AREA (descr, nr)
+#elif defined __NR_set_thread_area
+# define INIT_THREAD_SELF(descr, nr) \
+({ \
+ if (__builtin_expect (__have_no_set_thread_area, 0) \
+ || (DO_SET_THREAD_AREA (descr, DO_SET_THREAD_AREA_REUSE (nr)) == -1 \
+ && (__have_no_set_thread_area = 1))) \
+ DO_MODIFY_LDT (descr, nr); \
+})
+/* Defined in pspinlock.c. */
+extern int __have_no_set_thread_area;
+#else
+# define INIT_THREAD_SELF(descr, nr) DO_MODIFY_LDT (descr, nr)
+#endif
+
+/* Free resources associated with thread descriptor. */
+#ifdef __ASSUME_SET_THREAD_AREA_SYSCALL
+#define FREE_THREAD(descr, nr) do { } while (0)
+#elif defined __NR_set_thread_area
+#define FREE_THREAD(descr, nr) \
+{ \
+ int __gs; \
+ __asm__ __volatile__ ("movw %%gs, %w0" : "=q" (__gs)); \
+ if (__builtin_expect (__gs & 4, 0)) \
+ { \
+ struct modify_ldt_ldt_s ldt_entry = \
+ { nr, 0, 0, 0, 0, 1, 0, 1, 0, 0 }; \
+ __modify_ldt (1, &ldt_entry, sizeof (ldt_entry)); \
+ } \
+}
+#else
+#define FREE_THREAD(descr, nr) \
+{ \
+ struct modify_ldt_ldt_s ldt_entry = \
+ { nr, 0, 0, 0, 0, 1, 0, 1, 0, 0 }; \
+ __modify_ldt (1, &ldt_entry, sizeof (ldt_entry)); \
+}
+#endif
+
+/* Read member of the thread descriptor directly. */
+#define THREAD_GETMEM(descr, member) \
+({ \
+ __typeof__ (descr->member) __value; \
+ if (sizeof (__value) == 1) \
+ __asm__ __volatile__ ("movb %%gs:%P2,%b0" \
+ : "=q" (__value) \
+ : "0" (0), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else if (sizeof (__value) == 4) \
+ __asm__ __volatile__ ("movl %%gs:%P1,%0" \
+ : "=r" (__value) \
+ : "i" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else \
+ { \
+ if (sizeof (__value) != 8) \
+ /* There should not be any value with a size other than 1, 4 or 8. */\
+ abort (); \
+ \
+ __asm__ __volatile__ ("movl %%gs:%P1,%%eax\n\t" \
+ "movl %%gs:%P2,%%edx" \
+ : "=A" (__value) \
+ : "i" (offsetof (struct _pthread_descr_struct, \
+ member)), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member) + 4)); \
+ } \
+ __value; \
+})
+
+/* Same as THREAD_GETMEM, but the member offset can be non-constant. */
+#define THREAD_GETMEM_NC(descr, member) \
+({ \
+ __typeof__ (descr->member) __value; \
+ if (sizeof (__value) == 1) \
+ __asm__ __volatile__ ("movb %%gs:(%2),%b0" \
+ : "=q" (__value) \
+ : "0" (0), \
+ "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else if (sizeof (__value) == 4) \
+ __asm__ __volatile__ ("movl %%gs:(%1),%0" \
+ : "=r" (__value) \
+ : "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else \
+ { \
+ if (sizeof (__value) != 8) \
+ /* There should not be any value with a size other than 1, 4 or 8. */\
+ abort (); \
+ \
+ __asm__ __volatile__ ("movl %%gs:(%1),%%eax\n\t" \
+ "movl %%gs:4(%1),%%edx" \
+ : "=&A" (__value) \
+ : "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ } \
+ __value; \
+})
+
+/* Set member of the thread descriptor directly.  */
+#define THREAD_SETMEM(descr, member, value) \
+({ \
+ __typeof__ (descr->member) __value = (value); \
+ if (sizeof (__value) == 1) \
+ __asm__ __volatile__ ("movb %0,%%gs:%P1" : \
+ : "q" (__value), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else if (sizeof (__value) == 4) \
+ __asm__ __volatile__ ("movl %0,%%gs:%P1" : \
+ : "r" (__value), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else \
+ { \
+ if (sizeof (__value) != 8) \
+ /* There should not be any value with a size other than 1, 4 or 8. */\
+ abort (); \
+ \
+      __asm__ __volatile__ ("movl %%eax,%%gs:%P1\n\t" \
+ "movl %%edx,%%gs:%P2" : \
+ : "A" (__value), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member)), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member) + 4)); \
+ } \
+})
+
+/* Same as THREAD_SETMEM, but the member offset can be non-constant.  */
+#define THREAD_SETMEM_NC(descr, member, value) \
+({ \
+ __typeof__ (descr->member) __value = (value); \
+ if (sizeof (__value) == 1) \
+ __asm__ __volatile__ ("movb %0,%%gs:(%1)" : \
+ : "q" (__value), \
+ "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else if (sizeof (__value) == 4) \
+ __asm__ __volatile__ ("movl %0,%%gs:(%1)" : \
+ : "r" (__value), \
+ "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else \
+ { \
+ if (sizeof (__value) != 8) \
+ /* There should not be any value with a size other than 1, 4 or 8. */\
+ abort (); \
+ \
+ __asm__ __volatile__ ("movl %%eax,%%gs:(%1)\n\t" \
+ "movl %%edx,%%gs:4(%1)" : \
+ : "A" (__value), \
+ "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ } \
+})
+#endif
+
+#if __ASSUME_LDT_WORKS > 0
+/* We want the OS to assign stack addresses. */
+#define FLOATING_STACKS 1
+
+/* Maximum size of the stack if the rlimit is unlimited. */
+#define ARCH_STACK_MAX_SIZE 8*1024*1024
+#endif
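Once INIT_THREAD_SELF has pointed %gs at the thread descriptor, the accessor macros above are how linuxthreads code touches per-thread state. A hypothetical fragment, with the descriptor field names (p_errnop, p_cancelstate) assumed from the linuxthreads descriptor purely for illustration:

/* Sketch: fetch the current descriptor via the segment register, then
   read and write members through %gs instead of a plain dereference.  */
pthread_descr self = thread_self ();                 /* Wraps THREAD_SELF.  */
int *errno_location = THREAD_GETMEM (self, p_errnop);
THREAD_SETMEM (self, p_cancelstate, PTHREAD_CANCEL_ENABLE);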