diff options
-rw-r--r-- | libjava/ChangeLog              | 9 | ++++++++-
-rw-r--r-- | libjava/posix-threads.cc       | 3 | ++-
-rw-r--r-- | libjava/sysdep/i386/locks.h    | 8 | ++++++++
-rw-r--r-- | libjava/sysdep/powerpc/locks.h | 8 | ++++++++
4 files changed, 26 insertions(+), 2 deletions(-)
diff --git a/libjava/ChangeLog b/libjava/ChangeLog index 38bcb31fc19..677fd8e29f8 100644 --- a/libjava/ChangeLog +++ b/libjava/ChangeLog @@ -1,6 +1,13 @@ +2002-03-20 Bryce McKinlay <bryce@waitaki.otago.ac.nz> + + * posix-threads.cc (_Jv_ThreadSelf_out_of_line): Use write_barrier, + not release_set. + * sysdep/powerpc/locks.h (write_barrier): New function. + * sysdep/i386/locks.h (write_barrier): New function. + 2002-03-19 Martin Kahlert <martin.kahlert@infineon.com> - * include/jni.h Use correct C comments + * include/jni.h Use correct C comments. 2002-03-18 Tom Tromey <tromey@redhat.com> diff --git a/libjava/posix-threads.cc b/libjava/posix-threads.cc index 6442eaffb2f..e92348bcc2c 100644 --- a/libjava/posix-threads.cc +++ b/libjava/posix-threads.cc @@ -448,7 +448,8 @@ _Jv_ThreadSelf_out_of_line(volatile self_cache_entry *sce, size_t high_sp_bits) { pthread_t self = pthread_self(); sce -> high_sp_bits = high_sp_bits; - release_set ((obj_addr_t *) &(sce -> self), self); + write_barrier(); + sce -> self = self; return self; } diff --git a/libjava/sysdep/i386/locks.h b/libjava/sysdep/i386/locks.h index 0c029ac939a..a9501ae99b2 100644 --- a/libjava/sysdep/i386/locks.h +++ b/libjava/sysdep/i386/locks.h @@ -62,4 +62,12 @@ read_barrier() { } +// Ensure that prior stores to memory are completed with respect to other +// processors. +inline static void +write_barrier() +{ + // X86 does not reorder writes. We just need to ensure that gcc also doesn't. + __asm__ __volatile__(" " : : : "memory"); +} #endif diff --git a/libjava/sysdep/powerpc/locks.h b/libjava/sysdep/powerpc/locks.h index 414b5dcb7f8..4d4532b1f83 100644 --- a/libjava/sysdep/powerpc/locks.h +++ b/libjava/sysdep/powerpc/locks.h @@ -75,4 +75,12 @@ read_barrier() __asm__ __volatile__ ("isync" : : : "memory"); } +// Ensure that prior stores to memory are completed with respect to other +// processors. +inline static void +write_barrier() +{ + __asm__ __volatile__ ("sync" : : : "memory"); +} + #endif |