author | Lorry Tar Creator <lorry-tar-importer@baserock.org> | 2011-08-09 15:52:57 +0000
committer | Lorry <lorry@roadtrain.codethink.co.uk> | 2012-09-25 08:55:51 +0000
commit | c52067ca0b79daf6ffae5376481e6fef74849254 (patch)
tree | 9a4b19451eb85dcd6e16afb9d49a45a2f4f5aa83 /mozilla/nsprpub/pr/src/threads
parent | 40d4517c5b760454c076064fac192b28a22e7a37 (diff)
download | nspr-c52067ca0b79daf6ffae5376481e6fef74849254.tar.gz
Imported from /srv/lorry/lorry-area/nspr/nspr-4.8.9.tar.gz (HEAD, nspr-4.8.9, master)
Diffstat (limited to 'mozilla/nsprpub/pr/src/threads')
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/.cvsignore | 1
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/Makefile.in | 94
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/combined/.cvsignore | 1
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/combined/Makefile.in | 79
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/combined/README | 62
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/combined/prucpu.c | 437
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/combined/prucv.c | 677
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/combined/prulock.c | 465
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/combined/prustack.c | 206
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/combined/pruthr.c | 1887
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/prcmon.c | 463
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/prcthr.c | 426
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/prdump.c | 153
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/prmon.c | 231
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/prrwlock.c | 512
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/prsem.c | 170
-rw-r--r-- | mozilla/nsprpub/pr/src/threads/prtpd.c | 280
17 files changed, 6144 insertions, 0 deletions
diff --git a/mozilla/nsprpub/pr/src/threads/.cvsignore b/mozilla/nsprpub/pr/src/threads/.cvsignore new file mode 100644 index 0000000..f3c7a7c --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/.cvsignore @@ -0,0 +1 @@ +Makefile diff --git a/mozilla/nsprpub/pr/src/threads/Makefile.in b/mozilla/nsprpub/pr/src/threads/Makefile.in new file mode 100644 index 0000000..5e6731c --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/Makefile.in @@ -0,0 +1,94 @@ +# +# ***** BEGIN LICENSE BLOCK ***** +# Version: MPL 1.1/GPL 2.0/LGPL 2.1 +# +# The contents of this file are subject to the Mozilla Public License Version +# 1.1 (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# http://www.mozilla.org/MPL/ +# +# Software distributed under the License is distributed on an "AS IS" basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License +# for the specific language governing rights and limitations under the +# License. +# +# The Original Code is the Netscape Portable Runtime (NSPR). +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998-2000 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# +# Alternatively, the contents of this file may be used under the terms of +# either the GNU General Public License Version 2 or later (the "GPL"), or +# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +# in which case the provisions of the GPL or the LGPL are applicable instead +# of those above. If you wish to allow use of your version of this file only +# under the terms of either the GPL or the LGPL, and not to allow others to +# use your version of this file under the terms of the MPL, indicate your +# decision by deleting the provisions above and replace them with the notice +# and other provisions required by the GPL or the LGPL. If you do not delete +# the provisions above, a recipient may use your version of this file under +# the terms of any one of the MPL, the GPL or the LGPL. +# +# ***** END LICENSE BLOCK ***** + +#! gmake + +MOD_DEPTH = ../../.. 
+topsrcdir = @top_srcdir@ +srcdir = @srcdir@ +VPATH = @srcdir@ + +include $(MOD_DEPTH)/config/autoconf.mk + +include $(topsrcdir)/config/config.mk + +ifdef USE_PTHREADS + DIRS = +else +ifdef USE_BTHREADS + DIRS = +else + DIRS = combined +endif +endif + +ifdef USE_PTHREADS +CSRCS = \ + prcmon.c \ + prrwlock.c \ + prtpd.c \ + $(NULL) +else +ifdef USE_BTHREADS +CSRCS = \ + prcmon.c \ + prrwlock.c \ + prtpd.c \ + $(NULL) +else +CSRCS = \ + prcmon.c \ + prdump.c \ + prmon.c \ + prsem.c \ + prrwlock.c \ + prcthr.c \ + prtpd.c \ + $(NULL) +endif +endif + +TARGETS = $(OBJS) + +INCLUDES = -I$(dist_includedir) -I$(topsrcdir)/pr/include -I$(topsrcdir)/pr/include/private + +DEFINES += -D_NSPR_BUILD_ + +include $(topsrcdir)/config/rules.mk + +export:: $(TARGETS) + diff --git a/mozilla/nsprpub/pr/src/threads/combined/.cvsignore b/mozilla/nsprpub/pr/src/threads/combined/.cvsignore new file mode 100644 index 0000000..f3c7a7c --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/combined/.cvsignore @@ -0,0 +1 @@ +Makefile diff --git a/mozilla/nsprpub/pr/src/threads/combined/Makefile.in b/mozilla/nsprpub/pr/src/threads/combined/Makefile.in new file mode 100644 index 0000000..476c6c9 --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/combined/Makefile.in @@ -0,0 +1,79 @@ +# +# ***** BEGIN LICENSE BLOCK ***** +# Version: MPL 1.1/GPL 2.0/LGPL 2.1 +# +# The contents of this file are subject to the Mozilla Public License Version +# 1.1 (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# http://www.mozilla.org/MPL/ +# +# Software distributed under the License is distributed on an "AS IS" basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License +# for the specific language governing rights and limitations under the +# License. +# +# The Original Code is the Netscape Portable Runtime (NSPR). +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998-2000 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# +# Alternatively, the contents of this file may be used under the terms of +# either the GNU General Public License Version 2 or later (the "GPL"), or +# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +# in which case the provisions of the GPL or the LGPL are applicable instead +# of those above. If you wish to allow use of your version of this file only +# under the terms of either the GPL or the LGPL, and not to allow others to +# use your version of this file under the terms of the MPL, indicate your +# decision by deleting the provisions above and replace them with the notice +# and other provisions required by the GPL or the LGPL. If you do not delete +# the provisions above, a recipient may use your version of this file under +# the terms of any one of the MPL, the GPL or the LGPL. +# +# ***** END LICENSE BLOCK ***** + + +#! gmake + +MOD_DEPTH = ../../../.. 
+topsrcdir = @top_srcdir@ +srcdir = @srcdir@ +VPATH = @srcdir@ + +include $(MOD_DEPTH)/config/autoconf.mk + +include $(topsrcdir)/config/config.mk + +# Disable optimization of the nspr on SunOS4.1.3 +ifeq ($(OS_ARCH),SunOS) +ifeq ($(OS_RELEASE),4.1.3_U1) +OPTIMIZER = +endif +endif + +ifdef USE_PTHREADS +CSRCS = \ + $(NULL) +else +CSRCS = \ + prucpu.c \ + prucv.c \ + prulock.c \ + pruthr.c \ + prustack.c \ + $(NULL) +endif + +TARGETS = $(OBJS) + +INCLUDES = -I$(dist_includedir) -I$(topsrcdir)/pr/include -I$(topsrcdir)/pr/include/private + +DEFINES += -D_NSPR_BUILD_ + +include $(topsrcdir)/config/rules.mk + +export:: $(TARGETS) + diff --git a/mozilla/nsprpub/pr/src/threads/combined/README b/mozilla/nsprpub/pr/src/threads/combined/README new file mode 100644 index 0000000..aa26665 --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/combined/README @@ -0,0 +1,62 @@ +NSPR 2.0 evolution +------------------ + + +Phase 1 - today + +Currently (Oct 10, 1996) NSPR 2.0 has two modes. Either _PR_NTHREAD +is defined, in which case the PR_CreateThread() call always creates a +native kernel thread, or _PR_NTHREAD is not defined and PR_CreateThread() +always creates user level threads within the single, original process. This +source code is reflected in two directories, nspr20/pr/src/threads/native, and +nspr20/pr/src/threads/user. Although the PR_CreateThread() function has +a parameter to specify the "scope" of a thread, this parameter is not yet +used, except on Solaris where it is used to specify bound vs unbound threads. + +Phase 2 - next week + +The next step is to provide a combination of user and native threads. The +idea, of course, is to have some small number of native threads and each of +those threads be able to run user level threads. The number of native +threads created will most likely be proportional to the number of CPUs in +the system. For this reason, the specific set of native threads which are +used to run the user-level threads will be called "CPU" threads. + +The user level threads which will be run on the CPU threads are able to +run on any of the CPU threads available, and over the course of a user-level +thread's lifetime, it may drift from one CPU thread to another. All +user-level threads will compete for processing time via a single run queue. + +Creation of a CPU thread will be primarily controlled by NSPR itself or by +the user running a function PR_Concurrency(). The details of PR_Concurrency() +have not yet been worked out, but the idea is that the user can specify to +NSPR how many CPU threads are desired. + +In this system, user-level threads are created by using PR_CreateThread() and +specifying the PR_LOCAL_SCOPE option. LOCAL_SCOPE indicates that the thread +will be under the control of the "local" scheduler. Creating threads with +GLOBAL_SCOPE, on the other hand, will create a thread which is under the +control of the system's scheduler. In other words, this creates a native thread +which is not a CPU thread; it runs a single thread task and never has more +than one task to run. LOCAL_SCOPE is much like creating a Solaris unbound +thread, while GLOBAL_SCOPE is similar to creating a Solaris bound thread. + +To implement this architecture, the source code will still maintain the "user" +and "native" directories which it has today. However, a third directory +"combined" will also exist. To compile a version of NSPR which only creates +native threads, the user can define _PR_NTHREAD. For exclusive user-level +threads, do not define _PR_NTHREAD.
To get the combined threads, define +_PR_NTHREAD and _PR_USE_CPUS. + + +Phase 3 - later than next week + +The goal is to eliminate the 3 directories. Once these three models are in +place, the remaining work will be to eliminate the native and user thread +directories for all platforms, so that the entire thread model is contained +within what is today called the "combined" model. This new and glorious +source code will attempt to make the "combined" model on any platforms which +provide the necessary underlying native threading, but will also be +capable of using exclusive user-level threads on systems which don't have +native threads. + diff --git a/mozilla/nsprpub/pr/src/threads/combined/prucpu.c b/mozilla/nsprpub/pr/src/threads/combined/prucpu.c new file mode 100644 index 0000000..599925d --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/combined/prucpu.c @@ -0,0 +1,437 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is the Netscape Portable Runtime (NSPR). + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998-2000 + * the Initial Developer. All Rights Reserved. + * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + +#include "primpl.h" + +_PRCPU *_pr_primordialCPU = NULL; + +PRInt32 _pr_md_idle_cpus; /* number of idle cpus */ +/* + * The idle threads in MxN models increment/decrement _pr_md_idle_cpus. + * If _PR_HAVE_ATOMIC_OPS is not defined, they can't use the atomic + * increment/decrement routines (which are based on PR_Lock/PR_Unlock), + * because PR_Lock asserts that the calling thread is not an idle thread. + * So we use a _MDLock to protect _pr_md_idle_cpus. 
+ */ +#if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY) +#ifndef _PR_HAVE_ATOMIC_OPS +static _MDLock _pr_md_idle_cpus_lock; +#endif +#endif +PRUintn _pr_numCPU; +PRInt32 _pr_cpus_exit; +PRUint32 _pr_cpu_affinity_mask = 0; + +#if !defined (_PR_GLOBAL_THREADS_ONLY) + +static PRUintn _pr_cpuID; + +static void PR_CALLBACK _PR_CPU_Idle(void *); + +static _PRCPU *_PR_CreateCPU(void); +static PRStatus _PR_StartCPU(_PRCPU *cpu, PRThread *thread); + +#if !defined(_PR_LOCAL_THREADS_ONLY) +static void _PR_RunCPU(void *arg); +#endif + +void _PR_InitCPUs() +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + + if (_native_threads_only) + return; + + _pr_cpuID = 0; + _MD_NEW_LOCK( &_pr_cpuLock); +#if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY) +#ifndef _PR_HAVE_ATOMIC_OPS + _MD_NEW_LOCK(&_pr_md_idle_cpus_lock); +#endif +#endif + +#ifdef _PR_LOCAL_THREADS_ONLY + +#ifdef HAVE_CUSTOM_USER_THREADS + _PR_MD_CREATE_PRIMORDIAL_USER_THREAD(me); +#endif + + /* Now start the first CPU. */ + _pr_primordialCPU = _PR_CreateCPU(); + _pr_numCPU = 1; + _PR_StartCPU(_pr_primordialCPU, me); + + _PR_MD_SET_CURRENT_CPU(_pr_primordialCPU); + + /* Initialize cpu for current thread (could be different from me) */ + _PR_MD_CURRENT_THREAD()->cpu = _pr_primordialCPU; + + _PR_MD_SET_LAST_THREAD(me); + +#else /* Combined MxN model */ + + _pr_primordialCPU = _PR_CreateCPU(); + _pr_numCPU = 1; + _PR_CreateThread(PR_SYSTEM_THREAD, + _PR_RunCPU, + _pr_primordialCPU, + PR_PRIORITY_NORMAL, + PR_GLOBAL_THREAD, + PR_UNJOINABLE_THREAD, + 0, + _PR_IDLE_THREAD); + +#endif /* _PR_LOCAL_THREADS_ONLY */ + + _PR_MD_INIT_CPUS(); +} + +#ifdef WINNT +/* + * Right now this function merely stops the CPUs and does + * not do any other cleanup. + * + * It is only implemented for WINNT because bug 161998 only + * affects the WINNT version of NSPR, but it would be nice + * to implement this function for other platforms too. + */ +void _PR_CleanupCPUs(void) +{ + PRUintn i; + PRCList *qp; + _PRCPU *cpu; + + _pr_cpus_exit = 1; + for (i = 0; i < _pr_numCPU; i++) { + _PR_MD_WAKEUP_WAITER(NULL); + } + for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) { + cpu = _PR_CPU_PTR(qp); + _PR_MD_JOIN_THREAD(&cpu->thread->md); + } +} +#endif + +static _PRCPUQueue *_PR_CreateCPUQueue(void) +{ + PRInt32 index; + _PRCPUQueue *cpuQueue; + cpuQueue = PR_NEWZAP(_PRCPUQueue); + + _MD_NEW_LOCK( &cpuQueue->runQLock ); + _MD_NEW_LOCK( &cpuQueue->sleepQLock ); + _MD_NEW_LOCK( &cpuQueue->miscQLock ); + + for (index = 0; index < PR_PRIORITY_LAST + 1; index++) + PR_INIT_CLIST( &(cpuQueue->runQ[index]) ); + PR_INIT_CLIST( &(cpuQueue->sleepQ) ); + PR_INIT_CLIST( &(cpuQueue->pauseQ) ); + PR_INIT_CLIST( &(cpuQueue->suspendQ) ); + PR_INIT_CLIST( &(cpuQueue->waitingToJoinQ) ); + + cpuQueue->numCPUs = 1; + + return cpuQueue; +} + +/* + * Create a new CPU. + * + * This function initializes enough of the _PRCPU structure so + * that it can be accessed safely by a global thread or another + * CPU. This function does not create the native thread that + * will run the CPU nor does it initialize the parts of _PRCPU + * that must be initialized by that native thread. + * + * The reason we cannot simply have the native thread create + * and fully initialize a new CPU is that we need to be able to + * create a usable _pr_primordialCPU in _PR_InitCPUs without + * assuming that the primordial CPU thread we created can run + * during NSPR initialization. 
For example, on Windows while + * new threads can be created by DllMain, they won't be able + * to run during DLL initialization. If NSPR is initialized + * by DllMain, the primordial CPU thread won't run until DLL + * initialization is finished. + */ +static _PRCPU *_PR_CreateCPU(void) +{ + _PRCPU *cpu; + + cpu = PR_NEWZAP(_PRCPU); + if (cpu) { + cpu->queue = _PR_CreateCPUQueue(); + if (!cpu->queue) { + PR_DELETE(cpu); + return NULL; + } + } + return cpu; +} + +/* + * Start a new CPU. + * + * 'cpu' is a _PRCPU structure created by _PR_CreateCPU(). + * 'thread' is the native thread that will run the CPU. + * + * If this function fails, 'cpu' is destroyed. + */ +static PRStatus _PR_StartCPU(_PRCPU *cpu, PRThread *thread) +{ + /* + ** Start a new cpu. The assumption this code makes is that the + ** underlying operating system creates a stack to go with the new + ** native thread. That stack will be used by the cpu when pausing. + */ + + PR_ASSERT(!_native_threads_only); + + cpu->last_clock = PR_IntervalNow(); + + /* Before we create any threads on this CPU we have to + * set the current CPU + */ + _PR_MD_SET_CURRENT_CPU(cpu); + _PR_MD_INIT_RUNNING_CPU(cpu); + thread->cpu = cpu; + + cpu->idle_thread = _PR_CreateThread(PR_SYSTEM_THREAD, + _PR_CPU_Idle, + (void *)cpu, + PR_PRIORITY_NORMAL, + PR_LOCAL_THREAD, + PR_UNJOINABLE_THREAD, + 0, + _PR_IDLE_THREAD); + + if (!cpu->idle_thread) { + /* didn't clean up CPU queue XXXMB */ + PR_DELETE(cpu); + return PR_FAILURE; + } + PR_ASSERT(cpu->idle_thread->cpu == cpu); + + cpu->idle_thread->no_sched = 0; + + cpu->thread = thread; + + if (_pr_cpu_affinity_mask) + PR_SetThreadAffinityMask(thread, _pr_cpu_affinity_mask); + + /* Created and started a new CPU */ + _PR_CPU_LIST_LOCK(); + cpu->id = _pr_cpuID++; + PR_APPEND_LINK(&cpu->links, &_PR_CPUQ()); + _PR_CPU_LIST_UNLOCK(); + + return PR_SUCCESS; +} + +#if !defined(_PR_GLOBAL_THREADS_ONLY) && !defined(_PR_LOCAL_THREADS_ONLY) +/* +** This code is used during a cpu's initial creation. +*/ +static void _PR_RunCPU(void *arg) +{ + _PRCPU *cpu = (_PRCPU *)arg; + PRThread *me = _PR_MD_CURRENT_THREAD(); + + PR_ASSERT(NULL != me); + + /* + * _PR_StartCPU calls _PR_CreateThread to create the + * idle thread. Because _PR_CreateThread calls PR_Lock, + * the current thread has to remain a global thread + * during the _PR_StartCPU call so that it can wait for + * the lock if the lock is held by another thread. If + * we clear the _PR_GLOBAL_SCOPE flag in + * _PR_MD_CREATE_PRIMORDIAL_THREAD, the current thread + * will be treated as a local thread and have trouble + * waiting for the lock because the CPU is not fully + * constructed yet. + * + * After the CPU is started, it is safe to mark the + * current thread as a local thread. 
+ */ + +#ifdef HAVE_CUSTOM_USER_THREADS + _PR_MD_CREATE_PRIMORDIAL_USER_THREAD(me); +#endif + + me->no_sched = 1; + _PR_StartCPU(cpu, me); + +#ifdef HAVE_CUSTOM_USER_THREADS + me->flags &= (~_PR_GLOBAL_SCOPE); +#endif + + _PR_MD_SET_CURRENT_CPU(cpu); + _PR_MD_SET_CURRENT_THREAD(cpu->thread); + me->cpu = cpu; + + while(1) { + PRInt32 is; + if (!_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is); + _PR_MD_START_INTERRUPTS(); + _PR_MD_SWITCH_CONTEXT(me); + } +} +#endif + +static void PR_CALLBACK _PR_CPU_Idle(void *_cpu) +{ + _PRCPU *cpu = (_PRCPU *)_cpu; + PRThread *me = _PR_MD_CURRENT_THREAD(); + + PR_ASSERT(NULL != me); + + me->cpu = cpu; + cpu->idle_thread = me; + if (_MD_LAST_THREAD()) + _MD_LAST_THREAD()->no_sched = 0; + if (!_PR_IS_NATIVE_THREAD(me)) _PR_MD_SET_INTSOFF(0); + while(1) { + PRInt32 is; + PRIntervalTime timeout; + if (!_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is); + + _PR_RUNQ_LOCK(cpu); +#if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY) +#ifdef _PR_HAVE_ATOMIC_OPS + _PR_MD_ATOMIC_INCREMENT(&_pr_md_idle_cpus); +#else + _PR_MD_LOCK(&_pr_md_idle_cpus_lock); + _pr_md_idle_cpus++; + _PR_MD_UNLOCK(&_pr_md_idle_cpus_lock); +#endif /* _PR_HAVE_ATOMIC_OPS */ +#endif + /* If someone on runq; do a nonblocking PAUSECPU */ + if (_PR_RUNQREADYMASK(me->cpu) != 0) { + _PR_RUNQ_UNLOCK(cpu); + timeout = PR_INTERVAL_NO_WAIT; + } else { + _PR_RUNQ_UNLOCK(cpu); + + _PR_SLEEPQ_LOCK(cpu); + if (PR_CLIST_IS_EMPTY(&_PR_SLEEPQ(me->cpu))) { + timeout = PR_INTERVAL_NO_TIMEOUT; + } else { + PRThread *wakeThread; + wakeThread = _PR_THREAD_PTR(_PR_SLEEPQ(me->cpu).next); + timeout = wakeThread->sleep; + } + _PR_SLEEPQ_UNLOCK(cpu); + } + + /* Wait for an IO to complete */ + (void)_PR_MD_PAUSE_CPU(timeout); + +#ifdef WINNT + if (_pr_cpus_exit) { + /* _PR_CleanupCPUs tells us to exit */ + _PR_MD_END_THREAD(); + } +#endif + +#if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY) +#ifdef _PR_HAVE_ATOMIC_OPS + _PR_MD_ATOMIC_DECREMENT(&_pr_md_idle_cpus); +#else + _PR_MD_LOCK(&_pr_md_idle_cpus_lock); + _pr_md_idle_cpus--; + _PR_MD_UNLOCK(&_pr_md_idle_cpus_lock); +#endif /* _PR_HAVE_ATOMIC_OPS */ +#endif + + _PR_ClockInterrupt(); + + /* Now schedule any thread that is on the runq + * INTS must be OFF when calling PR_Schedule() + */ + me->state = _PR_RUNNABLE; + _PR_MD_SWITCH_CONTEXT(me); + if (!_PR_IS_NATIVE_THREAD(me)) _PR_FAST_INTSON(is); + } +} +#endif /* _PR_GLOBAL_THREADS_ONLY */ + +PR_IMPLEMENT(void) PR_SetConcurrency(PRUintn numCPUs) +{ +#if defined(_PR_GLOBAL_THREADS_ONLY) || defined(_PR_LOCAL_THREADS_ONLY) + + /* do nothing */ + +#else /* combined, MxN thread model */ + + PRUintn newCPU; + _PRCPU *cpu; + PRThread *thr; + + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + if (_native_threads_only) + return; + + _PR_CPU_LIST_LOCK(); + if (_pr_numCPU < numCPUs) { + newCPU = numCPUs - _pr_numCPU; + _pr_numCPU = numCPUs; + } else newCPU = 0; + _PR_CPU_LIST_UNLOCK(); + + for (; newCPU; newCPU--) { + cpu = _PR_CreateCPU(); + thr = _PR_CreateThread(PR_SYSTEM_THREAD, + _PR_RunCPU, + cpu, + PR_PRIORITY_NORMAL, + PR_GLOBAL_THREAD, + PR_UNJOINABLE_THREAD, + 0, + _PR_IDLE_THREAD); + } +#endif +} + +PR_IMPLEMENT(_PRCPU *) _PR_GetPrimordialCPU(void) +{ + if (_pr_primordialCPU) + return _pr_primordialCPU; + else + return _PR_MD_CURRENT_CPU(); +} diff --git a/mozilla/nsprpub/pr/src/threads/combined/prucv.c b/mozilla/nsprpub/pr/src/threads/combined/prucv.c new file mode 100644 index 0000000..6d5d668 --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/combined/prucv.c @@ -0,0 +1,677 
@@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is the Netscape Portable Runtime (NSPR). + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998-2000 + * the Initial Developer. All Rights Reserved. + * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + + +#include "primpl.h" +#include "prinrval.h" +#include "prtypes.h" + +#if defined(WIN95) +/* +** Some local variables report warnings on Win95 because the code paths +** using them are conditioned on HAVE_CUSTOM_USER_THREADS. +** The pragma suppresses the warning. +** +*/ +#pragma warning(disable : 4101) +#endif + + +/* +** Notify one thread that it has finished waiting on a condition variable +** Caller must hold the _PR_CVAR_LOCK(cv) +*/ +PRBool _PR_NotifyThread (PRThread *thread, PRThread *me) +{ + PRBool rv; + + PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0); + + _PR_THREAD_LOCK(thread); + PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD)); + if ( !_PR_IS_NATIVE_THREAD(thread) ) { + if (thread->wait.cvar != NULL) { + thread->wait.cvar = NULL; + + _PR_SLEEPQ_LOCK(thread->cpu); + /* The notify and timeout can collide; in which case both may + * attempt to delete from the sleepQ; only let one do it.
+ */ + if (thread->flags & (_PR_ON_SLEEPQ|_PR_ON_PAUSEQ)) + _PR_DEL_SLEEPQ(thread, PR_TRUE); + _PR_SLEEPQ_UNLOCK(thread->cpu); + + if (thread->flags & _PR_SUSPENDING) { + /* + * set thread state to SUSPENDED; a Resume operation + * on the thread will move it to the runQ + */ + thread->state = _PR_SUSPENDED; + _PR_MISCQ_LOCK(thread->cpu); + _PR_ADD_SUSPENDQ(thread, thread->cpu); + _PR_MISCQ_UNLOCK(thread->cpu); + _PR_THREAD_UNLOCK(thread); + } else { + /* Make thread runnable */ + thread->state = _PR_RUNNABLE; + _PR_THREAD_UNLOCK(thread); + + _PR_AddThreadToRunQ(me, thread); + _PR_MD_WAKEUP_WAITER(thread); + } + + rv = PR_TRUE; + } else { + /* Thread has already been notified */ + _PR_THREAD_UNLOCK(thread); + rv = PR_FALSE; + } + } else { /* If the thread is a native thread */ + if (thread->wait.cvar) { + thread->wait.cvar = NULL; + + if (thread->flags & _PR_SUSPENDING) { + /* + * set thread state to SUSPENDED; a Resume operation + * on the thread will enable the thread to run + */ + thread->state = _PR_SUSPENDED; + } else + thread->state = _PR_RUNNING; + _PR_THREAD_UNLOCK(thread); + _PR_MD_WAKEUP_WAITER(thread); + rv = PR_TRUE; + } else { + _PR_THREAD_UNLOCK(thread); + rv = PR_FALSE; + } + } + + return rv; +} + +/* + * Notify thread waiting on cvar; called when thread is interrupted + * The thread lock is held on entry and released before return + */ +void _PR_NotifyLockedThread (PRThread *thread) +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + PRCondVar *cvar; + PRThreadPriority pri; + + if ( !_PR_IS_NATIVE_THREAD(me)) + PR_ASSERT(_PR_MD_GET_INTSOFF() != 0); + + cvar = thread->wait.cvar; + thread->wait.cvar = NULL; + _PR_THREAD_UNLOCK(thread); + + _PR_CVAR_LOCK(cvar); + _PR_THREAD_LOCK(thread); + + if (!_PR_IS_NATIVE_THREAD(thread)) { + _PR_SLEEPQ_LOCK(thread->cpu); + /* The notify and timeout can collide; in which case both may + * attempt to delete from the sleepQ; only let one do it. 
+ */ + if (thread->flags & (_PR_ON_SLEEPQ|_PR_ON_PAUSEQ)) + _PR_DEL_SLEEPQ(thread, PR_TRUE); + _PR_SLEEPQ_UNLOCK(thread->cpu); + + /* Make thread runnable */ + pri = thread->priority; + thread->state = _PR_RUNNABLE; + + PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD)); + + _PR_AddThreadToRunQ(me, thread); + _PR_THREAD_UNLOCK(thread); + + _PR_MD_WAKEUP_WAITER(thread); + } else { + if (thread->flags & _PR_SUSPENDING) { + /* + * set thread state to SUSPENDED; a Resume operation + * on the thread will enable the thread to run + */ + thread->state = _PR_SUSPENDED; + } else + thread->state = _PR_RUNNING; + _PR_THREAD_UNLOCK(thread); + _PR_MD_WAKEUP_WAITER(thread); + } + + _PR_CVAR_UNLOCK(cvar); + return; +} + +/* +** Make the given thread wait for the given condition variable +*/ +PRStatus _PR_WaitCondVar( + PRThread *thread, PRCondVar *cvar, PRLock *lock, PRIntervalTime timeout) +{ + PRIntn is; + PRStatus rv = PR_SUCCESS; + + PR_ASSERT(thread == _PR_MD_CURRENT_THREAD()); + PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD)); + +#ifdef _PR_GLOBAL_THREADS_ONLY + if (_PR_PENDING_INTERRUPT(thread)) { + PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); + thread->flags &= ~_PR_INTERRUPT; + return PR_FAILURE; + } + + thread->wait.cvar = cvar; + lock->owner = NULL; + _PR_MD_WAIT_CV(&cvar->md,&lock->ilock, timeout); + thread->wait.cvar = NULL; + lock->owner = thread; + if (_PR_PENDING_INTERRUPT(thread)) { + PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); + thread->flags &= ~_PR_INTERRUPT; + return PR_FAILURE; + } + + return PR_SUCCESS; +#else /* _PR_GLOBAL_THREADS_ONLY */ + + if ( !_PR_IS_NATIVE_THREAD(thread)) + _PR_INTSOFF(is); + + _PR_CVAR_LOCK(cvar); + _PR_THREAD_LOCK(thread); + + if (_PR_PENDING_INTERRUPT(thread)) { + PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); + thread->flags &= ~_PR_INTERRUPT; + _PR_CVAR_UNLOCK(cvar); + _PR_THREAD_UNLOCK(thread); + if ( !_PR_IS_NATIVE_THREAD(thread)) + _PR_INTSON(is); + return PR_FAILURE; + } + + thread->state = _PR_COND_WAIT; + thread->wait.cvar = cvar; + + /* + ** Put the caller thread on the condition variable's wait Q + */ + PR_APPEND_LINK(&thread->waitQLinks, &cvar->condQ); + + /* Note- for global scope threads, we don't put them on the + * global sleepQ, so each global thread must put itself + * to sleep only for the time it wants to. 
+ */ + if ( !_PR_IS_NATIVE_THREAD(thread) ) { + _PR_SLEEPQ_LOCK(thread->cpu); + _PR_ADD_SLEEPQ(thread, timeout); + _PR_SLEEPQ_UNLOCK(thread->cpu); + } + _PR_CVAR_UNLOCK(cvar); + _PR_THREAD_UNLOCK(thread); + + /* + ** Release lock protecting the condition variable and thereby giving time + ** to the next thread which can potentially notify on the condition variable + */ + PR_Unlock(lock); + + PR_LOG(_pr_cvar_lm, PR_LOG_MIN, + ("PR_Wait: cvar=%p waiting for %d", cvar, timeout)); + + rv = _PR_MD_WAIT(thread, timeout); + + _PR_CVAR_LOCK(cvar); + PR_REMOVE_LINK(&thread->waitQLinks); + _PR_CVAR_UNLOCK(cvar); + + PR_LOG(_pr_cvar_lm, PR_LOG_MIN, + ("PR_Wait: cvar=%p done waiting", cvar)); + + if ( !_PR_IS_NATIVE_THREAD(thread)) + _PR_INTSON(is); + + /* Acquire lock again that we had just relinquished */ + PR_Lock(lock); + + if (_PR_PENDING_INTERRUPT(thread)) { + PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); + thread->flags &= ~_PR_INTERRUPT; + return PR_FAILURE; + } + + return rv; +#endif /* _PR_GLOBAL_THREADS_ONLY */ +} + +void _PR_NotifyCondVar(PRCondVar *cvar, PRThread *me) +{ +#ifdef _PR_GLOBAL_THREADS_ONLY + _PR_MD_NOTIFY_CV(&cvar->md, &cvar->lock->ilock); +#else /* _PR_GLOBAL_THREADS_ONLY */ + + PRCList *q; + PRIntn is; + + if ( !_PR_IS_NATIVE_THREAD(me)) + _PR_INTSOFF(is); + PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0); + + _PR_CVAR_LOCK(cvar); + q = cvar->condQ.next; + while (q != &cvar->condQ) { + PR_LOG(_pr_cvar_lm, PR_LOG_MIN, ("_PR_NotifyCondVar: cvar=%p", cvar)); + if (_PR_THREAD_CONDQ_PTR(q)->wait.cvar) { + if (_PR_NotifyThread(_PR_THREAD_CONDQ_PTR(q), me) == PR_TRUE) + break; + } + q = q->next; + } + _PR_CVAR_UNLOCK(cvar); + + if ( !_PR_IS_NATIVE_THREAD(me)) + _PR_INTSON(is); + +#endif /* _PR_GLOBAL_THREADS_ONLY */ +} + +/* +** Condition variable debugging log info. +*/ +PRUint32 _PR_CondVarToString(PRCondVar *cvar, char *buf, PRUint32 buflen) +{ + PRUint32 nb; + + if (cvar->lock->owner) { + nb = PR_snprintf(buf, buflen, "[%p] owner=%ld[%p]", + cvar, cvar->lock->owner->id, cvar->lock->owner); + } else { + nb = PR_snprintf(buf, buflen, "[%p]", cvar); + } + return nb; +} + +/* +** Expire condition variable waits that are ready to expire. "now" is the current +** time. +*/ +void _PR_ClockInterrupt(void) +{ + PRThread *thread, *me = _PR_MD_CURRENT_THREAD(); + _PRCPU *cpu = me->cpu; + PRIntervalTime elapsed, now; + + PR_ASSERT(_PR_MD_GET_INTSOFF() != 0); + /* Figure out how much time elapsed since the last clock tick */ + now = PR_IntervalNow(); + elapsed = now - cpu->last_clock; + cpu->last_clock = now; + + PR_LOG(_pr_clock_lm, PR_LOG_MAX, + ("ExpireWaits: elapsed=%lld usec", elapsed)); + + while(1) { + _PR_SLEEPQ_LOCK(cpu); + if (_PR_SLEEPQ(cpu).next == &_PR_SLEEPQ(cpu)) { + _PR_SLEEPQ_UNLOCK(cpu); + break; + } + + thread = _PR_THREAD_PTR(_PR_SLEEPQ(cpu).next); + PR_ASSERT(thread->cpu == cpu); + + if (elapsed < thread->sleep) { + thread->sleep -= elapsed; + _PR_SLEEPQMAX(thread->cpu) -= elapsed; + _PR_SLEEPQ_UNLOCK(cpu); + break; + } + _PR_SLEEPQ_UNLOCK(cpu); + + PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread)); + + _PR_THREAD_LOCK(thread); + + if (thread->cpu != cpu) { + /* + ** The thread was switched to another CPU + ** between the time we unlocked the sleep + ** queue and the time we acquired the thread + ** lock, so it is none of our business now. + */ + _PR_THREAD_UNLOCK(thread); + continue; + } + + /* + ** Consume this sleeper's amount of elapsed time from the elapsed + ** time value.
The next remaining piece of elapsed time will be + ** available for the next sleeping thread's timer. + */ + _PR_SLEEPQ_LOCK(cpu); + PR_ASSERT(!(thread->flags & _PR_ON_PAUSEQ)); + if (thread->flags & _PR_ON_SLEEPQ) { + _PR_DEL_SLEEPQ(thread, PR_FALSE); + elapsed -= thread->sleep; + _PR_SLEEPQ_UNLOCK(cpu); + } else { + /* Thread was already handled; Go get another one */ + _PR_SLEEPQ_UNLOCK(cpu); + _PR_THREAD_UNLOCK(thread); + continue; + } + + /* Notify the thread waiting on the condition variable */ + if (thread->flags & _PR_SUSPENDING) { + PR_ASSERT((thread->state == _PR_IO_WAIT) || + (thread->state == _PR_COND_WAIT)); + /* + ** Thread is suspended and its condition timeout + ** expired. Transfer thread from sleepQ to suspendQ. + */ + thread->wait.cvar = NULL; + _PR_MISCQ_LOCK(cpu); + thread->state = _PR_SUSPENDED; + _PR_ADD_SUSPENDQ(thread, cpu); + _PR_MISCQ_UNLOCK(cpu); + } else { + if (thread->wait.cvar) { + PRThreadPriority pri; + + /* Do work very similar to what _PR_NotifyThread does */ + PR_ASSERT( !_PR_IS_NATIVE_THREAD(thread) ); + + /* Make thread runnable */ + pri = thread->priority; + thread->state = _PR_RUNNABLE; + PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD)); + + PR_ASSERT(thread->cpu == cpu); + _PR_RUNQ_LOCK(cpu); + _PR_ADD_RUNQ(thread, cpu, pri); + _PR_RUNQ_UNLOCK(cpu); + + if (pri > me->priority) + _PR_SET_RESCHED_FLAG(); + + thread->wait.cvar = NULL; + + _PR_MD_WAKEUP_WAITER(thread); + + } else if (thread->io_pending == PR_TRUE) { + /* Need to put IO sleeper back on runq */ + int pri = thread->priority; + + thread->io_suspended = PR_TRUE; +#ifdef WINNT + /* + * For NT, record the cpu on which I/O was issued + * I/O cancellation is done on the same cpu + */ + thread->md.thr_bound_cpu = cpu; +#endif + + PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD)); + PR_ASSERT(thread->cpu == cpu); + thread->state = _PR_RUNNABLE; + _PR_RUNQ_LOCK(cpu); + _PR_ADD_RUNQ(thread, cpu, pri); + _PR_RUNQ_UNLOCK(cpu); + } + } + _PR_THREAD_UNLOCK(thread); + } +} + +/************************************************************************/ + +/* +** Create a new condition variable. +** "lock" is the lock to use with the condition variable. +** +** Condition variables are synchronization objects that threads can use +** to wait for some condition to occur. +** +** This may fail if memory is tight or if some operating system resource +** is low. +*/ +PR_IMPLEMENT(PRCondVar*) PR_NewCondVar(PRLock *lock) +{ + PRCondVar *cvar; + + PR_ASSERT(lock != NULL); + + cvar = PR_NEWZAP(PRCondVar); + if (cvar) { +#ifdef _PR_GLOBAL_THREADS_ONLY + if(_PR_MD_NEW_CV(&cvar->md)) { + PR_DELETE(cvar); + PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0); + return NULL; + } +#endif + if (_PR_MD_NEW_LOCK(&(cvar->ilock)) == PR_FAILURE) { + PR_DELETE(cvar); + PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0); + return NULL; + } + cvar->lock = lock; + PR_INIT_CLIST(&cvar->condQ); + + } else { + PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); + } + return cvar; +} + +/* +** Destroy a condition variable. There must be no thread +** waiting on the condvar. The caller is responsible for guaranteeing +** that the condvar is no longer in use. +** +*/ +PR_IMPLEMENT(void) PR_DestroyCondVar(PRCondVar *cvar) +{ + PR_ASSERT(cvar->condQ.next == &cvar->condQ); + +#ifdef _PR_GLOBAL_THREADS_ONLY + _PR_MD_FREE_CV(&cvar->md); +#endif + _PR_MD_FREE_LOCK(&(cvar->ilock)); + + PR_DELETE(cvar); +} + +/* +** Wait for a notify on the condition variable. Sleep for "timeout" amount +** of ticks (if "timeout" is PR_INTERVAL_NO_TIMEOUT then the sleep is indefinite).
While +** the thread is waiting it unlocks lock. When the wait has +** finished the thread regains control of the condition variable after +** locking the associated lock. +** +** The thread waiting on the condvar will be resumed when the condvar is +** notified (assuming the thread is the next in line to receive the +** notify) or when the timeout elapses. +** +** Returns PR_FAILURE if the caller has not locked the lock associated +** with the condition variable or the thread has been interrupted. +*/ +extern PRThread *suspendAllThread; +PR_IMPLEMENT(PRStatus) PR_WaitCondVar(PRCondVar *cvar, PRIntervalTime timeout) +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + + PR_ASSERT(cvar->lock->owner == me); + PR_ASSERT(me != suspendAllThread); + if (cvar->lock->owner != me) return PR_FAILURE; + + return _PR_WaitCondVar(me, cvar, cvar->lock, timeout); +} + +/* +** Notify the highest priority thread waiting on the condition +** variable. If a thread is waiting on the condition variable (using +** PR_Wait) then it is awakened and begins waiting on the lock. +*/ +PR_IMPLEMENT(PRStatus) PR_NotifyCondVar(PRCondVar *cvar) +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + + PR_ASSERT(cvar->lock->owner == me); + PR_ASSERT(me != suspendAllThread); + if (cvar->lock->owner != me) return PR_FAILURE; + + _PR_NotifyCondVar(cvar, me); + return PR_SUCCESS; +} + +/* +** Notify all of the threads waiting on the condition variable. All of +** the threads are notified in turn. The highest priority thread will +** probably acquire the lock. +*/ +PR_IMPLEMENT(PRStatus) PR_NotifyAllCondVar(PRCondVar *cvar) +{ + PRCList *q; + PRIntn is; + PRThread *me = _PR_MD_CURRENT_THREAD(); + + PR_ASSERT(cvar->lock->owner == me); + if (cvar->lock->owner != me) return PR_FAILURE; + +#ifdef _PR_GLOBAL_THREADS_ONLY + _PR_MD_NOTIFYALL_CV(&cvar->md, &cvar->lock->ilock); + return PR_SUCCESS; +#else /* _PR_GLOBAL_THREADS_ONLY */ + if ( !_PR_IS_NATIVE_THREAD(me)) + _PR_INTSOFF(is); + _PR_CVAR_LOCK(cvar); + q = cvar->condQ.next; + while (q != &cvar->condQ) { + PR_LOG(_pr_cvar_lm, PR_LOG_MIN, ("PR_NotifyAll: cvar=%p", cvar)); + _PR_NotifyThread(_PR_THREAD_CONDQ_PTR(q), me); + q = q->next; + } + _PR_CVAR_UNLOCK(cvar); + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSON(is); + + return PR_SUCCESS; +#endif /* _PR_GLOBAL_THREADS_ONLY */ +} + + +/*********************************************************************/ +/*********************************************************************/ +/********************ROUTINES FOR DCE EMULATION***********************/ +/*********************************************************************/ +/*********************************************************************/ +#include "prpdce.h" + +PR_IMPLEMENT(PRCondVar*) PRP_NewNakedCondVar(void) +{ + PRCondVar *cvar = PR_NEWZAP(PRCondVar); + if (NULL != cvar) + { + if (_PR_MD_NEW_LOCK(&(cvar->ilock)) == PR_FAILURE) + { + PR_DELETE(cvar); cvar = NULL; + } + else + { + PR_INIT_CLIST(&cvar->condQ); + cvar->lock = _PR_NAKED_CV_LOCK; + } + + } + return cvar; +} + +PR_IMPLEMENT(void) PRP_DestroyNakedCondVar(PRCondVar *cvar) +{ + PR_ASSERT(cvar->condQ.next == &cvar->condQ); + PR_ASSERT(_PR_NAKED_CV_LOCK == cvar->lock); + + _PR_MD_FREE_LOCK(&(cvar->ilock)); + + PR_DELETE(cvar); +} + +PR_IMPLEMENT(PRStatus) PRP_NakedWait( + PRCondVar *cvar, PRLock *lock, PRIntervalTime timeout) +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + PR_ASSERT(_PR_NAKED_CV_LOCK == cvar->lock); + return _PR_WaitCondVar(me, cvar, lock, timeout); +} /* PRP_NakedWait */ + +PR_IMPLEMENT(PRStatus) PRP_NakedNotify(PRCondVar
*cvar) +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + PR_ASSERT(_PR_NAKED_CV_LOCK == cvar->lock); + + _PR_NotifyCondVar(cvar, me); + + return PR_SUCCESS; +} /* PRP_NakedNotify */ + +PR_IMPLEMENT(PRStatus) PRP_NakedBroadcast(PRCondVar *cvar) +{ + PRCList *q; + PRIntn is; + PRThread *me = _PR_MD_CURRENT_THREAD(); + PR_ASSERT(_PR_NAKED_CV_LOCK == cvar->lock); + + if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is); + _PR_MD_LOCK( &(cvar->ilock) ); + q = cvar->condQ.next; + while (q != &cvar->condQ) { + PR_LOG(_pr_cvar_lm, PR_LOG_MIN, ("PR_NotifyAll: cvar=%p", cvar)); + _PR_NotifyThread(_PR_THREAD_CONDQ_PTR(q), me); + q = q->next; + } + _PR_MD_UNLOCK( &(cvar->ilock) ); + if (!_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is); + + return PR_SUCCESS; +} /* PRP_NakedBroadcast */ + diff --git a/mozilla/nsprpub/pr/src/threads/combined/prulock.c b/mozilla/nsprpub/pr/src/threads/combined/prulock.c new file mode 100644 index 0000000..b188ed5 --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/combined/prulock.c @@ -0,0 +1,465 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is the Netscape Portable Runtime (NSPR). + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998-2000 + * the Initial Developer. All Rights Reserved. + * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + +#include "primpl.h" + +#if defined(WIN95) +/* +** Some local variables report warnings on Win95 because the code paths +** using them are conditioned on HAVE_CUSTOM_USER_THREADS. +** The pragma suppresses the warning. +** +*/ +#pragma warning(disable : 4101) +#endif + + +void _PR_InitLocks(void) +{ + _PR_MD_INIT_LOCKS(); +} + +/* +** Deal with delayed interrupts/requested reschedule during interrupt +** re-enables.
+*/ +void _PR_IntsOn(_PRCPU *cpu) +{ + PRUintn missed, pri, i; + _PRInterruptTable *it; + PRThread *me; + + PR_ASSERT(cpu); /* Global threads don't have CPUs */ + PR_ASSERT(_PR_MD_GET_INTSOFF() > 0); + me = _PR_MD_CURRENT_THREAD(); + PR_ASSERT(!(me->flags & _PR_IDLE_THREAD)); + + /* + ** Process delayed interrupts. This logic is kinda scary because we + ** need to avoid losing an interrupt (it's ok to delay an interrupt + ** until later). + ** + ** There are two missed state words. _pr_ints.where indicates to the + ** interrupt handler which state word is currently safe for + ** modification. + ** + ** This code scans both interrupt state words, using the where flag + ** to indicate to the interrupt which state word is safe for writing. + ** If an interrupt comes in during a scan the other word will be + ** modified. This modification will be noticed during the next + ** iteration of the loop or during the next call to this routine. + */ + for (i = 0; i < 2; i++) { + cpu->where = (1 - i); + missed = cpu->u.missed[i]; + if (missed != 0) { + cpu->u.missed[i] = 0; + for (it = _pr_interruptTable; it->name; it++) { + if (missed & it->missed_bit) { + PR_LOG(_pr_sched_lm, PR_LOG_MIN, + ("IntsOn[0]: %s intr", it->name)); + (*it->handler)(); + } + } + } + } + + if (cpu->u.missed[3] != 0) { + _PRCPU *cpu; + + _PR_THREAD_LOCK(me); + me->state = _PR_RUNNABLE; + pri = me->priority; + + cpu = me->cpu; + _PR_RUNQ_LOCK(cpu); + _PR_ADD_RUNQ(me, cpu, pri); + _PR_RUNQ_UNLOCK(cpu); + _PR_THREAD_UNLOCK(me); + _PR_MD_SWITCH_CONTEXT(me); + } +} + +/* +** Unblock the first runnable waiting thread. Skip over +** threads that are trying to be suspended +** Note: Caller must hold _PR_LOCK_LOCK() +*/ +void _PR_UnblockLockWaiter(PRLock *lock) +{ + PRThread *t = NULL; + PRThread *me; + PRCList *q; + + q = lock->waitQ.next; + PR_ASSERT(q != &lock->waitQ); + while (q != &lock->waitQ) { + /* Unblock first waiter */ + t = _PR_THREAD_CONDQ_PTR(q); + + /* + ** We are about to change the thread's state to runnable and for local + ** threads, we are going to assign a cpu to it. So, protect thread's + ** data structure. + */ + _PR_THREAD_LOCK(t); + + if (t->flags & _PR_SUSPENDING) { + q = q->next; + _PR_THREAD_UNLOCK(t); + continue; + } + + /* Found a runnable thread */ + PR_ASSERT(t->state == _PR_LOCK_WAIT); + PR_ASSERT(t->wait.lock == lock); + t->wait.lock = 0; + PR_REMOVE_LINK(&t->waitQLinks); /* take it off lock's waitQ */ + + /* + ** If this is a native thread, nothing else to do except to wake it + ** up by calling the machine dependent wakeup routine. + ** + ** If this is a local thread, we need to assign it a cpu and + ** put the thread on that cpu's run queue. There are two cases to + ** take care of. If the currently running thread is also a local + ** thread, we just assign our own cpu to that thread and put it on + ** the cpu's run queue. If the currently running thread is a + ** native thread, we assign the primordial cpu to it (on NT, + ** MD_WAKEUP handles the cpu assignment).
+ */ + + if ( !_PR_IS_NATIVE_THREAD(t) ) { + + t->state = _PR_RUNNABLE; + + me = _PR_MD_CURRENT_THREAD(); + + _PR_AddThreadToRunQ(me, t); + _PR_THREAD_UNLOCK(t); + } else { + t->state = _PR_RUNNING; + _PR_THREAD_UNLOCK(t); + } + _PR_MD_WAKEUP_WAITER(t); + break; + } + return; +} + +/************************************************************************/ + + +PR_IMPLEMENT(PRLock*) PR_NewLock(void) +{ + PRLock *lock; + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + lock = PR_NEWZAP(PRLock); + if (lock) { + if (_PR_MD_NEW_LOCK(&lock->ilock) == PR_FAILURE) { + PR_DELETE(lock); + return(NULL); + } + PR_INIT_CLIST(&lock->links); + PR_INIT_CLIST(&lock->waitQ); + } + return lock; +} + +/* +** Destroy the given lock "lock". There is no point in making this race +** free because if some other thread has the pointer to this lock all +** bets are off. +*/ +PR_IMPLEMENT(void) PR_DestroyLock(PRLock *lock) +{ + PR_ASSERT(lock->owner == 0); + _PR_MD_FREE_LOCK(&lock->ilock); + PR_DELETE(lock); +} + +extern PRThread *suspendAllThread; +/* +** Lock the lock. +*/ +PR_IMPLEMENT(void) PR_Lock(PRLock *lock) +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + PRIntn is; + PRThread *t; + PRCList *q; + + PR_ASSERT(me != suspendAllThread); + PR_ASSERT(!(me->flags & _PR_IDLE_THREAD)); + PR_ASSERT(lock != NULL); +#ifdef _PR_GLOBAL_THREADS_ONLY + PR_ASSERT(lock->owner != me); + _PR_MD_LOCK(&lock->ilock); + lock->owner = me; + return; +#else /* _PR_GLOBAL_THREADS_ONLY */ + + if (_native_threads_only) { + PR_ASSERT(lock->owner != me); + _PR_MD_LOCK(&lock->ilock); + lock->owner = me; + return; + } + + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSOFF(is); + + PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0); + +retry: + _PR_LOCK_LOCK(lock); + if (lock->owner == 0) { + /* Just got the lock */ + lock->owner = me; + lock->priority = me->priority; + /* Add the granted lock to this owning thread's lock list */ + PR_APPEND_LINK(&lock->links, &me->lockList); + _PR_LOCK_UNLOCK(lock); + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_FAST_INTSON(is); + return; + } + + /* If this thread already owns this lock, then it is a deadlock */ + PR_ASSERT(lock->owner != me); + + PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0); + +#if 0 + if (me->priority > lock->owner->priority) { + /* + ** Give the lock owner a priority boost until we get the + ** lock. Record the priority we boosted it to. + */ + lock->boostPriority = me->priority; + _PR_SetThreadPriority(lock->owner, me->priority); + } +#endif + + /* + Add this thread to the asked for lock's list of waiting threads. We + add this thread in the right priority order so when the unlock + occurs, the thread with the higher priority will get the lock. + */ + q = lock->waitQ.next; + if (q == &lock->waitQ || _PR_THREAD_CONDQ_PTR(q)->priority == + _PR_THREAD_CONDQ_PTR(lock->waitQ.prev)->priority) { + /* + * If all the threads in the lock waitQ have the same priority, + * then avoid scanning the list: insert the element at the end. + */ + q = &lock->waitQ; + } else { + /* Sort thread into lock's waitQ at appropriate point */ + /* Now scan the list for where to insert this entry */ + while (q != &lock->waitQ) { + t = _PR_THREAD_CONDQ_PTR(lock->waitQ.next); + if (me->priority > t->priority) { + /* Found a lower priority thread to insert in front of */ + break; + } + q = q->next; + } + } + PR_INSERT_BEFORE(&me->waitQLinks, q); + + /* + Now grab the threadLock since we are about to change the state.
We have + to do this since a PR_Suspend or PR_SetThreadPriority type call that takes + a PRThread* as an argument could be changing the state of this thread from + a thread running on a different cpu. + */ + + _PR_THREAD_LOCK(me); + me->state = _PR_LOCK_WAIT; + me->wait.lock = lock; + _PR_THREAD_UNLOCK(me); + + _PR_LOCK_UNLOCK(lock); + + _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT); + goto retry; + +#endif /* _PR_GLOBAL_THREADS_ONLY */ +} + +/* +** Unlock the lock. +*/ +PR_IMPLEMENT(PRStatus) PR_Unlock(PRLock *lock) +{ + PRCList *q; + PRThreadPriority pri, boost; + PRIntn is; + PRThread *me = _PR_MD_CURRENT_THREAD(); + + PR_ASSERT(lock != NULL); + PR_ASSERT(lock->owner == me); + PR_ASSERT(me != suspendAllThread); + PR_ASSERT(!(me->flags & _PR_IDLE_THREAD)); + if (lock->owner != me) { + return PR_FAILURE; + } + +#ifdef _PR_GLOBAL_THREADS_ONLY + lock->owner = 0; + _PR_MD_UNLOCK(&lock->ilock); + return PR_SUCCESS; +#else /* _PR_GLOBAL_THREADS_ONLY */ + + if (_native_threads_only) { + lock->owner = 0; + _PR_MD_UNLOCK(&lock->ilock); + return PR_SUCCESS; + } + + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSOFF(is); + _PR_LOCK_LOCK(lock); + + /* Remove the lock from the owning thread's lock list */ + PR_REMOVE_LINK(&lock->links); + pri = lock->priority; + boost = lock->boostPriority; + if (boost > pri) { + /* + ** We received a priority boost during the time we held the lock. + ** We need to figure out what priority to move to by scanning + ** down our list of locks that we are still holding and using + ** the highest boosted priority found. + */ + q = me->lockList.next; + while (q != &me->lockList) { + PRLock *ll = _PR_LOCK_PTR(q); + if (ll->boostPriority > pri) { + pri = ll->boostPriority; + } + q = q->next; + } + if (pri != me->priority) { + _PR_SetThreadPriority(me, pri); + } + } + + /* Unblock the first waiting thread */ + q = lock->waitQ.next; + if (q != &lock->waitQ) + _PR_UnblockLockWaiter(lock); + lock->boostPriority = PR_PRIORITY_LOW; + lock->owner = 0; + _PR_LOCK_UNLOCK(lock); + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSON(is); + return PR_SUCCESS; +#endif /* _PR_GLOBAL_THREADS_ONLY */ +} + +/* +** If the current thread owns |lock|, this assertion is guaranteed to +** succeed. Otherwise, the behavior of this function is undefined. +*/ +PR_IMPLEMENT(void) PR_AssertCurrentThreadOwnsLock(PRLock *lock) +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + PR_ASSERT(lock->owner == me); +} + +/* +** Test and then lock the lock if it's not already locked by some other +** thread. Return PR_FALSE if some other thread owned the lock at the +** time of the call.
+*/ +PR_IMPLEMENT(PRBool) PR_TestAndLock(PRLock *lock) +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + PRBool rv = PR_FALSE; + PRIntn is; + +#ifdef _PR_GLOBAL_THREADS_ONLY + is = _PR_MD_TEST_AND_LOCK(&lock->ilock); + if (is == 0) { + lock->owner = me; + return PR_TRUE; + } + return PR_FALSE; +#else /* _PR_GLOBAL_THREADS_ONLY */ + +#ifndef _PR_LOCAL_THREADS_ONLY + if (_native_threads_only) { + is = _PR_MD_TEST_AND_LOCK(&lock->ilock); + if (is == 0) { + lock->owner = me; + return PR_TRUE; + } + return PR_FALSE; + } +#endif + + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSOFF(is); + + _PR_LOCK_LOCK(lock); + if (lock->owner == 0) { + /* Just got the lock */ + lock->owner = me; + lock->priority = me->priority; + /* Add the granted lock to this owning thread's lock list */ + PR_APPEND_LINK(&lock->links, &me->lockList); + rv = PR_TRUE; + } + _PR_LOCK_UNLOCK(lock); + + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSON(is); + return rv; +#endif /* _PR_GLOBAL_THREADS_ONLY */ +} + +/************************************************************************/ +/************************************************************************/ +/***********************ROUTINES FOR DCE EMULATION***********************/ +/************************************************************************/ +/************************************************************************/ +PR_IMPLEMENT(PRStatus) PRP_TryLock(PRLock *lock) + { return (PR_TestAndLock(lock)) ? PR_SUCCESS : PR_FAILURE; } diff --git a/mozilla/nsprpub/pr/src/threads/combined/prustack.c b/mozilla/nsprpub/pr/src/threads/combined/prustack.c new file mode 100644 index 0000000..fe15843 --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/combined/prustack.c @@ -0,0 +1,206 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is the Netscape Portable Runtime (NSPR). + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998-2000 + * the Initial Developer. All Rights Reserved. + * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. 
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "primpl.h"
+
+/* List of free stack virtual memory chunks */
+PRLock *_pr_stackLock;
+PRCList _pr_freeStacks = PR_INIT_STATIC_CLIST(&_pr_freeStacks);
+PRIntn _pr_numFreeStacks;
+PRIntn _pr_maxFreeStacks = 4;
+
+#ifdef DEBUG
+/*
+** A variable that can be set via the debugger...
+*/
+PRBool _pr_debugStacks = PR_FALSE;
+#endif
+
+/* How much space to leave between the stacks, at each end */
+#define REDZONE (2 << _pr_pageShift)
+
+#define _PR_THREAD_STACK_PTR(_qp) \
+    ((PRThreadStack*) ((char*) (_qp) - offsetof(PRThreadStack,links)))
+
+void _PR_InitStacks(void)
+{
+    _pr_stackLock = PR_NewLock();
+}
+
+void _PR_CleanupStacks(void)
+{
+    if (_pr_stackLock) {
+        PR_DestroyLock(_pr_stackLock);
+        _pr_stackLock = NULL;
+    }
+}
+
+/*
+** Allocate a stack for a thread.
+*/
+PRThreadStack *_PR_NewStack(PRUint32 stackSize)
+{
+    PRCList *qp;
+    PRThreadStack *ts;
+    PRThread *thr;
+
+    /*
+    ** Trim the list of free stacks. Trim it backwards, tossing out the
+    ** oldest stack found first (this way more recent stacks have a
+    ** chance of being present in the data cache).
+    */
+    PR_Lock(_pr_stackLock);
+    qp = _pr_freeStacks.prev;
+    while ((_pr_numFreeStacks > _pr_maxFreeStacks) && (qp != &_pr_freeStacks)) {
+        ts = _PR_THREAD_STACK_PTR(qp);
+        thr = _PR_THREAD_STACK_TO_PTR(ts);
+        qp = qp->prev;
+        /*
+         * skip stacks which are still being used
+         */
+        if (thr->no_sched)
+            continue;
+        PR_REMOVE_LINK(&ts->links);
+
+        /* Give the platform OS a chance to clear out the stack, for debugging */
+        _PR_MD_CLEAR_STACK(ts);
+
+        _pr_numFreeStacks--;
+        _PR_DestroySegment(ts->seg);
+        PR_DELETE(ts);
+    }
+
+    /*
+    ** Find a free thread stack. This searches the list of freed-up,
+    ** virtually mapped thread stacks.
+    */
+    qp = _pr_freeStacks.next;
+    ts = 0;
+    while (qp != &_pr_freeStacks) {
+        ts = _PR_THREAD_STACK_PTR(qp);
+        thr = _PR_THREAD_STACK_TO_PTR(ts);
+        qp = qp->next;
+        /*
+         * skip stacks which are still being used
+         */
+        if ((!(thr->no_sched)) && ((ts->allocSize - 2*REDZONE) >= stackSize)) {
+            /*
+            ** Found a stack that is not in use and is big enough. Change
+            ** stackSize to fit it.
+            */
+            stackSize = ts->allocSize - 2*REDZONE;
+            PR_REMOVE_LINK(&ts->links);
+            _pr_numFreeStacks--;
+            ts->links.next = 0;
+            ts->links.prev = 0;
+            PR_Unlock(_pr_stackLock);
+            goto done;
+        }
+        ts = 0;
+    }
+    PR_Unlock(_pr_stackLock);
+
+    if (!ts) {
+        /* Make a new thread stack object. */
+        ts = PR_NEWZAP(PRThreadStack);
+        if (!ts) {
+            return NULL;
+        }
+
+        /*
+        ** Assign some of the virtual space to the new stack object. We
+        ** may not get that piece of VM, but if nothing else we will
+        ** advance the pointer so we don't collide (unless the OS screws
+        ** up).
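+        **
+        ** Worked example (assuming 4KB pages, i.e. _pr_pageShift == 12):
+        ** REDZONE is then 2 << 12 == 8KB, so a 64KB stack request
+        ** allocates a 64KB + 2*8KB == 80KB segment, with one red zone
+        ** guarding each end of the stack.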
+ */ + ts->allocSize = stackSize + 2*REDZONE; + ts->seg = _PR_NewSegment(ts->allocSize, 0); + if (!ts->seg) { + PR_DELETE(ts); + return NULL; + } + } + + done: + ts->allocBase = (char*)ts->seg->vaddr; + ts->flags = _PR_STACK_MAPPED; + ts->stackSize = stackSize; + +#ifdef HAVE_STACK_GROWING_UP + ts->stackTop = ts->allocBase + REDZONE; + ts->stackBottom = ts->stackTop + stackSize; +#else + ts->stackBottom = ts->allocBase + REDZONE; + ts->stackTop = ts->stackBottom + stackSize; +#endif + + PR_LOG(_pr_thread_lm, PR_LOG_NOTICE, + ("thread stack: base=0x%x limit=0x%x bottom=0x%x top=0x%x\n", + ts->allocBase, ts->allocBase + ts->allocSize - 1, + ts->allocBase + REDZONE, + ts->allocBase + REDZONE + stackSize - 1)); + + _PR_MD_INIT_STACK(ts,REDZONE); + + return ts; +} + +/* +** Free the stack for the current thread +*/ +void _PR_FreeStack(PRThreadStack *ts) +{ + if (!ts) { + return; + } + if (ts->flags & _PR_STACK_PRIMORDIAL) { + PR_DELETE(ts); + return; + } + + /* + ** Put the stack on the free list. This is done because we are still + ** using the stack. Next time a thread is created we will trim the + ** list down; it's safe to do it then because we will have had to + ** context switch to a live stack before another thread can be + ** created. + */ + PR_Lock(_pr_stackLock); + PR_APPEND_LINK(&ts->links, _pr_freeStacks.prev); + _pr_numFreeStacks++; + PR_Unlock(_pr_stackLock); +} diff --git a/mozilla/nsprpub/pr/src/threads/combined/pruthr.c b/mozilla/nsprpub/pr/src/threads/combined/pruthr.c new file mode 100644 index 0000000..2fb7add --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/combined/pruthr.c @@ -0,0 +1,1887 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is the Netscape Portable Runtime (NSPR). + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998-2000 + * the Initial Developer. All Rights Reserved. + * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. 
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "primpl.h"
+#include <signal.h>
+#include <string.h>
+
+#if defined(WIN95)
+/*
+** Some local variables report warnings on Win95 because the code paths
+** using them are conditioned on HAVE_CUSTOM_USER_THREADS.
+** The pragma suppresses the warning.
+*/
+#pragma warning(disable : 4101)
+#endif
+
+/* _pr_activeLock protects the following global variables */
+PRLock *_pr_activeLock;
+PRInt32 _pr_primordialExitCount;   /* In PR_Cleanup(), the primordial thread
+                                    * waits until all other user (non-system)
+                                    * threads have terminated before it exits.
+                                    * So whenever we decrement _pr_userActive,
+                                    * it is compared with
+                                    * _pr_primordialExitCount.
+                                    * If the primordial thread is a system
+                                    * thread, then _pr_primordialExitCount
+                                    * is 0. If the primordial thread is
+                                    * itself a user thread, then
+                                    * _pr_primordialExitCount is 1.
+                                    */
+PRCondVar *_pr_primordialExitCVar; /* When _pr_userActive is decremented to
+                                    * _pr_primordialExitCount, this condition
+                                    * variable is notified.
+                                    */
+
+PRLock *_pr_deadQLock;
+PRUint32 _pr_numNativeDead;
+PRUint32 _pr_numUserDead;
+PRCList _pr_deadNativeQ;
+PRCList _pr_deadUserQ;
+
+PRUint32 _pr_join_counter;
+
+PRUint32 _pr_local_threads;
+PRUint32 _pr_global_threads;
+
+PRBool suspendAllOn = PR_FALSE;
+PRThread *suspendAllThread = NULL;
+
+extern PRCList _pr_active_global_threadQ;
+extern PRCList _pr_active_local_threadQ;
+
+static void _PR_DecrActiveThreadCount(PRThread *thread);
+static PRThread *_PR_AttachThread(PRThreadType, PRThreadPriority, PRThreadStack *);
+static void _PR_InitializeNativeStack(PRThreadStack *ts);
+static void _PR_InitializeRecycledThread(PRThread *thread);
+static void _PR_UserRunThread(void);
+
+void _PR_InitThreads(PRThreadType type, PRThreadPriority priority,
+                     PRUintn maxPTDs)
+{
+    PRThread *thread;
+    PRThreadStack *stack;
+
+    _pr_terminationCVLock = PR_NewLock();
+    _pr_activeLock = PR_NewLock();
+
+#ifndef HAVE_CUSTOM_USER_THREADS
+    stack = PR_NEWZAP(PRThreadStack);
+#ifdef HAVE_STACK_GROWING_UP
+    stack->stackTop = (char*) ((((long)&type) >> _pr_pageShift)
+                               << _pr_pageShift);
+#else
+#if defined(SOLARIS) || defined (UNIXWARE) && defined (USR_SVR4_THREADS)
+    stack->stackTop = (char*) &thread;
+#else
+    stack->stackTop = (char*) ((((long)&type + _pr_pageSize - 1)
+                               >> _pr_pageShift) << _pr_pageShift);
+#endif
+#endif
+#else
+    /* If stack is NULL, we're using custom user threads like NT fibers. */
+    stack = PR_NEWZAP(PRThreadStack);
+    if (stack) {
+        stack->stackSize = 0;
+        _PR_InitializeNativeStack(stack);
+    }
+#endif /* HAVE_CUSTOM_USER_THREADS */
+
+    thread = _PR_AttachThread(type, priority, stack);
+    if (thread) {
+        _PR_MD_SET_CURRENT_THREAD(thread);
+
+        if (type == PR_SYSTEM_THREAD) {
+            thread->flags = _PR_SYSTEM;
+            _pr_systemActive++;
+            _pr_primordialExitCount = 0;
+        } else {
+            _pr_userActive++;
+            _pr_primordialExitCount = 1;
+        }
+        thread->no_sched = 1;
+        _pr_primordialExitCVar = PR_NewCondVar(_pr_activeLock);
+    }
+
+    if (!thread) PR_Abort();
+#ifdef _PR_LOCAL_THREADS_ONLY
+    thread->flags |= _PR_PRIMORDIAL;
+#else
+    thread->flags |= _PR_PRIMORDIAL | _PR_GLOBAL_SCOPE;
+#endif
+
+    /*
+     * Needs the _PR_PRIMORDIAL flag set before calling
+     * _PR_MD_INIT_THREAD()
+     */
+    if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
+        /*
+         * XXX do what?
+ */ + } + + if (_PR_IS_NATIVE_THREAD(thread)) { + PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ()); + _pr_global_threads++; + } else { + PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ()); + _pr_local_threads++; + } + + _pr_recycleThreads = 0; + _pr_deadQLock = PR_NewLock(); + _pr_numNativeDead = 0; + _pr_numUserDead = 0; + PR_INIT_CLIST(&_pr_deadNativeQ); + PR_INIT_CLIST(&_pr_deadUserQ); +} + +void _PR_CleanupThreads(void) +{ + if (_pr_terminationCVLock) { + PR_DestroyLock(_pr_terminationCVLock); + _pr_terminationCVLock = NULL; + } + if (_pr_activeLock) { + PR_DestroyLock(_pr_activeLock); + _pr_activeLock = NULL; + } + if (_pr_primordialExitCVar) { + PR_DestroyCondVar(_pr_primordialExitCVar); + _pr_primordialExitCVar = NULL; + } + /* TODO _pr_dead{Native,User}Q need to be deleted */ + if (_pr_deadQLock) { + PR_DestroyLock(_pr_deadQLock); + _pr_deadQLock = NULL; + } +} + +/* +** Initialize a stack for a native thread +*/ +static void _PR_InitializeNativeStack(PRThreadStack *ts) +{ + if( ts && (ts->stackTop == 0) ) { + ts->allocSize = ts->stackSize; + + /* + ** Setup stackTop and stackBottom values. + */ +#ifdef HAVE_STACK_GROWING_UP + ts->allocBase = (char*) ((((long)&ts) >> _pr_pageShift) + << _pr_pageShift); + ts->stackBottom = ts->allocBase + ts->stackSize; + ts->stackTop = ts->allocBase; +#else + ts->allocBase = (char*) ((((long)&ts + _pr_pageSize - 1) + >> _pr_pageShift) << _pr_pageShift); + ts->stackTop = ts->allocBase; + ts->stackBottom = ts->allocBase - ts->stackSize; +#endif + } +} + +void _PR_NotifyJoinWaiters(PRThread *thread) +{ + /* + ** Handle joinable threads. Change the state to waiting for join. + ** Remove from our run Q and put it on global waiting to join Q. + ** Notify on our "termination" condition variable so that joining + ** thread will know about our termination. Switch our context and + ** come back later on to continue the cleanup. + */ + PR_ASSERT(thread == _PR_MD_CURRENT_THREAD()); + if (thread->term != NULL) { + PR_Lock(_pr_terminationCVLock); + _PR_THREAD_LOCK(thread); + thread->state = _PR_JOIN_WAIT; + if ( !_PR_IS_NATIVE_THREAD(thread) ) { + _PR_MISCQ_LOCK(thread->cpu); + _PR_ADD_JOINQ(thread, thread->cpu); + _PR_MISCQ_UNLOCK(thread->cpu); + } + _PR_THREAD_UNLOCK(thread); + PR_NotifyCondVar(thread->term); + PR_Unlock(_pr_terminationCVLock); + _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT); + PR_ASSERT(thread->state != _PR_JOIN_WAIT); + } + +} + +/* + * Zero some of the data members of a recycled thread. + * + * Note that we can do this either when a dead thread is added to + * the dead thread queue or when it is reused. Here, we are doing + * this lazily, when the thread is reused in _PR_CreateThread(). + */ +static void _PR_InitializeRecycledThread(PRThread *thread) +{ + /* + * Assert that the following data members are already zeroed + * by _PR_CleanupThread(). 
+     */
+#ifdef DEBUG
+    if (thread->privateData) {
+        unsigned int i;
+        for (i = 0; i < thread->tpdLength; i++) {
+            PR_ASSERT(thread->privateData[i] == NULL);
+        }
+    }
+#endif
+    PR_ASSERT(thread->dumpArg == 0 && thread->dump == 0);
+    PR_ASSERT(thread->errorString == 0 && thread->errorStringSize == 0);
+    PR_ASSERT(thread->errorStringLength == 0);
+
+    /* Reset data members in thread structure */
+    thread->errorCode = thread->osErrorCode = 0;
+    thread->io_pending = thread->io_suspended = PR_FALSE;
+    thread->environment = 0;
+    PR_INIT_CLIST(&thread->lockList);
+}
+
+PRStatus _PR_RecycleThread(PRThread *thread)
+{
+    if ( _PR_IS_NATIVE_THREAD(thread) &&
+         _PR_NUM_DEADNATIVE < _pr_recycleThreads) {
+        _PR_DEADQ_LOCK;
+        PR_APPEND_LINK(&thread->links, &_PR_DEADNATIVEQ);
+        _PR_INC_DEADNATIVE;
+        _PR_DEADQ_UNLOCK;
+        return (PR_SUCCESS);
+    } else if ( !_PR_IS_NATIVE_THREAD(thread) &&
+                _PR_NUM_DEADUSER < _pr_recycleThreads) {
+        _PR_DEADQ_LOCK;
+        PR_APPEND_LINK(&thread->links, &_PR_DEADUSERQ);
+        _PR_INC_DEADUSER;
+        _PR_DEADQ_UNLOCK;
+        return (PR_SUCCESS);
+    }
+    return (PR_FAILURE);
+}
+
+/*
+ * Decrement the active thread count, either _pr_systemActive or
+ * _pr_userActive, depending on whether the thread is a system thread
+ * or a user thread. If all the user threads, except possibly
+ * the primordial thread, have terminated, we notify the primordial
+ * thread of this condition.
+ *
+ * Since this function will lock _pr_activeLock, do not call this
+ * function while holding the _pr_activeLock lock, as this will result
+ * in a deadlock.
+ */
+
+static void
+_PR_DecrActiveThreadCount(PRThread *thread)
+{
+    PR_Lock(_pr_activeLock);
+    if (thread->flags & _PR_SYSTEM) {
+        _pr_systemActive--;
+    } else {
+        _pr_userActive--;
+        if (_pr_userActive == _pr_primordialExitCount) {
+            PR_NotifyCondVar(_pr_primordialExitCVar);
+        }
+    }
+    PR_Unlock(_pr_activeLock);
+}
+
+/*
+** Detach thread structure
+*/
+static void
+_PR_DestroyThread(PRThread *thread)
+{
+    _PR_MD_FREE_LOCK(&thread->threadLock);
+    PR_DELETE(thread);
+}
+
+void
+_PR_NativeDestroyThread(PRThread *thread)
+{
+    if (thread->term) {
+        PR_DestroyCondVar(thread->term);
+        thread->term = 0;
+    }
+    if (NULL != thread->privateData) {
+        PR_ASSERT(0 != thread->tpdLength);
+        PR_DELETE(thread->privateData);
+        thread->tpdLength = 0;
+    }
+    PR_DELETE(thread->stack);
+    _PR_DestroyThread(thread);
+}
+
+void
+_PR_UserDestroyThread(PRThread *thread)
+{
+    if (thread->term) {
+        PR_DestroyCondVar(thread->term);
+        thread->term = 0;
+    }
+    if (NULL != thread->privateData) {
+        PR_ASSERT(0 != thread->tpdLength);
+        PR_DELETE(thread->privateData);
+        thread->tpdLength = 0;
+    }
+    _PR_MD_FREE_LOCK(&thread->threadLock);
+    if (thread->threadAllocatedOnStack == 1) {
+        _PR_MD_CLEAN_THREAD(thread);
+        /*
+         * Because the no_sched field is set, this thread/stack will
+         * not be re-used until the flag is cleared by the thread
+         * we will context switch to.
+         */
+        _PR_FreeStack(thread->stack);
+    } else {
+#ifdef WINNT
+        _PR_MD_CLEAN_THREAD(thread);
+#else
+        /*
+         * This assertion does not apply to NT. On NT, every fiber
+         * has its threadAllocatedOnStack equal to 0. Elsewhere,
+         * only the primordial thread has its threadAllocatedOnStack
+         * equal to 0.
+         */
+        PR_ASSERT(thread->flags & _PR_PRIMORDIAL);
+#endif
+    }
+}
+
+
+/*
+** Run a thread's start function. When the start function returns, the
+** thread is done executing and no longer needs the CPU. If there are no
+** more user threads running then we can exit the program.
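+**
+** In outline (a descriptive sketch of the loop below):
+**
+**     initialize the native stack and md state
+**     loop:
+**         run startFunc(arg)
+**         cleanup, notify joiners, decrement the active count
+**         if the thread is not recycled:
+**             md exit processing, destroy the thread, return
+**         otherwise wait until reactivated with a new startFunc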
+*/ +void _PR_NativeRunThread(void *arg) +{ + PRThread *thread = (PRThread *)arg; + + _PR_MD_SET_CURRENT_THREAD(thread); + + _PR_MD_SET_CURRENT_CPU(NULL); + + /* Set up the thread stack information */ + _PR_InitializeNativeStack(thread->stack); + + /* Set up the thread md information */ + if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) { + /* + * thread failed to initialize itself, possibly due to + * failure to allocate per-thread resources + */ + return; + } + + while(1) { + thread->state = _PR_RUNNING; + + /* + * Add to list of active threads + */ + PR_Lock(_pr_activeLock); + PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ()); + _pr_global_threads++; + PR_Unlock(_pr_activeLock); + + (*thread->startFunc)(thread->arg); + + /* + * The following two assertions are meant for NT asynch io. + * + * The thread should have no asynch io in progress when it + * exits, otherwise the overlapped buffer, which is part of + * the thread structure, would become invalid. + */ + PR_ASSERT(thread->io_pending == PR_FALSE); + /* + * This assertion enforces the programming guideline that + * if an io function times out or is interrupted, the thread + * should close the fd to force the asynch io to abort + * before it exits. Right now, closing the fd is the only + * way to clear the io_suspended flag. + */ + PR_ASSERT(thread->io_suspended == PR_FALSE); + + /* + * remove thread from list of active threads + */ + PR_Lock(_pr_activeLock); + PR_REMOVE_LINK(&thread->active); + _pr_global_threads--; + PR_Unlock(_pr_activeLock); + + PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting")); + + /* All done, time to go away */ + _PR_CleanupThread(thread); + + _PR_NotifyJoinWaiters(thread); + + _PR_DecrActiveThreadCount(thread); + + thread->state = _PR_DEAD_STATE; + + if (!_pr_recycleThreads || (_PR_RecycleThread(thread) == + PR_FAILURE)) { + /* + * thread not recycled + * platform-specific thread exit processing + * - for stuff like releasing native-thread resources, etc. + */ + _PR_MD_EXIT_THREAD(thread); + /* + * Free memory allocated for the thread + */ + _PR_NativeDestroyThread(thread); + /* + * thread gone, cannot de-reference thread now + */ + return; + } + + /* Now wait for someone to activate us again... */ + _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT); + } +} + +static void _PR_UserRunThread(void) +{ + PRThread *thread = _PR_MD_CURRENT_THREAD(); + PRIntn is; + + if (_MD_LAST_THREAD()) + _MD_LAST_THREAD()->no_sched = 0; + +#ifdef HAVE_CUSTOM_USER_THREADS + if (thread->stack == NULL) { + thread->stack = PR_NEWZAP(PRThreadStack); + _PR_InitializeNativeStack(thread->stack); + } +#endif /* HAVE_CUSTOM_USER_THREADS */ + + while(1) { + /* Run thread main */ + if ( !_PR_IS_NATIVE_THREAD(thread)) _PR_MD_SET_INTSOFF(0); + + /* + * Add to list of active threads + */ + if (!(thread->flags & _PR_IDLE_THREAD)) { + PR_Lock(_pr_activeLock); + PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ()); + _pr_local_threads++; + PR_Unlock(_pr_activeLock); + } + + (*thread->startFunc)(thread->arg); + + /* + * The following two assertions are meant for NT asynch io. + * + * The thread should have no asynch io in progress when it + * exits, otherwise the overlapped buffer, which is part of + * the thread structure, would become invalid. + */ + PR_ASSERT(thread->io_pending == PR_FALSE); + /* + * This assertion enforces the programming guideline that + * if an io function times out or is interrupted, the thread + * should close the fd to force the asynch io to abort + * before it exits. 
Right now, closing the fd is the only + * way to clear the io_suspended flag. + */ + PR_ASSERT(thread->io_suspended == PR_FALSE); + + PR_Lock(_pr_activeLock); + /* + * remove thread from list of active threads + */ + if (!(thread->flags & _PR_IDLE_THREAD)) { + PR_REMOVE_LINK(&thread->active); + _pr_local_threads--; + } + PR_Unlock(_pr_activeLock); + PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting")); + + /* All done, time to go away */ + _PR_CleanupThread(thread); + + _PR_INTSOFF(is); + + _PR_NotifyJoinWaiters(thread); + + _PR_DecrActiveThreadCount(thread); + + thread->state = _PR_DEAD_STATE; + + if (!_pr_recycleThreads || (_PR_RecycleThread(thread) == + PR_FAILURE)) { + /* + ** Destroy the thread resources + */ + _PR_UserDestroyThread(thread); + } + + /* + ** Find another user thread to run. This cpu has finished the + ** previous threads main and is now ready to run another thread. + */ + { + PRInt32 is; + _PR_INTSOFF(is); + _PR_MD_SWITCH_CONTEXT(thread); + } + + /* Will land here when we get scheduled again if we are recycling... */ + } +} + +void _PR_SetThreadPriority(PRThread *thread, PRThreadPriority newPri) +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + PRIntn is; + + if ( _PR_IS_NATIVE_THREAD(thread) ) { + _PR_MD_SET_PRIORITY(&(thread->md), newPri); + return; + } + + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSOFF(is); + _PR_THREAD_LOCK(thread); + if (newPri != thread->priority) { + _PRCPU *cpu = thread->cpu; + + switch (thread->state) { + case _PR_RUNNING: + /* Change my priority */ + + _PR_RUNQ_LOCK(cpu); + thread->priority = newPri; + if (_PR_RUNQREADYMASK(cpu) >> (newPri + 1)) { + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_SET_RESCHED_FLAG(); + } + _PR_RUNQ_UNLOCK(cpu); + break; + + case _PR_RUNNABLE: + + _PR_RUNQ_LOCK(cpu); + /* Move to different runQ */ + _PR_DEL_RUNQ(thread); + thread->priority = newPri; + PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD)); + _PR_ADD_RUNQ(thread, cpu, newPri); + _PR_RUNQ_UNLOCK(cpu); + + if (newPri > me->priority) { + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_SET_RESCHED_FLAG(); + } + + break; + + case _PR_LOCK_WAIT: + case _PR_COND_WAIT: + case _PR_IO_WAIT: + case _PR_SUSPENDED: + + thread->priority = newPri; + break; + } + } + _PR_THREAD_UNLOCK(thread); + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSON(is); +} + +/* +** Suspend the named thread and copy its gc registers into regBuf +*/ +static void _PR_Suspend(PRThread *thread) +{ + PRIntn is; + PRThread *me = _PR_MD_CURRENT_THREAD(); + + PR_ASSERT(thread != me); + PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread) || (!thread->cpu)); + + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSOFF(is); + _PR_THREAD_LOCK(thread); + switch (thread->state) { + case _PR_RUNNABLE: + if (!_PR_IS_NATIVE_THREAD(thread)) { + _PR_RUNQ_LOCK(thread->cpu); + _PR_DEL_RUNQ(thread); + _PR_RUNQ_UNLOCK(thread->cpu); + + _PR_MISCQ_LOCK(thread->cpu); + _PR_ADD_SUSPENDQ(thread, thread->cpu); + _PR_MISCQ_UNLOCK(thread->cpu); + } else { + /* + * Only LOCAL threads are suspended by _PR_Suspend + */ + PR_ASSERT(0); + } + thread->state = _PR_SUSPENDED; + break; + + case _PR_RUNNING: + /* + * The thread being suspended should be a LOCAL thread with + * _pr_numCPUs == 1. 
Hence, the thread cannot be in RUNNING state + */ + PR_ASSERT(0); + break; + + case _PR_LOCK_WAIT: + case _PR_IO_WAIT: + case _PR_COND_WAIT: + if (_PR_IS_NATIVE_THREAD(thread)) { + _PR_MD_SUSPEND_THREAD(thread); + } + thread->flags |= _PR_SUSPENDING; + break; + + default: + PR_Abort(); + } + _PR_THREAD_UNLOCK(thread); + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSON(is); +} + +static void _PR_Resume(PRThread *thread) +{ + PRThreadPriority pri; + PRIntn is; + PRThread *me = _PR_MD_CURRENT_THREAD(); + + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSOFF(is); + _PR_THREAD_LOCK(thread); + switch (thread->state) { + case _PR_SUSPENDED: + thread->state = _PR_RUNNABLE; + thread->flags &= ~_PR_SUSPENDING; + if (!_PR_IS_NATIVE_THREAD(thread)) { + _PR_MISCQ_LOCK(thread->cpu); + _PR_DEL_SUSPENDQ(thread); + _PR_MISCQ_UNLOCK(thread->cpu); + + pri = thread->priority; + + _PR_RUNQ_LOCK(thread->cpu); + _PR_ADD_RUNQ(thread, thread->cpu, pri); + _PR_RUNQ_UNLOCK(thread->cpu); + + if (pri > _PR_MD_CURRENT_THREAD()->priority) { + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_SET_RESCHED_FLAG(); + } + } else { + PR_ASSERT(0); + } + break; + + case _PR_IO_WAIT: + case _PR_COND_WAIT: + thread->flags &= ~_PR_SUSPENDING; +/* PR_ASSERT(thread->wait.monitor->stickyCount == 0); */ + break; + + case _PR_LOCK_WAIT: + { + PRLock *wLock = thread->wait.lock; + + thread->flags &= ~_PR_SUSPENDING; + + _PR_LOCK_LOCK(wLock); + if (thread->wait.lock->owner == 0) { + _PR_UnblockLockWaiter(thread->wait.lock); + } + _PR_LOCK_UNLOCK(wLock); + break; + } + case _PR_RUNNABLE: + break; + case _PR_RUNNING: + /* + * The thread being suspended should be a LOCAL thread with + * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state + */ + PR_ASSERT(0); + break; + + default: + /* + * thread should have been in one of the above-listed blocked states + * (_PR_JOIN_WAIT, _PR_IO_WAIT, _PR_UNBORN, _PR_DEAD_STATE) + */ + PR_Abort(); + } + _PR_THREAD_UNLOCK(thread); + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSON(is); + +} + +#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) +static PRThread *get_thread(_PRCPU *cpu, PRBool *wakeup_cpus) +{ + PRThread *thread; + PRIntn pri; + PRUint32 r; + PRCList *qp; + PRIntn priMin, priMax; + + _PR_RUNQ_LOCK(cpu); + r = _PR_RUNQREADYMASK(cpu); + if (r==0) { + priMin = priMax = PR_PRIORITY_FIRST; + } else if (r == (1<<PR_PRIORITY_NORMAL) ) { + priMin = priMax = PR_PRIORITY_NORMAL; + } else { + priMin = PR_PRIORITY_FIRST; + priMax = PR_PRIORITY_LAST; + } + thread = NULL; + for (pri = priMax; pri >= priMin ; pri-- ) { + if (r & (1 << pri)) { + for (qp = _PR_RUNQ(cpu)[pri].next; + qp != &_PR_RUNQ(cpu)[pri]; + qp = qp->next) { + thread = _PR_THREAD_PTR(qp); + /* + * skip non-schedulable threads + */ + PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD)); + if (thread->no_sched) { + thread = NULL; + /* + * Need to wakeup cpus to avoid missing a + * runnable thread + * Waking up all CPU's need happen only once. + */ + + *wakeup_cpus = PR_TRUE; + continue; + } else if (thread->flags & _PR_BOUND_THREAD) { + /* + * Thread bound to cpu 0 + */ + + thread = NULL; +#ifdef IRIX + _PR_MD_WAKEUP_PRIMORDIAL_CPU(); +#endif + continue; + } else if (thread->io_pending == PR_TRUE) { + /* + * A thread that is blocked for I/O needs to run + * on the same cpu on which it was blocked. This is because + * the cpu's ioq is accessed without lock protection and scheduling + * the thread on a different cpu would preclude this optimization. 
+ */ + thread = NULL; + continue; + } else { + /* Pull thread off of its run queue */ + _PR_DEL_RUNQ(thread); + _PR_RUNQ_UNLOCK(cpu); + return(thread); + } + } + } + thread = NULL; + } + _PR_RUNQ_UNLOCK(cpu); + return(thread); +} +#endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */ + +/* +** Schedule this native thread by finding the highest priority nspr +** thread that is ready to run. +** +** Note- everyone really needs to call _PR_MD_SWITCH_CONTEXT (which calls +** PR_Schedule() rather than calling PR_Schedule. Otherwise if there +** is initialization required for switching from SWITCH_CONTEXT, +** it will not get done! +*/ +void _PR_Schedule(void) +{ + PRThread *thread, *me = _PR_MD_CURRENT_THREAD(); + _PRCPU *cpu = _PR_MD_CURRENT_CPU(); + PRIntn pri; + PRUint32 r; + PRCList *qp; + PRIntn priMin, priMax; +#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) + PRBool wakeup_cpus; +#endif + + /* Interrupts must be disabled */ + PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0); + + /* Since we are rescheduling, we no longer want to */ + _PR_CLEAR_RESCHED_FLAG(); + + /* + ** Find highest priority thread to run. Bigger priority numbers are + ** higher priority threads + */ + _PR_RUNQ_LOCK(cpu); + /* + * if we are in SuspendAll mode, can schedule only the thread + * that called PR_SuspendAll + * + * The thread may be ready to run now, after completing an I/O + * operation, for example + */ + if ((thread = suspendAllThread) != 0) { + if ((!(thread->no_sched)) && (thread->state == _PR_RUNNABLE)) { + /* Pull thread off of its run queue */ + _PR_DEL_RUNQ(thread); + _PR_RUNQ_UNLOCK(cpu); + goto found_thread; + } else { + thread = NULL; + _PR_RUNQ_UNLOCK(cpu); + goto idle_thread; + } + } + r = _PR_RUNQREADYMASK(cpu); + if (r==0) { + priMin = priMax = PR_PRIORITY_FIRST; + } else if (r == (1<<PR_PRIORITY_NORMAL) ) { + priMin = priMax = PR_PRIORITY_NORMAL; + } else { + priMin = PR_PRIORITY_FIRST; + priMax = PR_PRIORITY_LAST; + } + thread = NULL; + for (pri = priMax; pri >= priMin ; pri-- ) { + if (r & (1 << pri)) { + for (qp = _PR_RUNQ(cpu)[pri].next; + qp != &_PR_RUNQ(cpu)[pri]; + qp = qp->next) { + thread = _PR_THREAD_PTR(qp); + /* + * skip non-schedulable threads + */ + PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD)); + if ((thread->no_sched) && (me != thread)){ + thread = NULL; + continue; + } else { + /* Pull thread off of its run queue */ + _PR_DEL_RUNQ(thread); + _PR_RUNQ_UNLOCK(cpu); + goto found_thread; + } + } + } + thread = NULL; + } + _PR_RUNQ_UNLOCK(cpu); + +#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) + + wakeup_cpus = PR_FALSE; + _PR_CPU_LIST_LOCK(); + for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) { + if (cpu != _PR_CPU_PTR(qp)) { + if ((thread = get_thread(_PR_CPU_PTR(qp), &wakeup_cpus)) + != NULL) { + thread->cpu = cpu; + _PR_CPU_LIST_UNLOCK(); + if (wakeup_cpus == PR_TRUE) + _PR_MD_WAKEUP_CPUS(); + goto found_thread; + } + } + } + _PR_CPU_LIST_UNLOCK(); + if (wakeup_cpus == PR_TRUE) + _PR_MD_WAKEUP_CPUS(); + +#endif /* _PR_LOCAL_THREADS_ONLY */ + +idle_thread: + /* + ** There are no threads to run. 
Switch to the idle thread.
+    */
+    PR_LOG(_pr_sched_lm, PR_LOG_MAX, ("pausing"));
+    thread = _PR_MD_CURRENT_CPU()->idle_thread;
+
+found_thread:
+    PR_ASSERT((me == thread) || ((thread->state == _PR_RUNNABLE) &&
+              (!(thread->no_sched))));
+
+    /* Resume the thread */
+    PR_LOG(_pr_sched_lm, PR_LOG_MAX,
+           ("switching to %d[%p]", thread->id, thread));
+    PR_ASSERT(thread->state != _PR_RUNNING);
+    thread->state = _PR_RUNNING;
+
+    /* If we are on the runq, it just means that we went to sleep on some
+     * resource, and by the time we got here another real native thread had
+     * already given us the resource and put us back on the runqueue.
+     */
+    PR_ASSERT(thread->cpu == _PR_MD_CURRENT_CPU());
+    if (thread != me)
+        _PR_MD_RESTORE_CONTEXT(thread);
+#if 0
+    /* XXXMB; with setjmp/longjmp it is impossible to land here, but
+     * it is not with fibers... Is this a bad thing? I believe it is
+     * still safe.
+     */
+    PR_NOT_REACHED("impossible return from schedule");
+#endif
+}
+
+/*
+** Attaches a thread.
+** Does not set the _PR_MD_CURRENT_THREAD.
+** Does not specify the scope of the thread.
+*/
+static PRThread *
+_PR_AttachThread(PRThreadType type, PRThreadPriority priority,
+                 PRThreadStack *stack)
+{
+    PRThread *thread;
+    char *mem;
+
+    if (priority > PR_PRIORITY_LAST) {
+        priority = PR_PRIORITY_LAST;
+    } else if (priority < PR_PRIORITY_FIRST) {
+        priority = PR_PRIORITY_FIRST;
+    }
+
+    mem = (char*) PR_CALLOC(sizeof(PRThread));
+    if (mem) {
+        thread = (PRThread*) mem;
+        thread->priority = priority;
+        thread->stack = stack;
+        thread->state = _PR_RUNNING;
+        PR_INIT_CLIST(&thread->lockList);
+        if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
+            PR_DELETE(thread);
+            return 0;
+        }
+
+        return thread;
+    }
+    return 0;
+}
+
+
+
+PR_IMPLEMENT(PRThread*)
+_PR_NativeCreateThread(PRThreadType type,
+                       void (*start)(void *arg),
+                       void *arg,
+                       PRThreadPriority priority,
+                       PRThreadScope scope,
+                       PRThreadState state,
+                       PRUint32 stackSize,
+                       PRUint32 flags)
+{
+    PRThread *thread;
+
+    thread = _PR_AttachThread(type, priority, NULL);
+
+    if (thread) {
+        PR_Lock(_pr_activeLock);
+        thread->flags = (flags | _PR_GLOBAL_SCOPE);
+        thread->id = ++_pr_utid;
+        if (type == PR_SYSTEM_THREAD) {
+            thread->flags |= _PR_SYSTEM;
+            _pr_systemActive++;
+        } else {
+            _pr_userActive++;
+        }
+        PR_Unlock(_pr_activeLock);
+
+        thread->stack = PR_NEWZAP(PRThreadStack);
+        if (!thread->stack) {
+            PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+            goto done;
+        }
+        thread->stack->stackSize = stackSize ? stackSize : _MD_DEFAULT_STACK_SIZE;
+        thread->stack->thr = thread;
+        thread->startFunc = start;
+        thread->arg = arg;
+
+        /*
+        Set thread flags related to scope and joinable state. If joinable
+        thread, allocate a "termination" condition variable.
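+        A joiner later blocks on that condition variable in
+        PR_JoinThread() until this thread reaches _PR_JOIN_WAIT and
+        notifies it from _PR_NotifyJoinWaiters().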
+ */ + if (state == PR_JOINABLE_THREAD) { + thread->term = PR_NewCondVar(_pr_terminationCVLock); + if (thread->term == NULL) { + PR_DELETE(thread->stack); + goto done; + } + } + + thread->state = _PR_RUNNING; + if (_PR_MD_CREATE_THREAD(thread, _PR_NativeRunThread, priority, + scope,state,stackSize) == PR_SUCCESS) { + return thread; + } + if (thread->term) { + PR_DestroyCondVar(thread->term); + thread->term = NULL; + } + PR_DELETE(thread->stack); + } + +done: + if (thread) { + _PR_DecrActiveThreadCount(thread); + _PR_DestroyThread(thread); + } + return NULL; +} + +/************************************************************************/ + +PR_IMPLEMENT(PRThread*) _PR_CreateThread(PRThreadType type, + void (*start)(void *arg), + void *arg, + PRThreadPriority priority, + PRThreadScope scope, + PRThreadState state, + PRUint32 stackSize, + PRUint32 flags) +{ + PRThread *me; + PRThread *thread = NULL; + PRThreadStack *stack; + char *top; + PRIntn is; + PRIntn native = 0; + PRIntn useRecycled = 0; + PRBool status; + + /* + First, pin down the priority. Not all compilers catch passing out of + range enum here. If we let bad values thru, priority queues won't work. + */ + if (priority > PR_PRIORITY_LAST) { + priority = PR_PRIORITY_LAST; + } else if (priority < PR_PRIORITY_FIRST) { + priority = PR_PRIORITY_FIRST; + } + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + if (! (flags & _PR_IDLE_THREAD)) + me = _PR_MD_CURRENT_THREAD(); + +#if defined(_PR_GLOBAL_THREADS_ONLY) + /* + * can create global threads only + */ + if (scope == PR_LOCAL_THREAD) + scope = PR_GLOBAL_THREAD; +#endif + + if (_native_threads_only) + scope = PR_GLOBAL_THREAD; + + native = (((scope == PR_GLOBAL_THREAD)|| (scope == PR_GLOBAL_BOUND_THREAD)) + && _PR_IS_NATIVE_THREAD_SUPPORTED()); + + _PR_ADJUST_STACKSIZE(stackSize); + + if (native) { + /* + * clear the IDLE_THREAD flag which applies to LOCAL + * threads only + */ + flags &= ~_PR_IDLE_THREAD; + flags |= _PR_GLOBAL_SCOPE; + if (_PR_NUM_DEADNATIVE > 0) { + _PR_DEADQ_LOCK; + + if (_PR_NUM_DEADNATIVE == 0) { /* Thread safe check */ + _PR_DEADQ_UNLOCK; + } else { + thread = _PR_THREAD_PTR(_PR_DEADNATIVEQ.next); + PR_REMOVE_LINK(&thread->links); + _PR_DEC_DEADNATIVE; + _PR_DEADQ_UNLOCK; + + _PR_InitializeRecycledThread(thread); + thread->startFunc = start; + thread->arg = arg; + thread->flags = (flags | _PR_GLOBAL_SCOPE); + if (type == PR_SYSTEM_THREAD) + { + thread->flags |= _PR_SYSTEM; + PR_ATOMIC_INCREMENT(&_pr_systemActive); + } + else PR_ATOMIC_INCREMENT(&_pr_userActive); + + if (state == PR_JOINABLE_THREAD) { + if (!thread->term) + thread->term = PR_NewCondVar(_pr_terminationCVLock); + } + else { + if(thread->term) { + PR_DestroyCondVar(thread->term); + thread->term = 0; + } + } + + thread->priority = priority; + _PR_MD_SET_PRIORITY(&(thread->md), priority); + /* XXX what about stackSize? */ + thread->state = _PR_RUNNING; + _PR_MD_WAKEUP_WAITER(thread); + return thread; + } + } + thread = _PR_NativeCreateThread(type, start, arg, priority, + scope, state, stackSize, flags); + } else { + if (_PR_NUM_DEADUSER > 0) { + _PR_DEADQ_LOCK; + + if (_PR_NUM_DEADUSER == 0) { /* thread safe check */ + _PR_DEADQ_UNLOCK; + } else { + PRCList *ptr; + + /* Go down list checking for a recycled thread with a + * large enough stack. XXXMB - this has a bad degenerate case. 
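+                     * (Degenerate in that if every dead stack is smaller
+                     * than the request, each create pays a full walk of
+                     * the dead queue and then allocates anyway.)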
+ */ + ptr = _PR_DEADUSERQ.next; + while( ptr != &_PR_DEADUSERQ ) { + thread = _PR_THREAD_PTR(ptr); + if ((thread->stack->stackSize >= stackSize) && + (!thread->no_sched)) { + PR_REMOVE_LINK(&thread->links); + _PR_DEC_DEADUSER; + break; + } else { + ptr = ptr->next; + thread = NULL; + } + } + + _PR_DEADQ_UNLOCK; + + if (thread) { + _PR_InitializeRecycledThread(thread); + thread->startFunc = start; + thread->arg = arg; + thread->priority = priority; + if (state == PR_JOINABLE_THREAD) { + if (!thread->term) + thread->term = PR_NewCondVar(_pr_terminationCVLock); + } else { + if(thread->term) { + PR_DestroyCondVar(thread->term); + thread->term = 0; + } + } + useRecycled++; + } + } + } + if (thread == NULL) { +#ifndef HAVE_CUSTOM_USER_THREADS + stack = _PR_NewStack(stackSize); + if (!stack) { + PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); + return NULL; + } + + /* Allocate thread object and per-thread data off the top of the stack*/ + top = stack->stackTop; +#ifdef HAVE_STACK_GROWING_UP + thread = (PRThread*) top; + top = top + sizeof(PRThread); + /* + * Make stack 64-byte aligned + */ + if ((PRUptrdiff)top & 0x3f) { + top = (char*)(((PRUptrdiff)top + 0x40) & ~0x3f); + } +#else + top = top - sizeof(PRThread); + thread = (PRThread*) top; + /* + * Make stack 64-byte aligned + */ + if ((PRUptrdiff)top & 0x3f) { + top = (char*)((PRUptrdiff)top & ~0x3f); + } +#endif + stack->thr = thread; + memset(thread, 0, sizeof(PRThread)); + thread->threadAllocatedOnStack = 1; +#else + thread = _PR_MD_CREATE_USER_THREAD(stackSize, start, arg); + if (!thread) { + PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); + return NULL; + } + thread->threadAllocatedOnStack = 0; + stack = NULL; + top = NULL; +#endif + + /* Initialize thread */ + thread->tpdLength = 0; + thread->privateData = NULL; + thread->stack = stack; + thread->priority = priority; + thread->startFunc = start; + thread->arg = arg; + PR_INIT_CLIST(&thread->lockList); + + if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) { + if (thread->threadAllocatedOnStack == 1) + _PR_FreeStack(thread->stack); + else { + PR_DELETE(thread); + } + PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0); + return NULL; + } + + if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) { + if (thread->threadAllocatedOnStack == 1) + _PR_FreeStack(thread->stack); + else { + PR_DELETE(thread->privateData); + PR_DELETE(thread); + } + PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0); + return NULL; + } + + _PR_MD_INIT_CONTEXT(thread, top, _PR_UserRunThread, &status); + + if (status == PR_FALSE) { + _PR_MD_FREE_LOCK(&thread->threadLock); + if (thread->threadAllocatedOnStack == 1) + _PR_FreeStack(thread->stack); + else { + PR_DELETE(thread->privateData); + PR_DELETE(thread); + } + return NULL; + } + + /* + Set thread flags related to scope and joinable state. If joinable + thread, allocate a "termination" condition variable. 
+ */ + if (state == PR_JOINABLE_THREAD) { + thread->term = PR_NewCondVar(_pr_terminationCVLock); + if (thread->term == NULL) { + _PR_MD_FREE_LOCK(&thread->threadLock); + if (thread->threadAllocatedOnStack == 1) + _PR_FreeStack(thread->stack); + else { + PR_DELETE(thread->privateData); + PR_DELETE(thread); + } + return NULL; + } + } + + } + + /* Update thread type counter */ + PR_Lock(_pr_activeLock); + thread->flags = flags; + thread->id = ++_pr_utid; + if (type == PR_SYSTEM_THREAD) { + thread->flags |= _PR_SYSTEM; + _pr_systemActive++; + } else { + _pr_userActive++; + } + + /* Make thread runnable */ + thread->state = _PR_RUNNABLE; + /* + * Add to list of active threads + */ + PR_Unlock(_pr_activeLock); + + if ((! (thread->flags & _PR_IDLE_THREAD)) && _PR_IS_NATIVE_THREAD(me) ) + thread->cpu = _PR_GetPrimordialCPU(); + else + thread->cpu = _PR_MD_CURRENT_CPU(); + + PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread)); + + if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me)) { + _PR_INTSOFF(is); + _PR_RUNQ_LOCK(thread->cpu); + _PR_ADD_RUNQ(thread, thread->cpu, priority); + _PR_RUNQ_UNLOCK(thread->cpu); + } + + if (thread->flags & _PR_IDLE_THREAD) { + /* + ** If the creating thread is a kernel thread, we need to + ** awaken the user thread idle thread somehow; potentially + ** it could be sleeping in its idle loop, and we need to poke + ** it. To do so, wake the idle thread... + */ + _PR_MD_WAKEUP_WAITER(NULL); + } else if (_PR_IS_NATIVE_THREAD(me)) { + _PR_MD_WAKEUP_WAITER(thread); + } + if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me) ) + _PR_INTSON(is); + } + + return thread; +} + +PR_IMPLEMENT(PRThread*) PR_CreateThread(PRThreadType type, + void (*start)(void *arg), + void *arg, + PRThreadPriority priority, + PRThreadScope scope, + PRThreadState state, + PRUint32 stackSize) +{ + return _PR_CreateThread(type, start, arg, priority, scope, state, + stackSize, 0); +} + +/* +** Associate a thread object with an existing native thread. +** "type" is the type of thread object to attach +** "priority" is the priority to assign to the thread +** "stack" defines the shape of the threads stack +** +** This can return NULL if some kind of error occurs, or if memory is +** tight. +** +** This call is not normally needed unless you create your own native +** thread. PR_Init does this automatically for the primordial thread. 
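+**
+** (In practice a foreign thread is usually attached implicitly: its
+** first call into NSPR, e.g. PR_GetCurrentThread(), creates and
+** attaches a thread object for it.)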
+*/ +PRThread* _PRI_AttachThread(PRThreadType type, + PRThreadPriority priority, PRThreadStack *stack, PRUint32 flags) +{ + PRThread *thread; + + if ((thread = _PR_MD_GET_ATTACHED_THREAD()) != NULL) { + return thread; + } + _PR_MD_SET_CURRENT_THREAD(NULL); + + /* Clear out any state if this thread was attached before */ + _PR_MD_SET_CURRENT_CPU(NULL); + + thread = _PR_AttachThread(type, priority, stack); + if (thread) { + PRIntn is; + + _PR_MD_SET_CURRENT_THREAD(thread); + + thread->flags = flags | _PR_GLOBAL_SCOPE | _PR_ATTACHED; + + if (!stack) { + thread->stack = PR_NEWZAP(PRThreadStack); + if (!thread->stack) { + _PR_DestroyThread(thread); + return NULL; + } + thread->stack->stackSize = _MD_DEFAULT_STACK_SIZE; + } + PR_INIT_CLIST(&thread->links); + + if (_PR_MD_INIT_ATTACHED_THREAD(thread) == PR_FAILURE) { + PR_DELETE(thread->stack); + _PR_DestroyThread(thread); + return NULL; + } + + _PR_MD_SET_CURRENT_CPU(NULL); + + if (_PR_MD_CURRENT_CPU()) { + _PR_INTSOFF(is); + PR_Lock(_pr_activeLock); + } + if (type == PR_SYSTEM_THREAD) { + thread->flags |= _PR_SYSTEM; + _pr_systemActive++; + } else { + _pr_userActive++; + } + if (_PR_MD_CURRENT_CPU()) { + PR_Unlock(_pr_activeLock); + _PR_INTSON(is); + } + } + return thread; +} + +PR_IMPLEMENT(PRThread*) PR_AttachThread(PRThreadType type, + PRThreadPriority priority, PRThreadStack *stack) +{ + return PR_GetCurrentThread(); +} + +PR_IMPLEMENT(void) PR_DetachThread(void) +{ + /* + * On IRIX, Solaris, and Windows, foreign threads are detached when + * they terminate. + */ +#if !defined(IRIX) && !defined(WIN32) \ + && !(defined(SOLARIS) && defined(_PR_GLOBAL_THREADS_ONLY)) + PRThread *me; + if (_pr_initialized) { + me = _PR_MD_GET_ATTACHED_THREAD(); + if ((me != NULL) && (me->flags & _PR_ATTACHED)) + _PRI_DetachThread(); + } +#endif +} + +void _PRI_DetachThread(void) +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + + if (me->flags & _PR_PRIMORDIAL) { + /* + * ignore, if primordial thread + */ + return; + } + PR_ASSERT(me->flags & _PR_ATTACHED); + PR_ASSERT(_PR_IS_NATIVE_THREAD(me)); + _PR_CleanupThread(me); + PR_DELETE(me->privateData); + + _PR_DecrActiveThreadCount(me); + + _PR_MD_CLEAN_THREAD(me); + _PR_MD_SET_CURRENT_THREAD(NULL); + if (!me->threadAllocatedOnStack) + PR_DELETE(me->stack); + _PR_MD_FREE_LOCK(&me->threadLock); + PR_DELETE(me); +} + +/* +** Wait for thread termination: +** "thread" is the target thread +** +** This can return PR_FAILURE if no joinable thread could be found +** corresponding to the specified target thread. +** +** The calling thread is suspended until the target thread completes. +** Several threads cannot wait for the same thread to complete; one thread +** will complete successfully and others will terminate with an error PR_FAILURE. +** The calling thread will not be blocked if the target thread has already +** terminated. 
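+**
+** Typical usage (an illustrative sketch; Worker stands in for any
+** start function):
+**
+**     PRThread *t = PR_CreateThread(PR_USER_THREAD, Worker, arg,
+**                                   PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+**                                   PR_JOINABLE_THREAD, 0);
+**     if (t != NULL)
+**         PR_JoinThread(t);   reaps t; t must not be used afterwards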
+*/ +PR_IMPLEMENT(PRStatus) PR_JoinThread(PRThread *thread) +{ + PRIntn is; + PRCondVar *term; + PRThread *me = _PR_MD_CURRENT_THREAD(); + + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSOFF(is); + term = thread->term; + /* can't join a non-joinable thread */ + if (term == NULL) { + PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); + goto ErrorExit; + } + + /* multiple threads can't wait on the same joinable thread */ + if (term->condQ.next != &term->condQ) { + goto ErrorExit; + } + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSON(is); + + /* wait for the target thread's termination cv invariant */ + PR_Lock (_pr_terminationCVLock); + while (thread->state != _PR_JOIN_WAIT) { + (void) PR_WaitCondVar(term, PR_INTERVAL_NO_TIMEOUT); + } + (void) PR_Unlock (_pr_terminationCVLock); + + /* + Remove target thread from global waiting to join Q; make it runnable + again and put it back on its run Q. When it gets scheduled later in + _PR_RunThread code, it will clean up its stack. + */ + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSOFF(is); + thread->state = _PR_RUNNABLE; + if ( !_PR_IS_NATIVE_THREAD(thread) ) { + _PR_THREAD_LOCK(thread); + + _PR_MISCQ_LOCK(thread->cpu); + _PR_DEL_JOINQ(thread); + _PR_MISCQ_UNLOCK(thread->cpu); + + _PR_AddThreadToRunQ(me, thread); + _PR_THREAD_UNLOCK(thread); + } + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSON(is); + + _PR_MD_WAKEUP_WAITER(thread); + + return PR_SUCCESS; + +ErrorExit: + if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is); + return PR_FAILURE; +} + +PR_IMPLEMENT(void) PR_SetThreadPriority(PRThread *thread, + PRThreadPriority newPri) +{ + + /* + First, pin down the priority. Not all compilers catch passing out of + range enum here. If we let bad values thru, priority queues won't work. + */ + if ((PRIntn)newPri > (PRIntn)PR_PRIORITY_LAST) { + newPri = PR_PRIORITY_LAST; + } else if ((PRIntn)newPri < (PRIntn)PR_PRIORITY_FIRST) { + newPri = PR_PRIORITY_FIRST; + } + + if ( _PR_IS_NATIVE_THREAD(thread) ) { + thread->priority = newPri; + _PR_MD_SET_PRIORITY(&(thread->md), newPri); + } else _PR_SetThreadPriority(thread, newPri); +} + + +/* +** This routine prevents all other threads from running. This call is needed by +** the garbage collector. +*/ +PR_IMPLEMENT(void) PR_SuspendAll(void) +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + PRCList *qp; + + /* + * Stop all user and native threads which are marked GC able. + */ + PR_Lock(_pr_activeLock); + suspendAllOn = PR_TRUE; + suspendAllThread = _PR_MD_CURRENT_THREAD(); + _PR_MD_BEGIN_SUSPEND_ALL(); + for (qp = _PR_ACTIVE_LOCAL_THREADQ().next; + qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) { + if ((me != _PR_ACTIVE_THREAD_PTR(qp)) && + _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) { + _PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp)); + PR_ASSERT((_PR_ACTIVE_THREAD_PTR(qp))->state != _PR_RUNNING); + } + } + for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next; + qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) { + if ((me != _PR_ACTIVE_THREAD_PTR(qp)) && + _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) + /* PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp)); */ + _PR_MD_SUSPEND_THREAD(_PR_ACTIVE_THREAD_PTR(qp)); + } + _PR_MD_END_SUSPEND_ALL(); +} + +/* +** This routine unblocks all other threads that were suspended from running by +** PR_SuspendAll(). This call is needed by the garbage collector. +*/ +PR_IMPLEMENT(void) PR_ResumeAll(void) +{ + PRThread *me = _PR_MD_CURRENT_THREAD(); + PRCList *qp; + + /* + * Resume all user and native threads which are marked GC able. 
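+     *
+     * A stop-the-world collector would bracket its scan with these
+     * calls (sketch; ScanOneThread is a placeholder PREnumerator):
+     *
+     *     PR_SuspendAll();
+     *     PR_EnumerateThreads(ScanOneThread, gc_data);
+     *     PR_ResumeAll();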
+ */ + _PR_MD_BEGIN_RESUME_ALL(); + for (qp = _PR_ACTIVE_LOCAL_THREADQ().next; + qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) { + if ((me != _PR_ACTIVE_THREAD_PTR(qp)) && + _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) + _PR_Resume(_PR_ACTIVE_THREAD_PTR(qp)); + } + for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next; + qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) { + if ((me != _PR_ACTIVE_THREAD_PTR(qp)) && + _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) + _PR_MD_RESUME_THREAD(_PR_ACTIVE_THREAD_PTR(qp)); + } + _PR_MD_END_RESUME_ALL(); + suspendAllThread = NULL; + suspendAllOn = PR_FALSE; + PR_Unlock(_pr_activeLock); +} + +PR_IMPLEMENT(PRStatus) PR_EnumerateThreads(PREnumerator func, void *arg) +{ + PRCList *qp, *qp_next; + PRIntn i = 0; + PRStatus rv = PR_SUCCESS; + PRThread* t; + + /* + ** Currently Enumerate threads happen only with suspension and + ** pr_activeLock held + */ + PR_ASSERT(suspendAllOn); + + /* Steve Morse, 4-23-97: Note that we can't walk a queue by taking + * qp->next after applying the function "func". In particular, "func" + * might remove the thread from the queue and put it into another one in + * which case qp->next no longer points to the next entry in the original + * queue. + * + * To get around this problem, we save qp->next in qp_next before applying + * "func" and use that saved value as the next value after applying "func". + */ + + /* + * Traverse the list of local and global threads + */ + for (qp = _PR_ACTIVE_LOCAL_THREADQ().next; + qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp_next) + { + qp_next = qp->next; + t = _PR_ACTIVE_THREAD_PTR(qp); + if (_PR_IS_GCABLE_THREAD(t)) + { + rv = (*func)(t, i, arg); + if (rv != PR_SUCCESS) + return rv; + i++; + } + } + for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next; + qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp_next) + { + qp_next = qp->next; + t = _PR_ACTIVE_THREAD_PTR(qp); + if (_PR_IS_GCABLE_THREAD(t)) + { + rv = (*func)(t, i, arg); + if (rv != PR_SUCCESS) + return rv; + i++; + } + } + return rv; +} + +/* FUNCTION: _PR_AddSleepQ +** DESCRIPTION: +** Adds a thread to the sleep/pauseQ. +** RESTRICTIONS: +** Caller must have the RUNQ lock. +** Caller must be a user level thread +*/ +PR_IMPLEMENT(void) +_PR_AddSleepQ(PRThread *thread, PRIntervalTime timeout) +{ + _PRCPU *cpu = thread->cpu; + + if (timeout == PR_INTERVAL_NO_TIMEOUT) { + /* append the thread to the global pause Q */ + PR_APPEND_LINK(&thread->links, &_PR_PAUSEQ(thread->cpu)); + thread->flags |= _PR_ON_PAUSEQ; + } else { + PRIntervalTime sleep; + PRCList *q; + PRThread *t; + + /* sort onto global sleepQ */ + sleep = timeout; + + /* Check if we are longest timeout */ + if (timeout >= _PR_SLEEPQMAX(cpu)) { + PR_INSERT_BEFORE(&thread->links, &_PR_SLEEPQ(cpu)); + thread->sleep = timeout - _PR_SLEEPQMAX(cpu); + _PR_SLEEPQMAX(cpu) = timeout; + } else { + /* Sort thread into global sleepQ at appropriate point */ + q = _PR_SLEEPQ(cpu).next; + + /* Now scan the list for where to insert this entry */ + while (q != &_PR_SLEEPQ(cpu)) { + t = _PR_THREAD_PTR(q); + if (sleep < t->sleep) { + /* Found sleeper to insert in front of */ + break; + } + sleep -= t->sleep; + q = q->next; + } + thread->sleep = sleep; + PR_INSERT_BEFORE(&thread->links, q); + + /* + ** Subtract our sleep time from the sleeper that follows us (there + ** must be one) so that they remain relative to us. 
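+            **
+            ** Worked example (illustrative): with sleepers of 10, 25 and
+            ** 40 ticks stored as deltas 10, 15, 15, inserting a 30-tick
+            ** sleeper walks past 10 and 15 (leaving 30-10-15 == 5), is
+            ** queued with sleep == 5, and the 40-tick sleeper behind it
+            ** has its delta reduced from 15 to 10.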
+            */
+            PR_ASSERT(thread->links.next != &_PR_SLEEPQ(cpu));
+
+            t = _PR_THREAD_PTR(thread->links.next);
+            PR_ASSERT(_PR_THREAD_PTR(t->links.prev) == thread);
+            t->sleep -= sleep;
+        }
+
+        thread->flags |= _PR_ON_SLEEPQ;
+    }
+}
+
+/* FUNCTION: _PR_DelSleepQ
+** DESCRIPTION:
+**    Removes a thread from the sleep/pauseQ.
+** INPUTS:
+**    If propogate_time is true, then the thread following the deleted
+**    thread will get the time from the deleted thread. This is used
+**    when deleting a sleeper that has not timed out.
+** RESTRICTIONS:
+**    Caller must have the RUNQ lock.
+**    Caller must be a user-level thread.
+*/
+PR_IMPLEMENT(void)
+_PR_DelSleepQ(PRThread *thread, PRBool propogate_time)
+{
+    _PRCPU *cpu = thread->cpu;
+
+    /* Remove from pauseQ/sleepQ */
+    if (thread->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
+        if (thread->flags & _PR_ON_SLEEPQ) {
+            PRCList *q = thread->links.next;
+            if (q != &_PR_SLEEPQ(cpu)) {
+                if (propogate_time == PR_TRUE) {
+                    PRThread *after = _PR_THREAD_PTR(q);
+                    after->sleep += thread->sleep;
+                } else
+                    _PR_SLEEPQMAX(cpu) -= thread->sleep;
+            } else {
+                /* Check if prev is the beginning of the list; if so,
+                 * we are the only element on the list.
+                 */
+                if (thread->links.prev != &_PR_SLEEPQ(cpu))
+                    _PR_SLEEPQMAX(cpu) -= thread->sleep;
+                else
+                    _PR_SLEEPQMAX(cpu) = 0;
+            }
+            thread->flags &= ~_PR_ON_SLEEPQ;
+        } else {
+            thread->flags &= ~_PR_ON_PAUSEQ;
+        }
+        PR_REMOVE_LINK(&thread->links);
+    } else
+        PR_ASSERT(0);
+}
+
+void
+_PR_AddThreadToRunQ(
+    PRThread *me,      /* the current thread */
+    PRThread *thread)  /* the local thread to be added to a run queue */
+{
+    PRThreadPriority pri = thread->priority;
+    _PRCPU *cpu = thread->cpu;
+
+    PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));
+
+#if defined(WINNT)
+    /*
+     * On NT, we can only reliably know that the current CPU
+     * is not idle. We add the awakened thread to the run
+     * queue of its CPU if its CPU is the current CPU.
+     * For any other CPU, we don't really know whether it
+     * is busy or idle. So in all other cases, we just
+     * "post" the awakened thread to the IO completion port
+     * for the next idle CPU to execute (this is done in
+     * _PR_MD_WAKEUP_WAITER).
+     * Threads with a suspended I/O operation remain bound to
+     * the same cpu until the I/O is cancelled.
+     *
+     * NOTE: the boolean expression below must be the exact
+     * opposite of the corresponding boolean expression in
+     * _PR_MD_WAKEUP_WAITER.
+     */
+    if ((!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) ||
+        (thread->md.thr_bound_cpu)) {
+        PR_ASSERT(!thread->md.thr_bound_cpu ||
+                  (thread->md.thr_bound_cpu == cpu));
+        _PR_RUNQ_LOCK(cpu);
+        _PR_ADD_RUNQ(thread, cpu, pri);
+        _PR_RUNQ_UNLOCK(cpu);
+    }
+#else
+    _PR_RUNQ_LOCK(cpu);
+    _PR_ADD_RUNQ(thread, cpu, pri);
+    _PR_RUNQ_UNLOCK(cpu);
+    if (!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) {
+        if (pri > me->priority) {
+            _PR_SET_RESCHED_FLAG();
+        }
+    }
+#endif
+}
diff --git a/mozilla/nsprpub/pr/src/threads/prcmon.c b/mozilla/nsprpub/pr/src/threads/prcmon.c
new file mode 100644
index 0000000..67aee4c
--- /dev/null
+++ b/mozilla/nsprpub/pr/src/threads/prcmon.c
@@ -0,0 +1,463 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License.
You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is the Netscape Portable Runtime (NSPR). + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998-2000 + * the Initial Developer. All Rights Reserved. + * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + +#include "primpl.h" + +#include <stdlib.h> +#include <stddef.h> + +/* Lock used to lock the monitor cache */ +#ifdef _PR_NO_PREEMPT +#define _PR_NEW_LOCK_MCACHE() +#define _PR_DESTROY_LOCK_MCACHE() +#define _PR_LOCK_MCACHE() +#define _PR_UNLOCK_MCACHE() +#else +#ifdef _PR_LOCAL_THREADS_ONLY +#define _PR_NEW_LOCK_MCACHE() +#define _PR_DESTROY_LOCK_MCACHE() +#define _PR_LOCK_MCACHE() { PRIntn _is; _PR_INTSOFF(_is) +#define _PR_UNLOCK_MCACHE() _PR_INTSON(_is); } +#else +PRLock *_pr_mcacheLock; +#define _PR_NEW_LOCK_MCACHE() (_pr_mcacheLock = PR_NewLock()) +#define _PR_DESTROY_LOCK_MCACHE() \ + PR_BEGIN_MACRO \ + if (_pr_mcacheLock) { \ + PR_DestroyLock(_pr_mcacheLock); \ + _pr_mcacheLock = NULL; \ + } \ + PR_END_MACRO +#define _PR_LOCK_MCACHE() PR_Lock(_pr_mcacheLock) +#define _PR_UNLOCK_MCACHE() PR_Unlock(_pr_mcacheLock) +#endif +#endif + +/************************************************************************/ + +typedef struct MonitorCacheEntryStr MonitorCacheEntry; + +struct MonitorCacheEntryStr { + MonitorCacheEntry* next; + void* address; + PRMonitor* mon; + long cacheEntryCount; +}; + +/* +** An array of MonitorCacheEntry's, plus a pointer to link these +** arrays together. +*/ + +typedef struct MonitorCacheEntryBlockStr MonitorCacheEntryBlock; + +struct MonitorCacheEntryBlockStr { + MonitorCacheEntryBlock* next; + MonitorCacheEntry entries[1]; +}; + +static PRUint32 hash_mask; +static PRUintn num_hash_buckets; +static PRUintn num_hash_buckets_log2; +static MonitorCacheEntry **hash_buckets; +static MonitorCacheEntry *free_entries; +static PRUintn num_free_entries; +static PRBool expanding; +static MonitorCacheEntryBlock *mcache_blocks; + +static void (*OnMonitorRecycle)(void *address); + +#define HASH(address) \ + ((PRUint32) ( ((PRUptrdiff)(address) >> 2) ^ \ + ((PRUptrdiff)(address) >> 10) ) \ + & hash_mask) + +/* +** Expand the monitor cache. This grows the hash buckets and allocates a +** new chunk of cache entries and throws them on the free list. 
We keep +** as many hash buckets as there are entries. +** +** Because we call malloc and malloc may need the monitor cache, we must +** ensure that there are several free monitor cache entries available for +** malloc to get. FREE_THRESHOLD is used to prevent monitor cache +** starvation during monitor cache expansion. +*/ + +#define FREE_THRESHOLD 5 + +static PRStatus ExpandMonitorCache(PRUintn new_size_log2) +{ + MonitorCacheEntry **old_hash_buckets, *p; + PRUintn i, entries, old_num_hash_buckets, added; + MonitorCacheEntry **new_hash_buckets; + MonitorCacheEntryBlock *new_block; + + entries = 1L << new_size_log2; + + /* + ** Expand the monitor-cache-entry free list + */ + new_block = (MonitorCacheEntryBlock*) + PR_CALLOC(sizeof(MonitorCacheEntryBlock) + + (entries - 1) * sizeof(MonitorCacheEntry)); + if (NULL == new_block) return PR_FAILURE; + + /* + ** Allocate system monitors for the new monitor cache entries. If we + ** run out of system monitors, break out of the loop. + */ + for (i = 0, p = new_block->entries; i < entries; i++, p++) { + p->mon = PR_NewMonitor(); + if (!p->mon) + break; + } + added = i; + if (added != entries) { + MonitorCacheEntryBlock *realloc_block; + + if (added == 0) { + /* Totally out of system monitors. Lossage abounds */ + PR_DELETE(new_block); + return PR_FAILURE; + } + + /* + ** We were able to allocate some of the system monitors. Use + ** realloc to shrink down the new_block memory. If that fails, + ** carry on with the too-large new_block. + */ + realloc_block = (MonitorCacheEntryBlock*) + PR_REALLOC(new_block, sizeof(MonitorCacheEntryBlock) + + (added - 1) * sizeof(MonitorCacheEntry)); + if (realloc_block) + new_block = realloc_block; + } + + /* + ** Now that we have allocated all of the system monitors, build up + ** the new free list. We can just update the free_list because we own + ** the mcache-lock and we aren't calling anyone who might want to use + ** it. + */ + for (i = 0, p = new_block->entries; i < added - 1; i++, p++) + p->next = p + 1; + p->next = free_entries; + free_entries = new_block->entries; + num_free_entries += added; + new_block->next = mcache_blocks; + mcache_blocks = new_block; + + /* Try to expand the hash table */ + new_hash_buckets = (MonitorCacheEntry**) + PR_CALLOC(entries * sizeof(MonitorCacheEntry*)); + if (NULL == new_hash_buckets) { + /* + ** Partial lossage. In this situation we don't get any more hash + ** buckets, which just means that the table lookups will take + ** longer. This is bad, but not fatal + */ + PR_LOG(_pr_cmon_lm, PR_LOG_WARNING, + ("unable to grow monitor cache hash buckets")); + return PR_SUCCESS; + } + + /* + ** Compute new hash mask value. This value is used to mask an address + ** until it's bits are in the right spot for indexing into the hash + ** table. + */ + hash_mask = entries - 1; + + /* + ** Expand the hash table. We have to rehash everything in the old + ** table into the new table. + */ + old_hash_buckets = hash_buckets; + old_num_hash_buckets = num_hash_buckets; + for (i = 0; i < old_num_hash_buckets; i++) { + p = old_hash_buckets[i]; + while (p) { + MonitorCacheEntry *next = p->next; + + /* Hash based on new table size, and then put p in the new table */ + PRUintn hash = HASH(p->address); + p->next = new_hash_buckets[hash]; + new_hash_buckets[hash] = p; + + p = next; + } + } + + /* + ** Switch over to new hash table and THEN call free of the old + ** table. Since free might re-enter _pr_mcache_lock, things would + ** break terribly if it used the old hash table. 
+ */ + hash_buckets = new_hash_buckets; + num_hash_buckets = entries; + num_hash_buckets_log2 = new_size_log2; + PR_DELETE(old_hash_buckets); + + PR_LOG(_pr_cmon_lm, PR_LOG_NOTICE, + ("expanded monitor cache to %d (buckets %d)", + num_free_entries, entries)); + + return PR_SUCCESS; +} /* ExpandMonitorCache */ + +/* +** Lookup a monitor cache entry by address. Return a pointer to the +** pointer to the monitor cache entry on success, null on failure. +*/ +static MonitorCacheEntry **LookupMonitorCacheEntry(void *address) +{ + PRUintn hash; + MonitorCacheEntry **pp, *p; + + hash = HASH(address); + pp = hash_buckets + hash; + while ((p = *pp) != 0) { + if (p->address == address) { + if (p->cacheEntryCount > 0) + return pp; + return NULL; + } + pp = &p->next; + } + return NULL; +} + +/* +** Try to create a new cached monitor. If it's already in the cache, +** great - return it. Otherwise get a new free cache entry and set it +** up. If the cache free space is getting low, expand the cache. +*/ +static PRMonitor *CreateMonitor(void *address) +{ + PRUintn hash; + MonitorCacheEntry **pp, *p; + + hash = HASH(address); + pp = hash_buckets + hash; + while ((p = *pp) != 0) { + if (p->address == address) goto gotit; + + pp = &p->next; + } + + /* Expand the monitor cache if we have run out of free slots in the table */ + if (num_free_entries < FREE_THRESHOLD) { + /* Expand monitor cache */ + + /* + ** This function is called with the lock held. So what's the 'expanding' + ** boolean all about? Seems a bit redundant. + */ + if (!expanding) { + PRStatus rv; + + expanding = PR_TRUE; + rv = ExpandMonitorCache(num_hash_buckets_log2 + 1); + expanding = PR_FALSE; + if (PR_FAILURE == rv) return NULL; + + /* redo the hash because it'll be different now */ + hash = HASH(address); + } else { + /* + ** We are in process of expanding and we need a cache + ** monitor. Make sure we have enough! + */ + PR_ASSERT(num_free_entries > 0); + } + } + + /* Make a new monitor */ + p = free_entries; + free_entries = p->next; + num_free_entries--; + if (OnMonitorRecycle && p->address) + OnMonitorRecycle(p->address); + p->address = address; + p->next = hash_buckets[hash]; + hash_buckets[hash] = p; + PR_ASSERT(p->cacheEntryCount == 0); + + gotit: + p->cacheEntryCount++; + return p->mon; +} + +/* +** Initialize the monitor cache +*/ +void _PR_InitCMon(void) +{ + _PR_NEW_LOCK_MCACHE(); + ExpandMonitorCache(3); +} + +/* +** Destroy the monitor cache +*/ +void _PR_CleanupCMon(void) +{ + _PR_DESTROY_LOCK_MCACHE(); + + while (free_entries) { + PR_DestroyMonitor(free_entries->mon); + free_entries = free_entries->next; + } + num_free_entries = 0; + + while (mcache_blocks) { + MonitorCacheEntryBlock *block; + + block = mcache_blocks; + mcache_blocks = block->next; + PR_DELETE(block); + } + + PR_DELETE(hash_buckets); + hash_mask = 0; + num_hash_buckets = 0; + num_hash_buckets_log2 = 0; + + expanding = PR_FALSE; + OnMonitorRecycle = NULL; +} + +/* +** Create monitor for address. Don't enter the monitor while we have the +** mcache locked because we might block! 
+*/ +PR_IMPLEMENT(PRMonitor*) PR_CEnterMonitor(void *address) +{ + PRMonitor *mon; + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + _PR_LOCK_MCACHE(); + mon = CreateMonitor(address); + _PR_UNLOCK_MCACHE(); + + if (!mon) return NULL; + + PR_EnterMonitor(mon); + return mon; +} + +PR_IMPLEMENT(PRStatus) PR_CExitMonitor(void *address) +{ + MonitorCacheEntry **pp, *p; + PRStatus status = PR_SUCCESS; + + _PR_LOCK_MCACHE(); + pp = LookupMonitorCacheEntry(address); + if (pp != NULL) { + p = *pp; + if (--p->cacheEntryCount == 0) { + /* + ** Nobody is using the system monitor. Put it on the cached free + ** list. We are safe from somebody trying to use it because we + ** have the mcache locked. + */ + p->address = 0; /* defensive move */ + *pp = p->next; /* unlink from hash_buckets */ + p->next = free_entries; /* link into free list */ + free_entries = p; + num_free_entries++; /* count it as free */ + } + status = PR_ExitMonitor(p->mon); + } else { + status = PR_FAILURE; + } + _PR_UNLOCK_MCACHE(); + + return status; +} + +PR_IMPLEMENT(PRStatus) PR_CWait(void *address, PRIntervalTime ticks) +{ + MonitorCacheEntry **pp; + PRMonitor *mon; + + _PR_LOCK_MCACHE(); + pp = LookupMonitorCacheEntry(address); + mon = pp ? ((*pp)->mon) : NULL; + _PR_UNLOCK_MCACHE(); + + if (mon == NULL) + return PR_FAILURE; + return PR_Wait(mon, ticks); +} + +PR_IMPLEMENT(PRStatus) PR_CNotify(void *address) +{ + MonitorCacheEntry **pp; + PRMonitor *mon; + + _PR_LOCK_MCACHE(); + pp = LookupMonitorCacheEntry(address); + mon = pp ? ((*pp)->mon) : NULL; + _PR_UNLOCK_MCACHE(); + + if (mon == NULL) + return PR_FAILURE; + return PR_Notify(mon); +} + +PR_IMPLEMENT(PRStatus) PR_CNotifyAll(void *address) +{ + MonitorCacheEntry **pp; + PRMonitor *mon; + + _PR_LOCK_MCACHE(); + pp = LookupMonitorCacheEntry(address); + mon = pp ? ((*pp)->mon) : NULL; + _PR_UNLOCK_MCACHE(); + + if (mon == NULL) + return PR_FAILURE; + return PR_NotifyAll(mon); +} + +PR_IMPLEMENT(void) +PR_CSetOnMonitorRecycle(void (*callback)(void *address)) +{ + OnMonitorRecycle = callback; +} diff --git a/mozilla/nsprpub/pr/src/threads/prcthr.c b/mozilla/nsprpub/pr/src/threads/prcthr.c new file mode 100644 index 0000000..7c3e627 --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/prcthr.c @@ -0,0 +1,426 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is the Netscape Portable Runtime (NSPR). + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998-2000 + * the Initial Developer. All Rights Reserved. 
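
prcmon.c above implements the cached-monitor API: PR_CEnterMonitor() finds or creates a monitor keyed by an arbitrary address, so a structure can be synchronized without embedding a lock in it. A usage sketch follows; the Account type and helper names are hypothetical, while the PR_C* calls are the API implemented above:

    #include "nspr.h"

    typedef struct Account { long balance; } Account;

    static void Deposit(Account *a, long amount)
    {
        PR_CEnterMonitor(a);          /* find-or-create the monitor for a */
        a->balance += amount;
        PR_CNotify(a);                /* wake one thread in PR_CWait(a, ...) */
        PR_CExitMonitor(a);
    }

    static long WaitForFunds(Account *a, long need)
    {
        long got;
        PR_CEnterMonitor(a);
        while (a->balance < need)     /* recheck after every wakeup */
            PR_CWait(a, PR_INTERVAL_NO_TIMEOUT);
        got = a->balance;
        PR_CExitMonitor(a);
        return got;
    }
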
+ * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + +#include "primpl.h" + +#if defined(WIN95) +/* +** Some local variables report warnings on Win95 because the code paths +** using them are conditioned on HAVE_CUSTOME_USER_THREADS. +** The pragma suppresses the warning. +** +*/ +#pragma warning(disable : 4101) +#endif + + +extern PRLock *_pr_sleeplock; /* allocated and initialized in prinit */ +/* +** Routines common to both native and user threads. +** +** +** Clean up a thread object, releasing all of the attached data. Do not +** free the object itself (it may not have been malloc'd) +*/ +void _PR_CleanupThread(PRThread *thread) +{ + /* Free up per-thread-data */ + _PR_DestroyThreadPrivate(thread); + + /* Free any thread dump procs */ + if (thread->dumpArg) { + PR_DELETE(thread->dumpArg); + } + thread->dump = 0; + + PR_DELETE(thread->errorString); + thread->errorStringSize = 0; + thread->errorStringLength = 0; + thread->environment = NULL; +} + +PR_IMPLEMENT(PRStatus) PR_Yield() +{ + static PRBool warning = PR_TRUE; + if (warning) warning = _PR_Obsolete( + "PR_Yield()", "PR_Sleep(PR_INTERVAL_NO_WAIT)"); + return (PR_Sleep(PR_INTERVAL_NO_WAIT)); +} + +/* +** Make the current thread sleep until "timeout" ticks amount of time +** has expired. If "timeout" is PR_INTERVAL_NO_WAIT then the call is +** equivalent to a yield. Waiting for an infinite amount of time is +** allowed in the expectation that another thread will interrupt(). +** +** A single lock is used for all threads calling sleep. Each caller +** does get its own condition variable since each is expected to have +** a unique 'timeout'. +*/ +PR_IMPLEMENT(PRStatus) PR_Sleep(PRIntervalTime timeout) +{ + PRStatus rv = PR_SUCCESS; + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + if (PR_INTERVAL_NO_WAIT == timeout) + { + /* + ** This is a simple yield, nothing more, nothing less. + */ + PRIntn is; + PRThread *me = PR_GetCurrentThread(); + PRUintn pri = me->priority; + _PRCPU *cpu = _PR_MD_CURRENT_CPU(); + + if ( _PR_IS_NATIVE_THREAD(me) ) _PR_MD_YIELD(); + else + { + _PR_INTSOFF(is); + _PR_RUNQ_LOCK(cpu); + if (_PR_RUNQREADYMASK(cpu) >> pri) { + me->cpu = cpu; + me->state = _PR_RUNNABLE; + _PR_ADD_RUNQ(me, cpu, pri); + _PR_RUNQ_UNLOCK(cpu); + + PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("PR_Yield: yielding")); + _PR_MD_SWITCH_CONTEXT(me); + PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("PR_Yield: done")); + + _PR_FAST_INTSON(is); + } + else + { + _PR_RUNQ_UNLOCK(cpu); + _PR_INTSON(is); + } + } + } + else + { + /* + ** This is waiting for some finite period of time. 
+ ** A thread in this state is interruptible (PR_Interrupt()), + ** but the lock and cvar used are local to the implementation + ** and not visible to the caller, therefore not notifiable. + */ + PRCondVar *cv; + PRIntervalTime timein; + + timein = PR_IntervalNow(); + cv = PR_NewCondVar(_pr_sleeplock); + PR_ASSERT(cv != NULL); + PR_Lock(_pr_sleeplock); + do + { + PRIntervalTime delta = PR_IntervalNow() - timein; + if (delta > timeout) break; + rv = PR_WaitCondVar(cv, timeout - delta); + } while (rv == PR_SUCCESS); + PR_Unlock(_pr_sleeplock); + PR_DestroyCondVar(cv); + } + return rv; +} + +PR_IMPLEMENT(PRUint32) PR_GetThreadID(PRThread *thread) +{ + return thread->id; +} + +PR_IMPLEMENT(PRThreadPriority) PR_GetThreadPriority(const PRThread *thread) +{ + return (PRThreadPriority) thread->priority; +} + +PR_IMPLEMENT(PRThread *) PR_GetCurrentThread() +{ + if (!_pr_initialized) _PR_ImplicitInitialization(); + return _PR_MD_CURRENT_THREAD(); +} + +/* +** Set the interrupt flag for a thread. The thread will be unable to +** block in i/o functions when this happens. Also, any PR_Wait's in +** progress will be undone. The interrupt remains in force until +** PR_ClearInterrupt is called. +*/ +PR_IMPLEMENT(PRStatus) PR_Interrupt(PRThread *thread) +{ +#ifdef _PR_GLOBAL_THREADS_ONLY + PRCondVar *victim; + + _PR_THREAD_LOCK(thread); + thread->flags |= _PR_INTERRUPT; + victim = thread->wait.cvar; + _PR_THREAD_UNLOCK(thread); + if ((NULL != victim) && (!(thread->flags & _PR_INTERRUPT_BLOCKED))) { + int haveLock = (victim->lock->owner == _PR_MD_CURRENT_THREAD()); + + if (!haveLock) PR_Lock(victim->lock); + PR_NotifyAllCondVar(victim); + if (!haveLock) PR_Unlock(victim->lock); + } + return PR_SUCCESS; +#else /* ! _PR_GLOBAL_THREADS_ONLY */ + PRIntn is; + PRThread *me = _PR_MD_CURRENT_THREAD(); + + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSOFF(is); + + _PR_THREAD_LOCK(thread); + thread->flags |= _PR_INTERRUPT; + switch (thread->state) { + case _PR_COND_WAIT: + /* + * call is made with thread locked; + * on return lock is released + */ + if (!(thread->flags & _PR_INTERRUPT_BLOCKED)) + _PR_NotifyLockedThread(thread); + break; + case _PR_IO_WAIT: + /* + * Need to hold the thread lock when calling + * _PR_Unblock_IO_Wait(). On return lock is + * released. + */ +#if defined(XP_UNIX) || defined(WINNT) || defined(WIN16) + if (!(thread->flags & _PR_INTERRUPT_BLOCKED)) + _PR_Unblock_IO_Wait(thread); +#else + _PR_THREAD_UNLOCK(thread); +#endif + break; + case _PR_RUNNING: + case _PR_RUNNABLE: + case _PR_LOCK_WAIT: + default: + _PR_THREAD_UNLOCK(thread); + break; + } + if (!_PR_IS_NATIVE_THREAD(me)) + _PR_INTSON(is); + return PR_SUCCESS; +#endif /* _PR_GLOBAL_THREADS_ONLY */ +} + +/* +** Clear the interrupt flag for self. 
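
PR_Sleep() above waits on a condition variable private to the implementation, so, as its comment notes, the only way to end the wait early is PR_Interrupt(). A sketch of that interplay, assuming hypothetical Worker/main functions; the NSPR calls and the PR_PENDING_INTERRUPT_ERROR code are standard:

    #include <stdio.h>
    #include "nspr.h"

    static void Worker(void *arg)
    {
        (void)arg;
        /* blocks up to 60s unless another thread interrupts us */
        if (PR_Sleep(PR_SecondsToInterval(60)) == PR_FAILURE &&
            PR_GetError() == PR_PENDING_INTERRUPT_ERROR)
            printf("sleep interrupted\n");
    }

    int main(void)
    {
        PRThread *t = PR_CreateThread(PR_USER_THREAD, Worker, NULL,
                                      PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                                      PR_JOINABLE_THREAD, 0);
        PR_Sleep(PR_SecondsToInterval(1));
        PR_Interrupt(t);              /* unblocks the PR_Sleep() above */
        PR_JoinThread(t);
        return 0;
    }
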
+*/ +PR_IMPLEMENT(void) PR_ClearInterrupt() +{ + PRIntn is; + PRThread *me = _PR_MD_CURRENT_THREAD(); + + if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is); + _PR_THREAD_LOCK(me); + me->flags &= ~_PR_INTERRUPT; + _PR_THREAD_UNLOCK(me); + if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is); +} + +PR_IMPLEMENT(void) PR_BlockInterrupt() +{ + PRIntn is; + PRThread *me = _PR_MD_CURRENT_THREAD(); + + if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is); + _PR_THREAD_LOCK(me); + _PR_THREAD_BLOCK_INTERRUPT(me); + _PR_THREAD_UNLOCK(me); + if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is); +} /* PR_BlockInterrupt */ + +PR_IMPLEMENT(void) PR_UnblockInterrupt() +{ + PRIntn is; + PRThread *me = _PR_MD_CURRENT_THREAD(); + + if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is); + _PR_THREAD_LOCK(me); + _PR_THREAD_UNBLOCK_INTERRUPT(me); + _PR_THREAD_UNLOCK(me); + if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is); +} /* PR_UnblockInterrupt */ + +/* +** Return the thread stack pointer of the given thread. +*/ +PR_IMPLEMENT(void *) PR_GetSP(PRThread *thread) +{ + return (void *)_PR_MD_GET_SP(thread); +} + +PR_IMPLEMENT(void*) GetExecutionEnvironment(PRThread *thread) +{ + return thread->environment; +} + +PR_IMPLEMENT(void) SetExecutionEnvironment(PRThread *thread, void *env) +{ + thread->environment = env; +} + + +PR_IMPLEMENT(PRInt32) PR_GetThreadAffinityMask(PRThread *thread, PRUint32 *mask) +{ +#ifdef HAVE_THREAD_AFFINITY + return _PR_MD_GETTHREADAFFINITYMASK(thread, mask); +#else + return 0; +#endif +} + +PR_IMPLEMENT(PRInt32) PR_SetThreadAffinityMask(PRThread *thread, PRUint32 mask ) +{ +#ifdef HAVE_THREAD_AFFINITY +#ifndef IRIX + return _PR_MD_SETTHREADAFFINITYMASK(thread, mask); +#else + return 0; +#endif +#else + return 0; +#endif +} + +/* This call is thread unsafe if another thread is calling SetConcurrency() + */ +PR_IMPLEMENT(PRInt32) PR_SetCPUAffinityMask(PRUint32 mask) +{ +#ifdef HAVE_THREAD_AFFINITY + PRCList *qp; + extern PRUint32 _pr_cpu_affinity_mask; + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + _pr_cpu_affinity_mask = mask; + + qp = _PR_CPUQ().next; + while(qp != &_PR_CPUQ()) { + _PRCPU *cpu; + + cpu = _PR_CPU_PTR(qp); + PR_SetThreadAffinityMask(cpu->thread, mask); + + qp = qp->next; + } +#endif + + return 0; +} + +PRUint32 _pr_recycleThreads = 0; +PR_IMPLEMENT(void) PR_SetThreadRecycleMode(PRUint32 count) +{ + _pr_recycleThreads = count; +} + +PR_IMPLEMENT(PRThread*) PR_CreateThreadGCAble(PRThreadType type, + void (*start)(void *arg), + void *arg, + PRThreadPriority priority, + PRThreadScope scope, + PRThreadState state, + PRUint32 stackSize) +{ + return _PR_CreateThread(type, start, arg, priority, scope, state, + stackSize, _PR_GCABLE_THREAD); +} + +#ifdef SOLARIS +PR_IMPLEMENT(PRThread*) PR_CreateThreadBound(PRThreadType type, + void (*start)(void *arg), + void *arg, + PRUintn priority, + PRThreadScope scope, + PRThreadState state, + PRUint32 stackSize) +{ + return _PR_CreateThread(type, start, arg, priority, scope, state, + stackSize, _PR_BOUND_THREAD); +} +#endif + + +PR_IMPLEMENT(PRThread*) PR_AttachThreadGCAble( + PRThreadType type, PRThreadPriority priority, PRThreadStack *stack) +{ + /* $$$$ not sure how to finese this one */ + PR_SetError(PR_NOT_IMPLEMENTED_ERROR, 0); + return NULL; +} + +PR_IMPLEMENT(void) PR_SetThreadGCAble() +{ + if (!_pr_initialized) _PR_ImplicitInitialization(); + PR_Lock(_pr_activeLock); + _PR_MD_CURRENT_THREAD()->flags |= _PR_GCABLE_THREAD; + PR_Unlock(_pr_activeLock); +} + +PR_IMPLEMENT(void) PR_ClearThreadGCAble() +{ + if (!_pr_initialized) 
_PR_ImplicitInitialization(); + PR_Lock(_pr_activeLock); + _PR_MD_CURRENT_THREAD()->flags &= (~_PR_GCABLE_THREAD); + PR_Unlock(_pr_activeLock); +} + +PR_IMPLEMENT(PRThreadScope) PR_GetThreadScope(const PRThread *thread) +{ + if (!_pr_initialized) _PR_ImplicitInitialization(); + + if (_PR_IS_NATIVE_THREAD(thread)) { + return (thread->flags & _PR_BOUND_THREAD) ? PR_GLOBAL_BOUND_THREAD : + PR_GLOBAL_THREAD; + } else + return PR_LOCAL_THREAD; +} + +PR_IMPLEMENT(PRThreadType) PR_GetThreadType(const PRThread *thread) +{ + return (thread->flags & _PR_SYSTEM) ? PR_SYSTEM_THREAD : PR_USER_THREAD; +} + +PR_IMPLEMENT(PRThreadState) PR_GetThreadState(const PRThread *thread) +{ + return (NULL == thread->term) ? PR_UNJOINABLE_THREAD : PR_JOINABLE_THREAD; +} /* PR_GetThreadState */ diff --git a/mozilla/nsprpub/pr/src/threads/prdump.c b/mozilla/nsprpub/pr/src/threads/prdump.c new file mode 100644 index 0000000..3ea884d --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/prdump.c @@ -0,0 +1,153 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is the Netscape Portable Runtime (NSPR). + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998-2000 + * the Initial Developer. All Rights Reserved. + * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + +#include "primpl.h" + +#if defined(WIN95) +/* +** Some local variables report warnings on Win95 because the code paths +** using them are conditioned on HAVE_CUSTOME_USER_THREADS. +** The pragma suppresses the warning. +** +*/ +#pragma warning(disable : 4101) +#endif + +/* XXX use unbuffered nspr stdio */ + +PRFileDesc *_pr_dumpOut; + +PRUint32 _PR_DumpPrintf(PRFileDesc *fd, const char *fmt, ...) 
+{ + char buf[100]; + PRUint32 nb; + va_list ap; + + va_start(ap, fmt); + nb = PR_vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + PR_Write(fd, buf, nb); + + return nb; +} + +void _PR_DumpThread(PRFileDesc *fd, PRThread *thread) +{ + +#ifndef _PR_GLOBAL_THREADS_ONLY + _PR_DumpPrintf(fd, "%05d[%08p] pri=%2d flags=0x%02x", + thread->id, thread, thread->priority, thread->flags); + switch (thread->state) { + case _PR_RUNNABLE: + case _PR_RUNNING: + break; + case _PR_LOCK_WAIT: + _PR_DumpPrintf(fd, " lock=%p", thread->wait.lock); + break; + case _PR_COND_WAIT: + _PR_DumpPrintf(fd, " condvar=%p sleep=%lldms", + thread->wait.cvar, thread->sleep); + break; + case _PR_SUSPENDED: + _PR_DumpPrintf(fd, " suspended"); + break; + } + PR_Write(fd, "\n", 1); +#endif + + /* Now call dump routine */ + if (thread->dump) { + thread->dump(fd, thread, thread->dumpArg); + } +} + +static void DumpThreadQueue(PRFileDesc *fd, PRCList *list) +{ +#ifndef _PR_GLOBAL_THREADS_ONLY + PRCList *q; + + q = list->next; + while (q != list) { + PRThread *t = _PR_THREAD_PTR(q); + _PR_DumpThread(fd, t); + q = q->next; + } +#endif +} + +void _PR_DumpThreads(PRFileDesc *fd) +{ + PRThread *t; + PRIntn i; + + _PR_DumpPrintf(fd, "Current Thread:\n"); + t = _PR_MD_CURRENT_THREAD(); + _PR_DumpThread(fd, t); + + _PR_DumpPrintf(fd, "Runnable Threads:\n"); + for (i = 0; i < 32; i++) { + DumpThreadQueue(fd, &_PR_RUNQ(t->cpu)[i]); + } + + _PR_DumpPrintf(fd, "CondVar timed wait Threads:\n"); + DumpThreadQueue(fd, &_PR_SLEEPQ(t->cpu)); + + _PR_DumpPrintf(fd, "CondVar wait Threads:\n"); + DumpThreadQueue(fd, &_PR_PAUSEQ(t->cpu)); + + _PR_DumpPrintf(fd, "Suspended Threads:\n"); + DumpThreadQueue(fd, &_PR_SUSPENDQ(t->cpu)); +} + +PR_IMPLEMENT(void) PR_ShowStatus(void) +{ + PRIntn is; + + if ( _PR_MD_CURRENT_THREAD() + && !_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) _PR_INTSOFF(is); + _pr_dumpOut = _pr_stderr; + _PR_DumpThreads(_pr_dumpOut); + if ( _PR_MD_CURRENT_THREAD() + && !_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) _PR_FAST_INTSON(is); +} + +PR_IMPLEMENT(void) +PR_SetThreadDumpProc(PRThread* thread, PRThreadDumpProc dump, void *arg) +{ + thread->dump = dump; + thread->dumpArg = arg; +} diff --git a/mozilla/nsprpub/pr/src/threads/prmon.c b/mozilla/nsprpub/pr/src/threads/prmon.c new file mode 100644 index 0000000..dc8a32c --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/prmon.c @@ -0,0 +1,231 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is the Netscape Portable Runtime (NSPR). + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998-2000 + * the Initial Developer. All Rights Reserved. 
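
prdump.c above lets a thread register a callback that PR_ShowStatus() invokes after printing NSPR's own state line for that thread. A sketch with a hypothetical DumpLabel callback; note that _PR_CleanupThread(), earlier in this diff, calls PR_DELETE on dumpArg at thread exit, so the argument should come from the NSPR allocator:

    #include <string.h>
    #include "nspr.h"

    /* Hypothetical dump callback; fd is the stream PR_ShowStatus() writes. */
    static void DumpLabel(PRFileDesc *fd, PRThread *t, void *arg)
    {
        (void)t;
        PR_fprintf(fd, "  app label: %s\n", (const char *)arg);
    }

    void RegisterDump(void)
    {
        char *label = (char *)PR_Malloc(8);   /* freed by NSPR at thread exit */
        strcpy(label, "worker");
        PR_SetThreadDumpProc(PR_GetCurrentThread(), DumpLabel, label);
        PR_ShowStatus();                      /* dump thread state to stderr */
    }
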
+ * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + +#include "primpl.h" + +/************************************************************************/ + +/* +** Create a new monitor. +*/ +PR_IMPLEMENT(PRMonitor*) PR_NewMonitor() +{ + PRMonitor *mon; + PRCondVar *cvar; + PRLock *lock; + + mon = PR_NEWZAP(PRMonitor); + if (mon) { + lock = PR_NewLock(); + if (!lock) { + PR_DELETE(mon); + return 0; + } + + cvar = PR_NewCondVar(lock); + if (!cvar) { + PR_DestroyLock(lock); + PR_DELETE(mon); + return 0; + } + mon->cvar = cvar; + mon->name = NULL; + } + return mon; +} + +PR_IMPLEMENT(PRMonitor*) PR_NewNamedMonitor(const char* name) +{ + PRMonitor* mon = PR_NewMonitor(); + if (mon) + mon->name = name; + return mon; +} + +/* +** Destroy a monitor. There must be no thread waiting on the monitor's +** condition variable. The caller is responsible for guaranteeing that the +** monitor is no longer in use. +*/ +PR_IMPLEMENT(void) PR_DestroyMonitor(PRMonitor *mon) +{ + PR_DestroyLock(mon->cvar->lock); + PR_DestroyCondVar(mon->cvar); + PR_DELETE(mon); +} + +/* +** Enter the lock associated with the monitor. +*/ +PR_IMPLEMENT(void) PR_EnterMonitor(PRMonitor *mon) +{ + if (mon->cvar->lock->owner == _PR_MD_CURRENT_THREAD()) { + mon->entryCount++; + } else { + PR_Lock(mon->cvar->lock); + mon->entryCount = 1; + } +} + +/* +** Test and then enter the lock associated with the monitor if it's not +** already entered by some other thread. Return PR_FALSE if some other +** thread owned the lock at the time of the call. +*/ +PR_IMPLEMENT(PRBool) PR_TestAndEnterMonitor(PRMonitor *mon) +{ + if (mon->cvar->lock->owner == _PR_MD_CURRENT_THREAD()) { + mon->entryCount++; + return PR_TRUE; + } else { + if (PR_TestAndLock(mon->cvar->lock)) { + mon->entryCount = 1; + return PR_TRUE; + } + } + return PR_FALSE; +} + +/* +** Exit the lock associated with the monitor once. +*/ +PR_IMPLEMENT(PRStatus) PR_ExitMonitor(PRMonitor *mon) +{ + if (mon->cvar->lock->owner != _PR_MD_CURRENT_THREAD()) { + return PR_FAILURE; + } + if (--mon->entryCount == 0) { + return PR_Unlock(mon->cvar->lock); + } + return PR_SUCCESS; +} + +/* +** Return the number of times that the current thread has entered the +** lock. Returns zero if the current thread has not entered the lock. +*/ +PR_IMPLEMENT(PRIntn) PR_GetMonitorEntryCount(PRMonitor *mon) +{ + return (mon->cvar->lock->owner == _PR_MD_CURRENT_THREAD()) ? + mon->entryCount : 0; +} + +/* +** If the current thread is in |mon|, this assertion is guaranteed to +** succeed. Otherwise, the behavior of this function is undefined. 
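
The entryCount bookkeeping above makes a PRMonitor re-entrant for its owner: each PR_EnterMonitor() by the owning thread bumps the count, and the lock is released only when PR_ExitMonitor() brings it back to zero. A small sketch using only the calls defined above; Demo is a hypothetical name:

    #include "nspr.h"

    void Demo(void)
    {
        PRMonitor *mon = PR_NewMonitor();

        PR_EnterMonitor(mon);
        PR_EnterMonitor(mon);                     /* re-entry, no deadlock */
        PR_ASSERT(PR_GetMonitorEntryCount(mon) == 2);

        PR_ExitMonitor(mon);                      /* still owned ... */
        PR_ASSERT(PR_GetMonitorEntryCount(mon) == 1);
        PR_ExitMonitor(mon);                      /* ... now released */

        PR_DestroyMonitor(mon);
    }
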
+*/
+PR_IMPLEMENT(void) PR_AssertCurrentThreadInMonitor(PRMonitor *mon)
+{
+    PR_ASSERT_CURRENT_THREAD_OWNS_LOCK(mon->cvar->lock);
+}
+
+/*
+** Wait for a notify on the condition variable. Sleep for "ticks" amount
+** of time (if "ticks" is 0 then the sleep is indefinite). While
+** the thread is waiting it exits the monitor's lock (as if it called
+** PR_ExitMonitor as many times as it had called PR_EnterMonitor). When
+** the wait has finished the thread regains control of the monitor's lock
+** with the same entry count as before the wait began.
+**
+** The thread waiting on the monitor will be resumed when the monitor is
+** notified (assuming the thread is the next in line to receive the
+** notify) or when the "ticks" interval elapses.
+**
+** Returns PR_FAILURE if the caller has not locked the lock associated
+** with the condition variable.
+** This routine can return PR_PENDING_INTERRUPT if the waiting thread
+** has been interrupted.
+*/
+PR_IMPLEMENT(PRStatus) PR_Wait(PRMonitor *mon, PRIntervalTime ticks)
+{
+    PRUintn entryCount;
+    PRStatus status;
+    PRThread *me = _PR_MD_CURRENT_THREAD();
+
+    if (mon->cvar->lock->owner != me) return PR_FAILURE;
+
+    entryCount = mon->entryCount;
+    mon->entryCount = 0;
+
+    status = _PR_WaitCondVar(me, mon->cvar, mon->cvar->lock, ticks);
+
+    mon->entryCount = entryCount;
+
+    return status;
+}
+
+/*
+** Notify the highest priority thread waiting on the condition
+** variable. If a thread is waiting on the condition variable (using
+** PR_Wait) then it is awakened and begins waiting on the monitor's lock.
+*/
+PR_IMPLEMENT(PRStatus) PR_Notify(PRMonitor *mon)
+{
+    PRThread *me = _PR_MD_CURRENT_THREAD();
+    if (mon->cvar->lock->owner != me) return PR_FAILURE;
+    PR_NotifyCondVar(mon->cvar);
+    return PR_SUCCESS;
+}
+
+/*
+** Notify all of the threads waiting on the condition variable. All of
+** the threads are notified in turn. The highest priority thread will
+** probably acquire the monitor first when the monitor is exited.
+*/
+PR_IMPLEMENT(PRStatus) PR_NotifyAll(PRMonitor *mon)
+{
+    PRThread *me = _PR_MD_CURRENT_THREAD();
+    if (mon->cvar->lock->owner != me) return PR_FAILURE;
+    PR_NotifyAllCondVar(mon->cvar);
+    return PR_SUCCESS;
+}
+
+/************************************************************************/
+
+PRUint32 _PR_MonitorToString(PRMonitor *mon, char *buf, PRUint32 buflen)
+{
+    PRUint32 nb;
+
+    if (mon->cvar->lock->owner) {
+        nb = PR_snprintf(buf, buflen, "[%p] owner=%d[%p] count=%ld",
+                         mon, mon->cvar->lock->owner->id,
+                         mon->cvar->lock->owner, mon->entryCount);
+    } else {
+        nb = PR_snprintf(buf, buflen, "[%p]", mon);
+    }
+    return nb;
+}
diff --git a/mozilla/nsprpub/pr/src/threads/prrwlock.c b/mozilla/nsprpub/pr/src/threads/prrwlock.c
new file mode 100644
index 0000000..35e0e3e
--- /dev/null
+++ b/mozilla/nsprpub/pr/src/threads/prrwlock.c
@@ -0,0 +1,512 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
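
PR_Wait() above saves the entry count, releases the monitor entirely while blocked, and restores the count on wakeup, which is what makes the guarded-wait pattern safe even in re-entered monitors. A producer/consumer sketch; the flag and function names are hypothetical, and mon is assumed to have been created with PR_NewMonitor():

    #include "nspr.h"

    static PRMonitor *mon;            /* assume PR_NewMonitor() at startup */
    static PRBool ready = PR_FALSE;

    void Produce(void)
    {
        PR_EnterMonitor(mon);
        ready = PR_TRUE;
        PR_Notify(mon);               /* wake one waiter */
        PR_ExitMonitor(mon);
    }

    void Consume(void)
    {
        PR_EnterMonitor(mon);
        while (!ready)                /* guard against stray wakeups */
            PR_Wait(mon, PR_INTERVAL_NO_TIMEOUT);
        ready = PR_FALSE;
        PR_ExitMonitor(mon);
    }
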
+ * + * The Original Code is the Netscape Portable Runtime (NSPR). + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998-2000 + * the Initial Developer. All Rights Reserved. + * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + +#include "primpl.h" + +#include <string.h> + +#if defined(HPUX) && defined(_PR_PTHREADS) && !defined(_PR_DCETHREADS) + +#include <pthread.h> +#define HAVE_UNIX98_RWLOCK +#define RWLOCK_T pthread_rwlock_t +#define RWLOCK_INIT(lock) pthread_rwlock_init(lock, NULL) +#define RWLOCK_DESTROY(lock) pthread_rwlock_destroy(lock) +#define RWLOCK_RDLOCK(lock) pthread_rwlock_rdlock(lock) +#define RWLOCK_WRLOCK(lock) pthread_rwlock_wrlock(lock) +#define RWLOCK_UNLOCK(lock) pthread_rwlock_unlock(lock) + +#elif defined(SOLARIS) && (defined(_PR_PTHREADS) \ + || defined(_PR_GLOBAL_THREADS_ONLY)) + +#include <synch.h> +#define HAVE_UI_RWLOCK +#define RWLOCK_T rwlock_t +#define RWLOCK_INIT(lock) rwlock_init(lock, USYNC_THREAD, NULL) +#define RWLOCK_DESTROY(lock) rwlock_destroy(lock) +#define RWLOCK_RDLOCK(lock) rw_rdlock(lock) +#define RWLOCK_WRLOCK(lock) rw_wrlock(lock) +#define RWLOCK_UNLOCK(lock) rw_unlock(lock) + +#endif + +/* + * Reader-writer lock + */ +struct PRRWLock { + char *rw_name; /* lock name */ + PRUint32 rw_rank; /* rank of the lock */ + +#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK) + RWLOCK_T rw_lock; +#else + PRLock *rw_lock; + PRInt32 rw_lock_cnt; /* == 0, if unlocked */ + /* == -1, if write-locked */ + /* > 0 , # of read locks */ + PRUint32 rw_reader_cnt; /* number of waiting readers */ + PRUint32 rw_writer_cnt; /* number of waiting writers */ + PRCondVar *rw_reader_waitq; /* cvar for readers */ + PRCondVar *rw_writer_waitq; /* cvar for writers */ +#ifdef DEBUG + PRThread *rw_owner; /* lock owner for write-lock */ +#endif +#endif +}; + +#ifdef DEBUG +#define _PR_RWLOCK_RANK_ORDER_DEBUG /* enable deadlock detection using + rank-order for locks + */ +#endif + +#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG + +static PRUintn pr_thread_rwlock_key; /* TPD key for lock stack */ +static PRUintn pr_thread_rwlock_alloc_failed; + +#define _PR_RWLOCK_RANK_ORDER_LIMIT 10 + +typedef struct thread_rwlock_stack { + PRInt32 trs_index; /* top of stack */ + PRRWLock *trs_stack[_PR_RWLOCK_RANK_ORDER_LIMIT]; /* stack of lock + pointers */ + +} thread_rwlock_stack; + +static void _PR_SET_THREAD_RWLOCK_RANK(PRRWLock *rwlock); +static PRUint32 _PR_GET_THREAD_RWLOCK_RANK(void); +static void _PR_UNSET_THREAD_RWLOCK_RANK(PRRWLock *rwlock); +static void _PR_RELEASE_LOCK_STACK(void *lock_stack); + +#endif + +/* + * Reader/Writer Locks + */ + +/* 
+ * PR_NewRWLock + * Create a reader-writer lock, with the given lock rank and lock name + * + */ + +PR_IMPLEMENT(PRRWLock *) +PR_NewRWLock(PRUint32 lock_rank, const char *lock_name) +{ + PRRWLock *rwlock; +#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK) + int err; +#endif + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + rwlock = PR_NEWZAP(PRRWLock); + if (rwlock == NULL) + return NULL; + + rwlock->rw_rank = lock_rank; + if (lock_name != NULL) { + rwlock->rw_name = (char*) PR_Malloc(strlen(lock_name) + 1); + if (rwlock->rw_name == NULL) { + PR_DELETE(rwlock); + return(NULL); + } + strcpy(rwlock->rw_name, lock_name); + } else { + rwlock->rw_name = NULL; + } + +#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK) + err = RWLOCK_INIT(&rwlock->rw_lock); + if (err != 0) { + PR_SetError(PR_UNKNOWN_ERROR, err); + PR_Free(rwlock->rw_name); + PR_DELETE(rwlock); + return NULL; + } + return rwlock; +#else + rwlock->rw_lock = PR_NewLock(); + if (rwlock->rw_lock == NULL) { + goto failed; + } + rwlock->rw_reader_waitq = PR_NewCondVar(rwlock->rw_lock); + if (rwlock->rw_reader_waitq == NULL) { + goto failed; + } + rwlock->rw_writer_waitq = PR_NewCondVar(rwlock->rw_lock); + if (rwlock->rw_writer_waitq == NULL) { + goto failed; + } + rwlock->rw_reader_cnt = 0; + rwlock->rw_writer_cnt = 0; + rwlock->rw_lock_cnt = 0; + return rwlock; + +failed: + if (rwlock->rw_reader_waitq != NULL) { + PR_DestroyCondVar(rwlock->rw_reader_waitq); + } + if (rwlock->rw_lock != NULL) { + PR_DestroyLock(rwlock->rw_lock); + } + PR_Free(rwlock->rw_name); + PR_DELETE(rwlock); + return NULL; +#endif +} + +/* +** Destroy the given RWLock "lock". +*/ +PR_IMPLEMENT(void) +PR_DestroyRWLock(PRRWLock *rwlock) +{ +#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK) + int err; + err = RWLOCK_DESTROY(&rwlock->rw_lock); + PR_ASSERT(err == 0); +#else + PR_ASSERT(rwlock->rw_reader_cnt == 0); + PR_DestroyCondVar(rwlock->rw_reader_waitq); + PR_DestroyCondVar(rwlock->rw_writer_waitq); + PR_DestroyLock(rwlock->rw_lock); +#endif + if (rwlock->rw_name != NULL) + PR_Free(rwlock->rw_name); + PR_DELETE(rwlock); +} + +/* +** Read-lock the RWLock. +*/ +PR_IMPLEMENT(void) +PR_RWLock_Rlock(PRRWLock *rwlock) +{ +#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK) +int err; +#endif + +#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG + /* + * assert that rank ordering is not violated; the rank of 'rwlock' should + * be equal to or greater than the highest rank of all the locks held by + * the thread. + */ + PR_ASSERT((rwlock->rw_rank == PR_RWLOCK_RANK_NONE) || + (rwlock->rw_rank >= _PR_GET_THREAD_RWLOCK_RANK())); +#endif + +#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK) + err = RWLOCK_RDLOCK(&rwlock->rw_lock); + PR_ASSERT(err == 0); +#else + PR_Lock(rwlock->rw_lock); + /* + * wait if write-locked or if a writer is waiting; preference for writers + */ + while ((rwlock->rw_lock_cnt < 0) || + (rwlock->rw_writer_cnt > 0)) { + rwlock->rw_reader_cnt++; + PR_WaitCondVar(rwlock->rw_reader_waitq, PR_INTERVAL_NO_TIMEOUT); + rwlock->rw_reader_cnt--; + } + /* + * Increment read-lock count + */ + rwlock->rw_lock_cnt++; + + PR_Unlock(rwlock->rw_lock); +#endif + +#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG + /* + * update thread's lock rank + */ + _PR_SET_THREAD_RWLOCK_RANK(rwlock); +#endif +} + +/* +** Write-lock the RWLock. 
+*/ +PR_IMPLEMENT(void) +PR_RWLock_Wlock(PRRWLock *rwlock) +{ +#if defined(DEBUG) +PRThread *me = PR_GetCurrentThread(); +#endif +#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK) +int err; +#endif + +#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG + /* + * assert that rank ordering is not violated; the rank of 'rwlock' should + * be equal to or greater than the highest rank of all the locks held by + * the thread. + */ + PR_ASSERT((rwlock->rw_rank == PR_RWLOCK_RANK_NONE) || + (rwlock->rw_rank >= _PR_GET_THREAD_RWLOCK_RANK())); +#endif + +#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK) + err = RWLOCK_WRLOCK(&rwlock->rw_lock); + PR_ASSERT(err == 0); +#else + PR_Lock(rwlock->rw_lock); + /* + * wait if read locked + */ + while (rwlock->rw_lock_cnt != 0) { + rwlock->rw_writer_cnt++; + PR_WaitCondVar(rwlock->rw_writer_waitq, PR_INTERVAL_NO_TIMEOUT); + rwlock->rw_writer_cnt--; + } + /* + * apply write lock + */ + rwlock->rw_lock_cnt--; + PR_ASSERT(rwlock->rw_lock_cnt == -1); +#ifdef DEBUG + PR_ASSERT(me != NULL); + rwlock->rw_owner = me; +#endif + PR_Unlock(rwlock->rw_lock); +#endif + +#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG + /* + * update thread's lock rank + */ + _PR_SET_THREAD_RWLOCK_RANK(rwlock); +#endif +} + +/* +** Unlock the RW lock. +*/ +PR_IMPLEMENT(void) +PR_RWLock_Unlock(PRRWLock *rwlock) +{ +#if defined(DEBUG) +PRThread *me = PR_GetCurrentThread(); +#endif +#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK) +int err; +#endif + +#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK) + err = RWLOCK_UNLOCK(&rwlock->rw_lock); + PR_ASSERT(err == 0); +#else + PR_Lock(rwlock->rw_lock); + /* + * lock must be read or write-locked + */ + PR_ASSERT(rwlock->rw_lock_cnt != 0); + if (rwlock->rw_lock_cnt > 0) { + + /* + * decrement read-lock count + */ + rwlock->rw_lock_cnt--; + if (rwlock->rw_lock_cnt == 0) { + /* + * lock is not read-locked anymore; wakeup a waiting writer + */ + if (rwlock->rw_writer_cnt > 0) + PR_NotifyCondVar(rwlock->rw_writer_waitq); + } + } else { + PR_ASSERT(rwlock->rw_lock_cnt == -1); + + rwlock->rw_lock_cnt = 0; +#ifdef DEBUG + PR_ASSERT(rwlock->rw_owner == me); + rwlock->rw_owner = NULL; +#endif + /* + * wakeup a writer, if present; preference for writers + */ + if (rwlock->rw_writer_cnt > 0) + PR_NotifyCondVar(rwlock->rw_writer_waitq); + /* + * else, wakeup all readers, if any + */ + else if (rwlock->rw_reader_cnt > 0) + PR_NotifyAllCondVar(rwlock->rw_reader_waitq); + } + PR_Unlock(rwlock->rw_lock); +#endif + +#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG + /* + * update thread's lock rank + */ + _PR_UNSET_THREAD_RWLOCK_RANK(rwlock); +#endif + return; +} + +#ifndef _PR_RWLOCK_RANK_ORDER_DEBUG + +void _PR_InitRWLocks(void) { } + +#else + +void _PR_InitRWLocks(void) +{ + /* + * allocated thread-private-data index for rwlock list + */ + if (PR_NewThreadPrivateIndex(&pr_thread_rwlock_key, + _PR_RELEASE_LOCK_STACK) == PR_FAILURE) { + pr_thread_rwlock_alloc_failed = 1; + return; + } +} + +/* + * _PR_SET_THREAD_RWLOCK_RANK + * Set a thread's lock rank, which is the highest of the ranks of all + * the locks held by the thread. Pointers to the locks are added to a + * per-thread list, which is anchored off a thread-private data key. 
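
The home-grown path above gives writers preference: a reader blocks not only while the lock is write-held but also while any writer is queued, so a stream of readers cannot starve an update. A usage sketch; table_lock and the accessors are hypothetical, while the PR_RWLock_* calls are the API implemented above:

    #include "nspr.h"

    static PRRWLock *table_lock; /* PR_NewRWLock(PR_RWLOCK_RANK_NONE, "table") */
    static int table_value;

    int Lookup(void)
    {
        int v;
        PR_RWLock_Rlock(table_lock);   /* many readers may hold this at once */
        v = table_value;
        PR_RWLock_Unlock(table_lock);
        return v;
    }

    void Update(int v)
    {
        PR_RWLock_Wlock(table_lock);   /* exclusive; waits for readers to drain */
        table_value = v;
        PR_RWLock_Unlock(table_lock);
    }
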
+ */ + +static void +_PR_SET_THREAD_RWLOCK_RANK(PRRWLock *rwlock) +{ +thread_rwlock_stack *lock_stack; +PRStatus rv; + + /* + * allocate a lock stack + */ + if ((lock_stack = PR_GetThreadPrivate(pr_thread_rwlock_key)) == NULL) { + lock_stack = (thread_rwlock_stack *) + PR_CALLOC(1 * sizeof(thread_rwlock_stack)); + if (lock_stack) { + rv = PR_SetThreadPrivate(pr_thread_rwlock_key, lock_stack); + if (rv == PR_FAILURE) { + PR_DELETE(lock_stack); + pr_thread_rwlock_alloc_failed = 1; + return; + } + } else { + pr_thread_rwlock_alloc_failed = 1; + return; + } + } + /* + * add rwlock to lock stack, if limit is not exceeded + */ + if (lock_stack) { + if (lock_stack->trs_index < _PR_RWLOCK_RANK_ORDER_LIMIT) + lock_stack->trs_stack[lock_stack->trs_index++] = rwlock; + } +} + +static void +_PR_RELEASE_LOCK_STACK(void *lock_stack) +{ + PR_ASSERT(lock_stack); + PR_DELETE(lock_stack); +} + +/* + * _PR_GET_THREAD_RWLOCK_RANK + * + * return thread's lock rank. If thread-private-data for the lock + * stack is not allocated, return PR_RWLOCK_RANK_NONE. + */ + +static PRUint32 +_PR_GET_THREAD_RWLOCK_RANK(void) +{ + thread_rwlock_stack *lock_stack; + + if ((lock_stack = PR_GetThreadPrivate(pr_thread_rwlock_key)) == NULL) + return (PR_RWLOCK_RANK_NONE); + else + return(lock_stack->trs_stack[lock_stack->trs_index - 1]->rw_rank); +} + +/* + * _PR_UNSET_THREAD_RWLOCK_RANK + * + * remove the rwlock from the lock stack. Since locks may not be + * unlocked in a FIFO order, the entire lock stack is searched. + */ + +static void +_PR_UNSET_THREAD_RWLOCK_RANK(PRRWLock *rwlock) +{ + thread_rwlock_stack *lock_stack; + int new_index = 0, index, done = 0; + + lock_stack = PR_GetThreadPrivate(pr_thread_rwlock_key); + + PR_ASSERT(lock_stack != NULL); + + index = lock_stack->trs_index - 1; + while (index-- >= 0) { + if ((lock_stack->trs_stack[index] == rwlock) && !done) { + /* + * reset the slot for rwlock + */ + lock_stack->trs_stack[index] = NULL; + done = 1; + } + /* + * search for the lowest-numbered empty slot, above which there are + * no non-empty slots + */ + if ((lock_stack->trs_stack[index] != NULL) && !new_index) + new_index = index + 1; + if (done && new_index) + break; + } + /* + * set top of stack to highest numbered empty slot + */ + lock_stack->trs_index = new_index; + +} + +#endif /* _PR_RWLOCK_RANK_ORDER_DEBUG */ diff --git a/mozilla/nsprpub/pr/src/threads/prsem.c b/mozilla/nsprpub/pr/src/threads/prsem.c new file mode 100644 index 0000000..d26897c --- /dev/null +++ b/mozilla/nsprpub/pr/src/threads/prsem.c @@ -0,0 +1,170 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is the Netscape Portable Runtime (NSPR). + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998-2000 + * the Initial Developer. All Rights Reserved. 
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "primpl.h"
+#include "obsolete/prsem.h"
+
+/************************************************************************/
+
+/*
+** Create a new semaphore.
+*/
+PR_IMPLEMENT(PRSemaphore*) PR_NewSem(PRUintn value)
+{
+    PRSemaphore *sem;
+    PRCondVar *cvar;
+    PRLock *lock;
+
+    sem = PR_NEWZAP(PRSemaphore);
+    if (sem) {
+#ifdef HAVE_CVAR_BUILT_ON_SEM
+        _PR_MD_NEW_SEM(&sem->md, value);
+#else
+        lock = PR_NewLock();
+        if (!lock) {
+            PR_DELETE(sem);
+            return NULL;
+        }
+
+        cvar = PR_NewCondVar(lock);
+        if (!cvar) {
+            PR_DestroyLock(lock);
+            PR_DELETE(sem);
+            return NULL;
+        }
+        sem->cvar = cvar;
+        sem->count = value;
+#endif
+    }
+    return sem;
+}
+
+/*
+** Destroy a semaphore. There must be no thread waiting on the semaphore.
+** The caller is responsible for guaranteeing that the semaphore is
+** no longer in use.
+*/
+PR_IMPLEMENT(void) PR_DestroySem(PRSemaphore *sem)
+{
+#ifdef HAVE_CVAR_BUILT_ON_SEM
+    _PR_MD_DESTROY_SEM(&sem->md);
+#else
+    PR_ASSERT(sem->waiters == 0);
+
+    PR_DestroyLock(sem->cvar->lock);
+    PR_DestroyCondVar(sem->cvar);
+#endif
+    PR_DELETE(sem);
+}
+
+/*
+** Wait on a Semaphore.
+**
+** This routine allows a calling thread to wait or proceed depending upon
+** the state of the semaphore sem. The thread can proceed only if the
+** counter value of the semaphore sem is currently greater than 0. If the
+** value of semaphore sem is positive, it is decremented by one and the
+** routine returns immediately, allowing the calling thread to continue.
+** If the value of semaphore sem is 0, the calling thread blocks, waiting
+** for the semaphore to be released by another thread.
+**
+** This routine can return PR_PENDING_INTERRUPT if the waiting thread
+** has been interrupted.
+*/
+PR_IMPLEMENT(PRStatus) PR_WaitSem(PRSemaphore *sem)
+{
+    PRStatus status = PR_SUCCESS;
+
+#ifdef HAVE_CVAR_BUILT_ON_SEM
+    return _PR_MD_WAIT_SEM(&sem->md);
+#else
+    PR_Lock(sem->cvar->lock);
+    while (sem->count == 0) {
+        sem->waiters++;
+        status = PR_WaitCondVar(sem->cvar, PR_INTERVAL_NO_TIMEOUT);
+        sem->waiters--;
+        if (status != PR_SUCCESS)
+            break;
+    }
+    if (status == PR_SUCCESS)
+        sem->count--;
+    PR_Unlock(sem->cvar->lock);
+#endif
+
+    return (status);
+}
+
+/*
+** This routine increments the counter value of the semaphore. If other
+** threads are blocked on the semaphore, then the scheduler will determine
+** which ONE thread will be unblocked.
+*/
+PR_IMPLEMENT(void) PR_PostSem(PRSemaphore *sem)
+{
+#ifdef HAVE_CVAR_BUILT_ON_SEM
+    _PR_MD_POST_SEM(&sem->md);
+#else
+    PR_Lock(sem->cvar->lock);
+    if (sem->waiters)
+        PR_NotifyCondVar(sem->cvar);
+    sem->count++;
+    PR_Unlock(sem->cvar->lock);
+#endif
+}
+
+#if DEBUG
+/*
+** Returns the value of the semaphore referenced by sem without affecting
+** the state of the semaphore. The value represents the semaphore value
+** at the time of the call, but may not be the actual value when the
+** caller inspects it. (FOR DEBUGGING ONLY)
+*/
+PR_IMPLEMENT(PRUintn) PR_GetValueSem(PRSemaphore *sem)
+{
+    PRUintn rv;
+
+#ifdef HAVE_CVAR_BUILT_ON_SEM
+    rv = _PR_MD_GET_VALUE_SEM(&sem->md);
+#else
+    PR_Lock(sem->cvar->lock);
+    rv = sem->count;
+    PR_Unlock(sem->cvar->lock);
+#endif
+
+    return rv;
+}
+#endif
diff --git a/mozilla/nsprpub/pr/src/threads/prtpd.c b/mozilla/nsprpub/pr/src/threads/prtpd.c
new file mode 100644
index 0000000..5c53313
--- /dev/null
+++ b/mozilla/nsprpub/pr/src/threads/prtpd.c
@@ -0,0 +1,280 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape Portable Runtime (NSPR).
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998-2000
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+** Thread Private Data
+**
+** There is an arbitrary limit on the number of keys that will be allocated
+** by the runtime. It's largish, so it is intended to be a sanity check, not
+** an impediment.
+**
+** There is a counter, initialized to zero and incremented every time a
+** client asks for a new key, that holds the high water mark for keys. All
+** threads logically have the same high water mark and are permitted to
+** ask for TPD up to that key value.
+**
+** The vector to hold the TPD is allocated when PR_SetThreadPrivate() is
+** called. The size of the vector will be some value greater than or equal
+** to the current high water mark.
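
A usage sketch for the counting semaphore implemented in prsem.c above (its header is the "obsolete/prsem.h" included there). The token-pool framing and names are hypothetical: the count starts at the pool size, PR_WaitSem() takes a token or blocks at zero, and PR_PostSem() returns one and may unblock a waiter:

    #include "nspr.h"
    #include "obsolete/prsem.h"

    static PRSemaphore *pool;      /* assume PR_NewSem(4): four tokens */

    void UseToken(void)
    {
        PR_WaitSem(pool);          /* blocks while the count is zero */
        /* ... at most four threads execute here concurrently ... */
        PR_PostSem(pool);          /* return the token */
    }
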
+** Each thread has its own TPD length and vector.
+**
+** Threads that get private data for keys they have not set (or perhaps
+** don't even exist for that thread) get a NULL return. If the key is
+** beyond the high water mark, an error will be returned.
+*/
+
+/*
+** As of this time, BeOS has its own TPD implementation. Integrating
+** this standard one is a TODO for anyone with a bit of spare time on
+** their hands. For now, we just #ifdef out this whole file and use
+** the routines in pr/src/btthreads/
+*/
+
+#ifndef XP_BEOS
+
+#include "primpl.h"
+
+#include <string.h>
+
+#if defined(WIN95)
+/*
+** Some local variables report warnings on Win95 because the code paths
+** using them are conditioned on HAVE_CUSTOM_USER_THREADS.
+** The pragma suppresses the warning.
+**
+*/
+#pragma warning(disable : 4101)
+#endif
+
+#define _PR_TPD_LIMIT 128               /* arbitrary limit on the TPD slots */
+static PRInt32 _pr_tpd_length = 0;      /* current length of destructor vector */
+static PRInt32 _pr_tpd_highwater = 0;   /* next TPD key to be assigned */
+static PRThreadPrivateDTOR *_pr_tpd_destructors = NULL;
+                                        /* the destructors are associated with
+                                           the keys, therefore asserting that
+                                           the TPD key depicts the data's 'type' */
+
+/*
+** Initialize the thread private data manipulation
+*/
+void _PR_InitTPD(void)
+{
+    _pr_tpd_destructors = (PRThreadPrivateDTOR*)
+        PR_CALLOC(_PR_TPD_LIMIT * sizeof(PRThreadPrivateDTOR*));
+    PR_ASSERT(NULL != _pr_tpd_destructors);
+    _pr_tpd_length = _PR_TPD_LIMIT;
+}
+
+/*
+** Clean up the thread private data manipulation
+*/
+void _PR_CleanupTPD(void)
+{
+}  /* _PR_CleanupTPD */
+
+/*
+** This routine returns a new index for the per-thread-private data table.
+** The index is visible to all threads within a process. This index can
+** be used with the PR_SetThreadPrivate() and PR_GetThreadPrivate() routines
+** to save and retrieve data associated with the index for a thread.
+**
+** The index independently maintains specific values for each binding thread.
+** A thread can only get access to its own thread-specific-data.
+**
+** When a new index is returned, the value associated with the index is
+** NULL for all threads, and upon thread creation the value associated
+** with all indices for that thread is NULL.
+**
+** "dtor" is the destructor function to invoke when the private
+** data is set or destroyed.
+**
+** Returns PR_FAILURE if the total number of indices will exceed the maximum
+** allowed.
+*/
+
+PR_IMPLEMENT(PRStatus) PR_NewThreadPrivateIndex(
+    PRUintn *newIndex, PRThreadPrivateDTOR dtor)
+{
+    PRStatus rv;
+    PRInt32 index;
+
+    if (!_pr_initialized) _PR_ImplicitInitialization();
+
+    PR_ASSERT(NULL != newIndex);
+    PR_ASSERT(NULL != _pr_tpd_destructors);
+
+    index = PR_ATOMIC_INCREMENT(&_pr_tpd_highwater) - 1;  /* allocate index */
+    if (_PR_TPD_LIMIT <= index)
+    {
+        PR_SetError(PR_TPD_RANGE_ERROR, 0);
+        rv = PR_FAILURE;  /* that's just wrong */
+    }
+    else
+    {
+        _pr_tpd_destructors[index] = dtor;  /* record destructor @index */
+        *newIndex = (PRUintn)index;  /* copy into client's location */
+        rv = PR_SUCCESS;  /* that's okay */
+    }
+
+    return rv;
+}
+
+/*
+** Define some per-thread-private data.
+**     "index" is an index into the per-thread private data table
+**     "priv" is the per-thread-private data
+**
+** If the per-thread private data table has a previously registered
+** destructor function and a non-NULL per-thread-private data value,
+** the destructor function is invoked.
+**
+** This can return PR_FAILURE if index is invalid (i.e., beyond the current
+** high water mark) or memory is insufficient to allocate an expanded vector.
+*/
+
+PR_IMPLEMENT(PRStatus) PR_SetThreadPrivate(PRUintn index, void *priv)
+{
+    PRThread *self = PR_GetCurrentThread();
+
+    /*
+    ** The index being set might not have a sufficient vector in this
+    ** thread. But if the index has been allocated, it's okay to go
+    ** ahead and extend this one now.
+    */
+    if ((index >= _PR_TPD_LIMIT) || (index >= _pr_tpd_highwater))
+    {
+        PR_SetError(PR_TPD_RANGE_ERROR, 0);
+        return PR_FAILURE;
+    }
+
+    PR_ASSERT(((NULL == self->privateData) && (0 == self->tpdLength))
+        || ((NULL != self->privateData) && (0 != self->tpdLength)));
+
+    if ((NULL == self->privateData) || (self->tpdLength <= index))
+    {
+        void *extension = PR_CALLOC(_pr_tpd_length * sizeof(void*));
+        if (NULL == extension)
+        {
+            PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+            return PR_FAILURE;
+        }
+        if (self->privateData) {
+            (void)memcpy(
+                extension, self->privateData,
+                self->tpdLength * sizeof(void*));
+            PR_DELETE(self->privateData);
+        }
+        self->tpdLength = _pr_tpd_length;
+        self->privateData = (void**)extension;
+    }
+    /*
+    ** There wasn't much chance of having to call the destructor
+    ** unless the slot already existed.
+    */
+    else if (self->privateData[index] && _pr_tpd_destructors[index])
+    {
+        void *data = self->privateData[index];
+        self->privateData[index] = NULL;
+        (*_pr_tpd_destructors[index])(data);
+    }
+
+    PR_ASSERT(index < self->tpdLength);
+    self->privateData[index] = priv;
+
+    return PR_SUCCESS;
+}
+
+/*
+** Recover the per-thread-private data for the current thread. "index" is
+** the index into the per-thread private data table.
+**
+** The returned value may be NULL, which is indistinguishable from an error
+** condition.
+**
+*/
+
+PR_IMPLEMENT(void*) PR_GetThreadPrivate(PRUintn index)
+{
+    PRThread *self = PR_GetCurrentThread();
+    void *tpd = ((NULL == self->privateData) || (index >= self->tpdLength)) ?
+        NULL : self->privateData[index];
+
+    return tpd;
+}
+
+/*
+** Destroy the thread's private data, if any exists. This is called at
+** thread termination time only. There should be no threading issues
+** since this is being called by the thread itself.
+*/
+void _PR_DestroyThreadPrivate(PRThread* self)
+{
+#define _PR_TPD_DESTRUCTOR_ITERATIONS 4
+
+    if (NULL != self->privateData)  /* we have some */
+    {
+        PRBool clean;
+        PRUint32 index;
+        PRInt32 passes = _PR_TPD_DESTRUCTOR_ITERATIONS;
+        PR_ASSERT(0 != self->tpdLength);
+        do
+        {
+            clean = PR_TRUE;
+            for (index = 0; index < self->tpdLength; ++index)
+            {
+                void *priv = self->privateData[index];  /* extract */
+                if (NULL != priv)  /* we have data at this index */
+                {
+                    if (NULL != _pr_tpd_destructors[index])
+                    {
+                        self->privateData[index] = NULL;  /* precondition */
+                        (*_pr_tpd_destructors[index])(priv);  /* destroy */
+                        clean = PR_FALSE;  /* unknown side effects */
+                    }
+                }
+            }
+        } while ((--passes > 0) && !clean);  /* limit # of passes */
+        /*
+        ** We give up after a fixed number of passes. Any non-NULL
+        ** thread-private data value with a registered destructor
+        ** function is not destroyed.
+        */
+        memset(self->privateData, 0, self->tpdLength * sizeof(void*));
+    }
+}  /* _PR_DestroyThreadPrivate */
+
+#endif  /* !XP_BEOS */
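
A usage sketch for the TPD API above: a lazily allocated per-thread buffer whose destructor runs when the slot is overwritten or, via _PR_DestroyThreadPrivate(), when the thread exits. err_key and the helper names are hypothetical:

    #include "nspr.h"

    static PRUintn err_key;

    static void FreeErrBuf(void *priv) { PR_Free(priv); }

    void InitErrBuf(void)                      /* call once, at startup */
    {
        PR_NewThreadPrivateIndex(&err_key, FreeErrBuf);
    }

    char *GetErrBuf(void)                      /* call from any thread */
    {
        char *buf = (char *)PR_GetThreadPrivate(err_key);
        if (buf == NULL) {
            buf = (char *)PR_Calloc(1, 256);
            PR_SetThreadPrivate(err_key, buf); /* grows this thread's vector */
        }
        return buf;
    }
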