48 files changed, 246 insertions, 244 deletions
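The diff below retires the RTS-local lnat typedef and uses the word-sized StgWord / W_ types throughout the runtime instead. A rough sketch of the type relationship the rename relies on, inferred from the includes/rts/Types.h hunk below; the real definitions of StgWord and W_ live in the stg headers and depend on the platform word size, and the 64-bit width shown here is only an assumption:

    /* Sketch only, not the actual header contents. */
    typedef unsigned long StgWord;   /* machine-word-sized unsigned integer (assumed 64-bit) */
    typedef StgWord       W_;        /* word-sized alias used in the RTS C sources           */
    typedef StgWord       lnat;      /* survives only as a deprecated synonym (see Types.h)  */

    /* Call sites that used to cast to lnat when printing word-sized values
     * now cast to W_ instead, e.g.
     *     debugBelch("%" FMT_SizeT "\n", (lnat)x);
     * becomes
     *     debugBelch("%" FMT_SizeT "\n", (W_)x);
     */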
diff --git a/ghc/hschooks.c b/ghc/hschooks.c
index b8a720b209..4e6e66d3e2 100644
--- a/ghc/hschooks.c
+++ b/ghc/hschooks.c
@@ -40,7 +40,7 @@ defaultsHook (void)
 }
 
 void
-StackOverflowHook (lnat stack_size)    /* in bytes */
+StackOverflowHook (StgWord stack_size)    /* in bytes */
 {
     fprintf(stderr, "GHC stack-space overflow: current limit is %zu bytes.\nUse the `-K<size>' option to increase it.\n", (size_t)stack_size);
 }
diff --git a/includes/mkDerivedConstants.c b/includes/mkDerivedConstants.c
index 3fcf12849f..465551f39a 100644
--- a/includes/mkDerivedConstants.c
+++ b/includes/mkDerivedConstants.c
@@ -225,7 +225,7 @@ main(int argc, char *argv[])
 
     printf("#define BLOCK_SIZE   %u\n", BLOCK_SIZE);
     printf("#define MBLOCK_SIZE   %u\n", MBLOCK_SIZE);
-    printf("#define BLOCKS_PER_MBLOCK  %" FMT_SizeT "\n", (lnat)BLOCKS_PER_MBLOCK);
+    printf("#define BLOCKS_PER_MBLOCK  %" FMT_SizeT "\n", (W_)BLOCKS_PER_MBLOCK);
     // could be derived, but better to save doing the calculation twice
 
     printf("\n\n");
diff --git a/includes/rts/Hooks.h b/includes/rts/Hooks.h
index f409205b87..f536afaa09 100644
--- a/includes/rts/Hooks.h
+++ b/includes/rts/Hooks.h
@@ -18,9 +18,9 @@ extern char *ghc_rts_opts;
 
 extern void OnExitHook (void);
 extern int  NoRunnableThreadsHook (void);
-extern void StackOverflowHook (lnat stack_size);
-extern void OutOfHeapHook (lnat request_size, lnat heap_size);
-extern void MallocFailHook (lnat request_size /* in bytes */, char *msg);
+extern void StackOverflowHook (W_ stack_size);
+extern void OutOfHeapHook (W_ request_size, W_ heap_size);
+extern void MallocFailHook (W_ request_size /* in bytes */, char *msg);
 extern void defaultsHook (void);
 
 #endif /* RTS_HOOKS_H */
diff --git a/includes/rts/SpinLock.h b/includes/rts/SpinLock.h
index 8b337de73f..63a9395e18 100644
--- a/includes/rts/SpinLock.h
+++ b/includes/rts/SpinLock.h
@@ -34,7 +34,7 @@ typedef struct SpinLock_
 
 typedef StgWord SpinLock;
 #endif
 
-typedef lnat SpinLockCount;
+typedef StgWord SpinLockCount;
 
 #if defined(PROF_SPIN)
diff --git a/includes/rts/Types.h b/includes/rts/Types.h
index ff42cdab1f..aacbfdc0b8 100644
--- a/includes/rts/Types.h
+++ b/includes/rts/Types.h
@@ -16,8 +16,10 @@
 
 #include <stddef.h>
 
-typedef unsigned int    nat;           /* at least 32 bits (like int) */
-typedef          size_t lnat;          /* at least 32 bits            */
+typedef unsigned int     nat;           /* at least 32 bits (like int) */
+
+// Deprecated; just use StgWord instead
+typedef StgWord lnat;
 
 /* ullong (64|128-bit) type: only include if needed (not ANSI) */
 #if defined(__GNUC__)
diff --git a/includes/rts/storage/ClosureMacros.h b/includes/rts/storage/ClosureMacros.h
index c6b29aa5b8..146564a17f 100644
--- a/includes/rts/storage/ClosureMacros.h
+++ b/includes/rts/storage/ClosureMacros.h
@@ -429,20 +429,20 @@ EXTERN_INLINE StgWord stack_frame_sizeW( StgClosure *frame )
    -------------------------------------------------------------------------- */
 
 // The number of card bytes needed
-INLINE_HEADER lnat mutArrPtrsCards (lnat elems)
+INLINE_HEADER W_ mutArrPtrsCards (W_ elems)
 {
-    return (lnat)((elems + (1 << MUT_ARR_PTRS_CARD_BITS) - 1)
+    return (W_)((elems + (1 << MUT_ARR_PTRS_CARD_BITS) - 1)
                            >> MUT_ARR_PTRS_CARD_BITS);
 }
 
 // The number of words in the card table
-INLINE_HEADER lnat mutArrPtrsCardTableSize (lnat elems)
+INLINE_HEADER W_ mutArrPtrsCardTableSize (W_ elems)
 {
     return ROUNDUP_BYTES_TO_WDS(mutArrPtrsCards(elems));
 }
 
 // The address of the
card for a particular card number -INLINE_HEADER StgWord8 *mutArrPtrsCard (StgMutArrPtrs *a, lnat n) +INLINE_HEADER StgWord8 *mutArrPtrsCard (StgMutArrPtrs *a, W_ n)  {      return ((StgWord8 *)&(a->payload[a->ptrs]) + n);  } diff --git a/includes/rts/storage/GC.h b/includes/rts/storage/GC.h index 5de8b2be4a..11c8560a54 100644 --- a/includes/rts/storage/GC.h +++ b/includes/rts/storage/GC.h @@ -149,8 +149,8 @@ extern generation * oldest_gen;     -------------------------------------------------------------------------- */ -StgPtr  allocate        ( Capability *cap, lnat n ); -StgPtr  allocatePinned  ( Capability *cap, lnat n ); +StgPtr  allocate        ( Capability *cap, W_ n ); +StgPtr  allocatePinned  ( Capability *cap, W_ n );  /* memory allocator for executable memory */  void * allocateExec(unsigned int len, void **exec_addr); diff --git a/includes/rts/storage/MBlock.h b/includes/rts/storage/MBlock.h index 69b3742514..7a5eb22cc9 100644 --- a/includes/rts/storage/MBlock.h +++ b/includes/rts/storage/MBlock.h @@ -12,8 +12,8 @@  #ifndef RTS_STORAGE_MBLOCK_H  #define RTS_STORAGE_MBLOCK_H -extern lnat peak_mblocks_allocated; -extern lnat mblocks_allocated; +extern W_ peak_mblocks_allocated; +extern W_ mblocks_allocated;  extern void initMBlocks(void);  extern void * getMBlock(void); @@ -156,7 +156,7 @@ typedef struct {      MBlockMapLine lines[MBLOCK_MAP_ENTRIES];  } MBlockMap; -extern lnat mpc_misses; +extern W_ mpc_misses;  StgBool HEAP_ALLOCED_miss(StgWord mblock, void *p); diff --git a/rts/Arena.c b/rts/Arena.c index 653eb69706..361c6c41be 100644 --- a/rts/Arena.c +++ b/rts/Arena.c @@ -80,7 +80,7 @@ arenaAlloc( Arena *arena, size_t size )  	return p;      } else {  	// allocate a fresh block... -	req_blocks =  (lnat)BLOCK_ROUND_UP(size) / BLOCK_SIZE; +	req_blocks =  (W_)BLOCK_ROUND_UP(size) / BLOCK_SIZE;  	bd = allocGroup_lock(req_blocks);  	arena_blocks += req_blocks; diff --git a/rts/Capability.h b/rts/Capability.h index 6c417160ad..1b3c06f5d3 100644 --- a/rts/Capability.h +++ b/rts/Capability.h @@ -123,7 +123,7 @@ struct Capability_ {      SparkCounters spark_stats;  #endif      // Total words allocated by this cap since rts start -    lnat total_allocated; +    W_ total_allocated;      // Per-capability STM-related data      StgTVarWatchQueue *free_tvar_watch_queues; diff --git a/rts/Disassembler.c b/rts/Disassembler.c index 033af11f64..7059d8b018 100644 --- a/rts/Disassembler.c +++ b/rts/Disassembler.c @@ -80,7 +80,7 @@ disInstr ( StgBCO *bco, int pc )           pc += 1; break;       case bci_STKCHECK:  {           StgWord stk_words_reqd = BCO_GET_LARGE_ARG + 1; -         debugBelch("STKCHECK %" FMT_SizeT "\n", (lnat)stk_words_reqd ); +         debugBelch("STKCHECK %" FMT_SizeT "\n", (W_)stk_words_reqd );           break;       }        case bci_PUSH_L:  diff --git a/rts/FrontPanel.c b/rts/FrontPanel.c index d6269fb5b3..b0b9bced4a 100644 --- a/rts/FrontPanel.c +++ b/rts/FrontPanel.c @@ -296,7 +296,7 @@ numLabel( GtkWidget *lbl, nat n )  }  void -updateFrontPanelAfterGC( nat N, lnat live ) +updateFrontPanelAfterGC( nat N, W_ live )  {      char buf[1000]; diff --git a/rts/FrontPanel.h b/rts/FrontPanel.h index 1669c2bf94..84e40d5e1b 100644 --- a/rts/FrontPanel.h +++ b/rts/FrontPanel.h @@ -19,7 +19,7 @@  void initFrontPanel( void );  void stopFrontPanel( void );  void updateFrontPanelBeforeGC( nat N ); -void updateFrontPanelAfterGC( nat N, lnat live ); +void updateFrontPanelAfterGC( nat N, W_ live );  void updateFrontPanel( void ); diff --git a/rts/Linker.c b/rts/Linker.c index 
bf0045616e..50dc9befac 100644 --- a/rts/Linker.c +++ b/rts/Linker.c @@ -1900,7 +1900,7 @@ mmap_again:                      MAP_PRIVATE|TRY_MAP_32BIT|fixed|flags, fd, 0);     if (result == MAP_FAILED) { -       sysErrorBelch("mmap %" FMT_SizeT " bytes at %p",(lnat)size,map_addr); +       sysErrorBelch("mmap %" FMT_SizeT " bytes at %p",(W_)size,map_addr);         errorBelch("Try specifying an address with +RTS -xm<addr> -RTS");         stg_exit(EXIT_FAILURE);     } @@ -1943,7 +1943,7 @@ mmap_again:     }  #endif -   IF_DEBUG(linker, debugBelch("mmapForLinker: mapped %" FMT_SizeT " bytes starting at %p\n", (lnat)size, result)); +   IF_DEBUG(linker, debugBelch("mmapForLinker: mapped %" FMT_SizeT " bytes starting at %p\n", (W_)size, result));     IF_DEBUG(linker, debugBelch("mmapForLinker: done\n"));     return result;  } @@ -4937,7 +4937,7 @@ do_Elf_Rel_relocations ( ObjectCode* oc, char* ehdrC,           default:              errorBelch("%s: unhandled ELF relocation(Rel) type %" FMT_SizeT "\n", -                  oc->fileName, (lnat)ELF_R_TYPE(info)); +                  oc->fileName, (W_)ELF_R_TYPE(info));              return 0;        } @@ -5252,7 +5252,7 @@ do_Elf_Rela_relocations ( ObjectCode* oc, char* ehdrC,           default:              errorBelch("%s: unhandled ELF relocation(RelA) type %" FMT_SizeT "\n", -                  oc->fileName, (lnat)ELF_R_TYPE(info)); +                  oc->fileName, (W_)ELF_R_TYPE(info));              return 0;        } diff --git a/rts/Messages.c b/rts/Messages.c index 6cb66479ee..34dcbdf56d 100644 --- a/rts/Messages.c +++ b/rts/Messages.c @@ -74,7 +74,7 @@ loop:      {          StgTSO *tso = ((MessageWakeup *)m)->tso;          debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld",  -                      (lnat)tso->id); +                      (W_)tso->id);          tryWakeupThread(cap, tso);      }      else if (i == &stg_MSG_THROWTO_info) @@ -90,7 +90,7 @@ loop:          }          debugTraceCap(DEBUG_sched, cap, "message: throwTo %ld -> %ld",  -                      (lnat)t->source->id, (lnat)t->target->id); +                      (W_)t->source->id, (W_)t->target->id);          ASSERT(t->source->why_blocked == BlockedOnMsgThrowTo);          ASSERT(t->source->block_info.closure == (StgClosure *)m); @@ -167,7 +167,7 @@ nat messageBlackHole(Capability *cap, MessageBlackHole *msg)      StgTSO *owner;      debugTraceCap(DEBUG_sched, cap, "message: thread %d blocking on blackhole %p",  -                  (lnat)msg->tso->id, msg->bh); +                  (W_)msg->tso->id, msg->bh);      info = bh->header.info; @@ -256,7 +256,7 @@ loop:          recordClosureMutated(cap,bh); // bh was mutated          debugTraceCap(DEBUG_sched, cap, "thread %d blocked on thread %d",  -                      (lnat)msg->tso->id, (lnat)owner->id); +                      (W_)msg->tso->id, (W_)owner->id);          return 1; // blocked      } @@ -289,7 +289,7 @@ loop:          }          debugTraceCap(DEBUG_sched, cap, "thread %d blocked on thread %d",  -                      (lnat)msg->tso->id, (lnat)owner->id); +                      (W_)msg->tso->id, (W_)owner->id);          // See above, #3838          if (owner->why_blocked == NotBlocked && owner->id != msg->tso->id) { diff --git a/rts/Printer.c b/rts/Printer.c index 156dbea37a..1b0c4b48c7 100644 --- a/rts/Printer.c +++ b/rts/Printer.c @@ -300,21 +300,21 @@ printClosure( StgClosure *obj )              StgWord i;              debugBelch("ARR_WORDS(\"");  	    for (i=0; i<arr_words_words((StgArrWords *)obj); i++) -	     
 debugBelch("%" FMT_SizeT, (lnat)((StgArrWords *)obj)->payload[i]); +	      debugBelch("%" FMT_SizeT, (W_)((StgArrWords *)obj)->payload[i]);              debugBelch("\")\n");              break;          }      case MUT_ARR_PTRS_CLEAN: -	debugBelch("MUT_ARR_PTRS_CLEAN(size=%" FMT_SizeT ")\n", (lnat)((StgMutArrPtrs *)obj)->ptrs); +	debugBelch("MUT_ARR_PTRS_CLEAN(size=%" FMT_SizeT ")\n", (W_)((StgMutArrPtrs *)obj)->ptrs);  	break;      case MUT_ARR_PTRS_DIRTY: -	debugBelch("MUT_ARR_PTRS_DIRTY(size=%" FMT_SizeT ")\n", (lnat)((StgMutArrPtrs *)obj)->ptrs); +	debugBelch("MUT_ARR_PTRS_DIRTY(size=%" FMT_SizeT ")\n", (W_)((StgMutArrPtrs *)obj)->ptrs);  	break;      case MUT_ARR_PTRS_FROZEN: -	debugBelch("MUT_ARR_PTRS_FROZEN(size=%" FMT_SizeT ")\n", (lnat)((StgMutArrPtrs *)obj)->ptrs); +	debugBelch("MUT_ARR_PTRS_FROZEN(size=%" FMT_SizeT ")\n", (W_)((StgMutArrPtrs *)obj)->ptrs);  	break;      case MVAR_CLEAN: @@ -431,7 +431,7 @@ printSmallBitmap( StgPtr spBottom, StgPtr payload, StgWord bitmap, nat size )  	    printPtr((P_)payload[i]);  	    debugBelch("\n");  	} else { -	    debugBelch("Word# %" FMT_SizeT "\n", (lnat)payload[i]); +	    debugBelch("Word# %" FMT_SizeT "\n", (W_)payload[i]);  	}      }  } @@ -447,12 +447,12 @@ printLargeBitmap( StgPtr spBottom, StgPtr payload, StgLargeBitmap* large_bitmap,  	StgWord bitmap = large_bitmap->bitmap[bmp];  	j = 0;  	for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) { -	    debugBelch("   stk[%" FMT_SizeT "] (%p) = ", (lnat)(spBottom-(payload+i)), payload+i); +	    debugBelch("   stk[%" FMT_SizeT "] (%p) = ", (W_)(spBottom-(payload+i)), payload+i);  	    if ((bitmap & 1) == 0) {  		printPtr((P_)payload[i]);  		debugBelch("\n");  	    } else { -		debugBelch("Word# %" FMT_SizeT "\n", (lnat)payload[i]); +		debugBelch("Word# %" FMT_SizeT "\n", (W_)payload[i]);  	    }  	}      } diff --git a/rts/ProfHeap.c b/rts/ProfHeap.c index c7048a5cf6..c68b661c86 100644 --- a/rts/ProfHeap.c +++ b/rts/ProfHeap.c @@ -821,7 +821,7 @@ dumpCensus( Census *census )  	}  #endif -	fprintf(hp_file, "\t%" FMT_SizeT "\n", (lnat)count * sizeof(W_)); +	fprintf(hp_file, "\t%" FMT_SizeT "\n", (W_)count * sizeof(W_));      }      printSample(rtsFalse, census->time); diff --git a/rts/Profiling.c b/rts/Profiling.c index 2544e00e21..d43fc6ad54 100644 --- a/rts/Profiling.c +++ b/rts/Profiling.c @@ -42,7 +42,7 @@ unsigned int CCS_ID = 1;  /* figures for the profiling report.   
*/  static StgWord64 total_alloc; -static lnat      total_prof_ticks; +static W_      total_prof_ticks;  /* Globals for opening the profiling log file(s)   */ diff --git a/rts/RetainerProfile.c b/rts/RetainerProfile.c index 4bbc3380ae..c07dff76e4 100644 --- a/rts/RetainerProfile.c +++ b/rts/RetainerProfile.c @@ -271,11 +271,11 @@ isEmptyRetainerStack( void )   * Returns size of stack   * -------------------------------------------------------------------------- */  #ifdef DEBUG -lnat +W_  retainerStackBlocks( void )  {      bdescr* bd; -    lnat res = 0; +    W_ res = 0;      for (bd = firstStack; bd != NULL; bd = bd->link)         res += bd->blocks; diff --git a/rts/RetainerProfile.h b/rts/RetainerProfile.h index 0e75327cde..d92563ffbb 100644 --- a/rts/RetainerProfile.h +++ b/rts/RetainerProfile.h @@ -43,7 +43,7 @@ retainerSetOf( StgClosure *c )  // Used by Storage.c:memInventory()  #ifdef DEBUG -extern lnat retainerStackBlocks ( void ); +extern W_ retainerStackBlocks ( void );  #endif  #include "EndPrivate.h" diff --git a/rts/RtsFlags.c b/rts/RtsFlags.c index 7c86efadb7..f5acca8988 100644 --- a/rts/RtsFlags.c +++ b/rts/RtsFlags.c @@ -1542,7 +1542,7 @@ decodeSize(const char *flag, nat offset, StgWord64 min, StgWord64 max)      if (m < 0 || val < min || val > max) {          // printf doesn't like 64-bit format specs on Windows          // apparently, so fall back to unsigned long. -        errorBelch("error in RTS option %s: size outside allowed range (%" FMT_SizeT " - %" FMT_SizeT ")", flag, (lnat)min, (lnat)max); +        errorBelch("error in RTS option %s: size outside allowed range (%" FMT_SizeT " - %" FMT_SizeT ")", flag, (W_)min, (W_)max);          stg_exit(EXIT_FAILURE);      } diff --git a/rts/RtsUtils.c b/rts/RtsUtils.c index b880f8c9e5..4d6d362722 100644 --- a/rts/RtsUtils.c +++ b/rts/RtsUtils.c @@ -130,7 +130,7 @@ heapOverflow(void)      {          /* don't fflush(stdout); WORKAROUND bug in Linux glibc */          OutOfHeapHook(0/*unknown request size*/, -                      (lnat)RtsFlags.GcFlags.maxHeapSize * BLOCK_SIZE); +                      (W_)RtsFlags.GcFlags.maxHeapSize * BLOCK_SIZE);          heap_overflow = rtsTrue;      } diff --git a/rts/Schedule.c b/rts/Schedule.c index a8de843ea6..41f7f37f71 100644 --- a/rts/Schedule.c +++ b/rts/Schedule.c @@ -1107,9 +1107,9 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )      if (cap->r.rHpAlloc > BLOCK_SIZE) {  	// if so, get one and push it on the front of the nursery.  	
bdescr *bd; -	lnat blocks; +	W_ blocks; -	blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE; +	blocks = (W_)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;          if (blocks > BLOCKS_PER_MBLOCK) {              barf("allocation of %ld bytes too large (GHC should have complained at compile-time)", (long)cap->r.rHpAlloc); diff --git a/rts/Stats.c b/rts/Stats.c index b12cb769f7..36ca0e11b3 100644 --- a/rts/Stats.c +++ b/rts/Stats.c @@ -57,14 +57,14 @@ static Time HCe_start_time, HCe_tot_time = 0;   // heap census prof elap time  #endif  // current = current as of last GC -static lnat current_residency = 0; // in words; for stats only -static lnat max_residency     = 0; -static lnat cumulative_residency = 0; -static lnat residency_samples = 0; // for stats only -static lnat current_slop      = 0; -static lnat max_slop          = 0; +static W_ current_residency = 0; // in words; for stats only +static W_ max_residency     = 0; +static W_ cumulative_residency = 0; +static W_ residency_samples = 0; // for stats only +static W_ current_slop      = 0; +static W_ max_slop          = 0; -static lnat GC_end_faults = 0; +static W_ GC_end_faults = 0;  static Time *GC_coll_cpu = NULL;  static Time *GC_coll_elapsed = NULL; @@ -340,8 +340,8 @@ stat_gcWorkerThreadDone (gc_thread *gct STG_UNUSED)  void  stat_endGC (Capability *cap, gc_thread *gct, -            lnat alloc, lnat live, lnat copied, lnat slop, nat gen, -            nat par_n_threads, lnat par_max_copied, lnat par_tot_copied) +            W_ alloc, W_ live, W_ copied, W_ slop, nat gen, +            nat par_n_threads, W_ par_max_copied, W_ par_tot_copied)  {      if (RtsFlags.GcFlags.giveStats != NO_GC_STATS ||          RtsFlags.ProfFlags.doHeapProfile) @@ -419,8 +419,8 @@ stat_endGC (Capability *cap, gc_thread *gct,  	 *       to calculate the total  	 */          { -            lnat tot_alloc = 0; -            lnat n; +            W_ tot_alloc = 0; +            W_ n;              for (n = 0; n < n_capabilities; n++) {                  tot_alloc += capabilities[n].total_allocated;                  traceEventHeapAllocated(&capabilities[n], @@ -627,7 +627,7 @@ stat_exit(int alloc)  	if (tot_elapsed == 0.0) tot_elapsed = 1;  	if (RtsFlags.GcFlags.giveStats >= VERBOSE_GC_STATS) { -	    statsPrintf("%9" FMT_SizeT " %9.9s %9.9s", (lnat)alloc*sizeof(W_), "", ""); +	    statsPrintf("%9" FMT_SizeT " %9.9s %9.9s", (W_)alloc*sizeof(W_), "", "");  	    statsPrintf(" %5.2f %5.2f\n\n", 0.0, 0.0);  	} @@ -675,7 +675,7 @@ stat_exit(int alloc)  	    statsPrintf("%16" FMT_SizeT " MB total memory in use (%" FMT_SizeT " MB lost due to fragmentation)\n\n",                           peak_mblocks_allocated * MBLOCK_SIZE_W / (1024 * 1024 / sizeof(W_)), -                        (lnat)(peak_mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W - hw_alloc_blocks * BLOCK_SIZE_W) / (1024 * 1024 / sizeof(W_))); +                        (W_)(peak_mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W - hw_alloc_blocks * BLOCK_SIZE_W) / (1024 * 1024 / sizeof(W_)));  	    /* Print garbage collections in each gen */              statsPrintf("                                    Tot time (elapsed)  Avg pause  Max pause\n"); @@ -856,9 +856,9 @@ void  statDescribeGens(void)  {    nat g, mut, lge, i; -  lnat gen_slop; -  lnat tot_live, tot_slop; -  lnat gen_live, gen_blocks; +  W_ gen_slop; +  W_ tot_live, tot_slop; +  W_ gen_live, gen_blocks;    bdescr *bd;    generation *gen; @@ -896,7 +896,7 @@ statDescribeGens(void)            gen_blocks += gcThreadLiveBlocks(i,g);        } 
-      debugBelch("%5d %7" FMT_SizeT " %9d", g, (lnat)gen->max_blocks, mut); +      debugBelch("%5d %7" FMT_SizeT " %9d", g, (W_)gen->max_blocks, mut);        gen_slop = gen_blocks * BLOCK_SIZE_W - gen_live; diff --git a/rts/Stats.h b/rts/Stats.h index d74cf2972d..008ef62ac4 100644 --- a/rts/Stats.h +++ b/rts/Stats.h @@ -29,8 +29,8 @@ void      stat_endInit(void);  void      stat_startGC(Capability *cap, struct gc_thread_ *gct);  void      stat_endGC  (Capability *cap, struct gc_thread_ *gct, -                       lnat alloc, lnat live, lnat copied, lnat slop, nat gen, -                       nat n_gc_threads, lnat par_max_copied, lnat par_tot_copied); +                       W_ alloc, W_ live, W_ copied, W_ slop, nat gen, +                       nat n_gc_threads, W_ par_max_copied, W_ par_tot_copied);  void stat_gcWorkerThreadStart (struct gc_thread_ *gct);  void stat_gcWorkerThreadDone  (struct gc_thread_ *gct); diff --git a/rts/Threads.c b/rts/Threads.c index 61bf4445e8..7c975cda78 100644 --- a/rts/Threads.c +++ b/rts/Threads.c @@ -247,7 +247,7 @@ tryWakeupThread (Capability *cap, StgTSO *tso)          msg->tso = tso;          sendMessage(cap, tso->cap, (Message*)msg);          debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d", -                      (lnat)tso->id, tso->cap->no); +                      (W_)tso->id, tso->cap->no);          return;      }  #endif @@ -272,7 +272,7 @@ tryWakeupThread (Capability *cap, StgTSO *tso)          unlockClosure(tso->block_info.closure, i);          if (i != &stg_MSG_NULL_info) {              debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)", -                          (lnat)tso->id, tso->block_info.throwto->header.info); +                          (W_)tso->id, tso->block_info.throwto->header.info);              return;          } @@ -375,7 +375,7 @@ checkBlockingQueues (Capability *cap, StgTSO *tso)      debugTraceCap(DEBUG_sched, cap,                    "collision occurred; checking blocking queues for thread %ld", -                  (lnat)tso->id); +                  (W_)tso->id);      for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {          next = bq->link; @@ -494,7 +494,7 @@ threadStackOverflow (Capability *cap, StgTSO *tso)  {      StgStack *new_stack, *old_stack;      StgUnderflowFrame *frame; -    lnat chunk_size; +    W_ chunk_size;      IF_DEBUG(sanity,checkTSO(tso)); diff --git a/rts/Trace.c b/rts/Trace.c index a946f2c5d3..817184960a 100644 --- a/rts/Trace.c +++ b/rts/Trace.c @@ -204,37 +204,37 @@ static void traceSchedEvent_stderr (Capability *cap, EventTypeNum tag,      switch (tag) {      case EVENT_CREATE_THREAD:   // (cap, thread)          debugBelch("cap %d: created thread %" FMT_SizeT "\n",  -                   cap->no, (lnat)tso->id); +                   cap->no, (W_)tso->id);          break;      case EVENT_RUN_THREAD:      //  (cap, thread)          debugBelch("cap %d: running thread %" FMT_SizeT " (%s)\n",  -                   cap->no, (lnat)tso->id, what_next_strs[tso->what_next]); +                   cap->no, (W_)tso->id, what_next_strs[tso->what_next]);          break;      case EVENT_THREAD_RUNNABLE: // (cap, thread)          debugBelch("cap %d: thread %" FMT_SizeT " appended to run queue\n",  -                   cap->no, (lnat)tso->id); +                   cap->no, (W_)tso->id);          break;      case EVENT_MIGRATE_THREAD:  // (cap, thread, new_cap)          debugBelch("cap %d: thread %" FMT_SizeT " migrating to cap %d\n",  -                   
cap->no, (lnat)tso->id, (int)info1); +                   cap->no, (W_)tso->id, (int)info1);          break;      case EVENT_THREAD_WAKEUP:   // (cap, thread, info1_cap)          debugBelch("cap %d: waking up thread %" FMT_SizeT " on cap %d\n",  -                   cap->no, (lnat)tso->id, (int)info1); +                   cap->no, (W_)tso->id, (int)info1);          break;      case EVENT_STOP_THREAD:     // (cap, thread, status)          if (info1 == 6 + BlockedOnBlackHole) {              debugBelch("cap %d: thread %" FMT_SizeT " stopped (blocked on black hole owned by thread %lu)\n", -                       cap->no, (lnat)tso->id, (long)info2); +                       cap->no, (W_)tso->id, (long)info2);          } else {              debugBelch("cap %d: thread %" FMT_SizeT " stopped (%s)\n", -                       cap->no, (lnat)tso->id, thread_stop_reasons[info1]); +                       cap->no, (W_)tso->id, thread_stop_reasons[info1]);          }          break;      default:          debugBelch("cap %d: thread %" FMT_SizeT ": event %d\n\n",  -                   cap->no, (lnat)tso->id, tag); +                   cap->no, (W_)tso->id, tag);          break;      } @@ -324,7 +324,7 @@ void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag)  void traceHeapEvent_ (Capability   *cap,                        EventTypeNum  tag,                        CapsetID      heap_capset, -                      lnat          info1) +                      W_          info1)  {  #ifdef DEBUG      if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) { @@ -338,10 +338,10 @@ void traceHeapEvent_ (Capability   *cap,  void traceEventHeapInfo_ (CapsetID    heap_capset,                            nat         gens, -                          lnat        maxHeapSize, -                          lnat        allocAreaSize, -                          lnat        mblockSize, -                          lnat        blockSize) +                          W_        maxHeapSize, +                          W_        allocAreaSize, +                          W_        mblockSize, +                          W_        blockSize)  {  #ifdef DEBUG      if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) { @@ -358,12 +358,12 @@ void traceEventHeapInfo_ (CapsetID    heap_capset,  void traceEventGcStats_  (Capability *cap,                            CapsetID    heap_capset,                            nat         gen, -                          lnat        copied, -                          lnat        slop, -                          lnat        fragmentation, +                          W_        copied, +                          W_        slop, +                          W_        fragmentation,                            nat         par_n_threads, -                          lnat        par_max_copied, -                          lnat        par_tot_copied) +                          W_        par_max_copied, +                          W_        par_tot_copied)  {  #ifdef DEBUG      if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) { @@ -423,18 +423,18 @@ void traceCapsetEvent (EventTypeNum tag,          tracePreface();          switch (tag) {          case EVENT_CAPSET_CREATE:   // (capset, capset_type) -            debugBelch("created capset %" FMT_SizeT " of type %d\n", (lnat)capset, (int)info); +            debugBelch("created capset %" FMT_SizeT " of type %d\n", (W_)capset, (int)info);              break;          case EVENT_CAPSET_DELETE:   // (capset) -            debugBelch("deleted capset %" FMT_SizeT "\n", 
(lnat)capset); +            debugBelch("deleted capset %" FMT_SizeT "\n", (W_)capset);              break;          case EVENT_CAPSET_ASSIGN_CAP:  // (capset, capno)              debugBelch("assigned cap %" FMT_SizeT " to capset %" FMT_SizeT "\n", -                       (lnat)info, (lnat)capset); +                       (W_)info, (W_)capset);              break;          case EVENT_CAPSET_REMOVE_CAP:  // (capset, capno)              debugBelch("removed cap %" FMT_SizeT " from capset %" FMT_SizeT "\n", -                       (lnat)info, (lnat)capset); +                       (W_)info, (W_)capset);              break;          }          RELEASE_LOCK(&trace_utx); @@ -717,7 +717,7 @@ void traceThreadLabel_(Capability *cap,          ACQUIRE_LOCK(&trace_utx);          tracePreface();          debugBelch("cap %d: thread %" FMT_SizeT " has label %s\n", -                   cap->no, (lnat)tso->id, label); +                   cap->no, (W_)tso->id, label);          RELEASE_LOCK(&trace_utx);      } else  #endif diff --git a/rts/Trace.h b/rts/Trace.h index b3710d32c9..4f1ac3bf0a 100644 --- a/rts/Trace.h +++ b/rts/Trace.h @@ -133,24 +133,24 @@ void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag);  void traceHeapEvent_ (Capability   *cap,                        EventTypeNum  tag,                        CapsetID      heap_capset, -                      lnat          info1); +                      W_          info1);  void traceEventHeapInfo_ (CapsetID    heap_capset,                            nat         gens, -                          lnat        maxHeapSize, -                          lnat        allocAreaSize, -                          lnat        mblockSize, -                          lnat        blockSize); +                          W_        maxHeapSize, +                          W_        allocAreaSize, +                          W_        mblockSize, +                          W_        blockSize);  void traceEventGcStats_  (Capability *cap,                            CapsetID    heap_capset,                            nat         gen, -                          lnat        copied, -                          lnat        slop, -                          lnat        fragmentation, +                          W_        copied, +                          W_        slop, +                          W_        fragmentation,                            nat         par_n_threads, -                          lnat        par_max_copied, -                          lnat        par_tot_copied); +                          W_        par_max_copied, +                          W_        par_tot_copied);  /*    * Record a spark event @@ -642,12 +642,12 @@ INLINE_HEADER void traceEventGcGlobalSync(Capability *cap STG_UNUSED)  INLINE_HEADER void traceEventGcStats(Capability *cap            STG_UNUSED,                                       CapsetID    heap_capset    STG_UNUSED,                                       nat         gen            STG_UNUSED, -                                     lnat        copied         STG_UNUSED, -                                     lnat        slop           STG_UNUSED, -                                     lnat        fragmentation  STG_UNUSED, +                                     W_        copied         STG_UNUSED, +                                     W_        slop           STG_UNUSED, +                                     W_        fragmentation  STG_UNUSED,                                       nat         par_n_threads  STG_UNUSED, -                    
                 lnat        par_max_copied STG_UNUSED, -                                     lnat        par_tot_copied STG_UNUSED) +                                     W_        par_max_copied STG_UNUSED, +                                     W_        par_tot_copied STG_UNUSED)  {      if (RTS_UNLIKELY(TRACE_gc)) {          traceEventGcStats_(cap, heap_capset, gen, @@ -661,10 +661,10 @@ INLINE_HEADER void traceEventGcStats(Capability *cap            STG_UNUSED,  INLINE_HEADER void traceEventHeapInfo(CapsetID    heap_capset   STG_UNUSED,                                        nat         gens          STG_UNUSED, -                                      lnat        maxHeapSize   STG_UNUSED, -                                      lnat        allocAreaSize STG_UNUSED, -                                      lnat        mblockSize    STG_UNUSED, -                                      lnat        blockSize     STG_UNUSED) +                                      W_        maxHeapSize   STG_UNUSED, +                                      W_        allocAreaSize STG_UNUSED, +                                      W_        mblockSize    STG_UNUSED, +                                      W_        blockSize     STG_UNUSED)  {      if (RTS_UNLIKELY(TRACE_gc)) {          traceEventHeapInfo_(heap_capset, gens, @@ -678,7 +678,7 @@ INLINE_HEADER void traceEventHeapInfo(CapsetID    heap_capset   STG_UNUSED,  INLINE_HEADER void traceEventHeapAllocated(Capability *cap         STG_UNUSED,                                             CapsetID    heap_capset STG_UNUSED, -                                           lnat        allocated   STG_UNUSED) +                                           W_        allocated   STG_UNUSED)  {      traceHeapEvent(cap, EVENT_HEAP_ALLOCATED, heap_capset, allocated);      dtraceEventHeapAllocated((EventCapNo)cap->no, heap_capset, allocated); @@ -686,7 +686,7 @@ INLINE_HEADER void traceEventHeapAllocated(Capability *cap         STG_UNUSED,  INLINE_HEADER void traceEventHeapSize(Capability *cap         STG_UNUSED,                                        CapsetID    heap_capset STG_UNUSED, -                                      lnat        heap_size   STG_UNUSED) +                                      W_        heap_size   STG_UNUSED)  {      traceHeapEvent(cap, EVENT_HEAP_SIZE, heap_capset, heap_size);      dtraceEventHeapSize(heap_capset, heap_size); @@ -694,7 +694,7 @@ INLINE_HEADER void traceEventHeapSize(Capability *cap         STG_UNUSED,  INLINE_HEADER void traceEventHeapLive(Capability *cap         STG_UNUSED,                                        CapsetID    heap_capset STG_UNUSED, -                                      lnat        heap_live   STG_UNUSED) +                                      W_        heap_live   STG_UNUSED)  {      traceHeapEvent(cap, EVENT_HEAP_LIVE, heap_capset, heap_live);      dtraceEventHeapLive(heap_capset, heap_live); diff --git a/rts/eventlog/EventLog.c b/rts/eventlog/EventLog.c index b6614b940c..81aaecb67d 100644 --- a/rts/eventlog/EventLog.c +++ b/rts/eventlog/EventLog.c @@ -851,7 +851,7 @@ void postWallClockTime (EventCapsetID capset)  void postHeapEvent (Capability    *cap,                      EventTypeNum   tag,                      EventCapsetID  heap_capset, -                    lnat           info1) +                    W_           info1)  {      EventsBuf *eb; @@ -881,10 +881,10 @@ void postHeapEvent (Capability    *cap,  void postEventHeapInfo (EventCapsetID heap_capset,                          nat           gens, -                    
    lnat          maxHeapSize, -                        lnat          allocAreaSize, -                        lnat          mblockSize, -                        lnat          blockSize) +                        W_          maxHeapSize, +                        W_          allocAreaSize, +                        W_          mblockSize, +                        W_          blockSize)  {      ACQUIRE_LOCK(&eventBufMutex); @@ -910,12 +910,12 @@ void postEventHeapInfo (EventCapsetID heap_capset,  void postEventGcStats  (Capability    *cap,                          EventCapsetID  heap_capset,                          nat            gen, -                        lnat           copied, -                        lnat           slop, -                        lnat           fragmentation, +                        W_           copied, +                        W_           slop, +                        W_           fragmentation,                          nat            par_n_threads, -                        lnat           par_max_copied, -                        lnat           par_tot_copied) +                        W_           par_max_copied, +                        W_           par_tot_copied)  {      EventsBuf *eb; diff --git a/rts/eventlog/EventLog.h b/rts/eventlog/EventLog.h index 93dd9a8144..5861f64757 100644 --- a/rts/eventlog/EventLog.h +++ b/rts/eventlog/EventLog.h @@ -106,24 +106,24 @@ void postThreadLabel(Capability    *cap,  void postHeapEvent (Capability    *cap,                      EventTypeNum   tag,                      EventCapsetID  heap_capset, -                    lnat           info1); +                    W_           info1);  void postEventHeapInfo (EventCapsetID heap_capset,                          nat           gens, -                        lnat          maxHeapSize, -                        lnat          allocAreaSize, -                        lnat          mblockSize, -                        lnat          blockSize); +                        W_          maxHeapSize, +                        W_          allocAreaSize, +                        W_          mblockSize, +                        W_          blockSize);  void postEventGcStats  (Capability    *cap,                          EventCapsetID  heap_capset,                          nat            gen, -                        lnat           copied, -                        lnat           slop, -                        lnat           fragmentation, +                        W_           copied, +                        W_           slop, +                        W_           fragmentation,                          nat            par_n_threads, -                        lnat           par_max_copied, -                        lnat           par_tot_copied); +                        W_           par_max_copied, +                        W_           par_tot_copied);  void postTaskCreateEvent (EventTaskId taskId,                            EventCapNo cap, diff --git a/rts/hooks/MallocFail.c b/rts/hooks/MallocFail.c index e298c2ee77..6280b2f21d 100644 --- a/rts/hooks/MallocFail.c +++ b/rts/hooks/MallocFail.c @@ -10,7 +10,7 @@  #include <stdio.h>  void -MallocFailHook (lnat request_size /* in bytes */, char *msg) +MallocFailHook (W_ request_size /* in bytes */, char *msg)  {      fprintf(stderr, "malloc: failed on request for %" FMT_SizeT " bytes; message: %s\n", request_size, msg);  } diff --git a/rts/hooks/OutOfHeap.c b/rts/hooks/OutOfHeap.c index 5ed5ed9b96..b54a08a6e8 100644 --- a/rts/hooks/OutOfHeap.c +++ 
b/rts/hooks/OutOfHeap.c @@ -9,7 +9,7 @@  #include <stdio.h>  void -OutOfHeapHook (lnat request_size, lnat heap_size) /* both sizes in bytes */ +OutOfHeapHook (W_ request_size, W_ heap_size) /* both sizes in bytes */  {    /*    fprintf(stderr, "Heap exhausted;\nwhile trying to allocate %lu bytes in a %lu-byte heap;\nuse `+RTS -H<size>' to increase the total heap size.\n", */ diff --git a/rts/hooks/StackOverflow.c b/rts/hooks/StackOverflow.c index fe8a059b7f..6a58bb8864 100644 --- a/rts/hooks/StackOverflow.c +++ b/rts/hooks/StackOverflow.c @@ -10,7 +10,7 @@  #include <stdio.h>  void -StackOverflowHook (lnat stack_size)    /* in bytes */ +StackOverflowHook (W_ stack_size)    /* in bytes */  {      fprintf(stderr, "Stack space overflow: current size %" FMT_SizeT " bytes.\nUse `+RTS -Ksize -RTS' to increase it.\n", stack_size);  } diff --git a/rts/parallel/ParTicky.c b/rts/parallel/ParTicky.c index 5f3e3e323c..07e3ba9390 100644 --- a/rts/parallel/ParTicky.c +++ b/rts/parallel/ParTicky.c @@ -30,8 +30,8 @@ extern double ElapsedTimeStart;  extern StgWord64 GC_tot_alloc;  extern StgWord64 GC_tot_copied; -extern lnat MaxResidency;     /* in words; for stats only */ -extern lnat ResidencySamples; /* for stats only */ +extern W_ MaxResidency;     /* in words; for stats only */ +extern W_ ResidencySamples; /* for stats only */  /* ngIplu' {Stats.c}vo' */  #define BIG_STRING_LEN              512 diff --git a/rts/posix/OSMem.c b/rts/posix/OSMem.c index 509fc5e88d..43e86b5141 100644 --- a/rts/posix/OSMem.c +++ b/rts/posix/OSMem.c @@ -75,7 +75,7 @@ void osMemInit(void)  // the mmap() interface.  static void * -my_mmap (void *addr, lnat size) +my_mmap (void *addr, W_ size)  {      void *ret; @@ -136,7 +136,7 @@ my_mmap (void *addr, lnat size)  // mblocks.  static void * -gen_map_mblocks (lnat size) +gen_map_mblocks (W_ size)  {      int slop;      StgWord8 *ret; @@ -177,7 +177,7 @@ void *  osGetMBlocks(nat n)  {    caddr_t ret; -  lnat size = MBLOCK_SIZE * (lnat)n; +  W_ size = MBLOCK_SIZE * (W_)n;    if (next_request == 0) {        // use gen_map_mblocks the first time. @@ -226,9 +226,9 @@ void osFreeAllMBlocks(void)      }  } -lnat getPageSize (void) +W_ getPageSize (void)  { -    static lnat pageSize = 0; +    static W_ pageSize = 0;      if (pageSize) {  	return pageSize;      } else { @@ -241,7 +241,7 @@ lnat getPageSize (void)      }  } -void setExecutable (void *p, lnat len, rtsBool exec) +void setExecutable (void *p, W_ len, rtsBool exec)  {      StgWord pageSize = getPageSize(); diff --git a/rts/sm/BlockAlloc.c b/rts/sm/BlockAlloc.c index 72d5b294c8..113031a20b 100644 --- a/rts/sm/BlockAlloc.c +++ b/rts/sm/BlockAlloc.c @@ -142,8 +142,8 @@ static bdescr *free_mblock_list;  // To find the free list in which to place a block, use log_2(size).  // To find a free block of the right size, use log_2_ceil(size). 
-lnat n_alloc_blocks;   // currently allocated blocks -lnat hw_alloc_blocks;  // high-water allocated blocks +W_ n_alloc_blocks;   // currently allocated blocks +W_ hw_alloc_blocks;  // high-water allocated blocks  /* -----------------------------------------------------------------------------     Initialisation @@ -810,7 +810,7 @@ nat /* BLOCKS */  countFreeList(void)  {    bdescr *bd; -  lnat total_blocks = 0; +  W_ total_blocks = 0;    nat ln;    for (ln=0; ln < MAX_FREE_LIST; ln++) { diff --git a/rts/sm/BlockAlloc.h b/rts/sm/BlockAlloc.h index a4890e5c24..1df81d83ea 100644 --- a/rts/sm/BlockAlloc.h +++ b/rts/sm/BlockAlloc.h @@ -26,8 +26,8 @@ void markBlocks (bdescr *bd);  void reportUnmarkedBlocks (void);  #endif -extern lnat n_alloc_blocks;   // currently allocated blocks -extern lnat hw_alloc_blocks;  // high-water allocated blocks +extern W_ n_alloc_blocks;   // currently allocated blocks +extern W_ hw_alloc_blocks;  // high-water allocated blocks  #include "EndPrivate.h" diff --git a/rts/sm/Evac.h b/rts/sm/Evac.h index ad56c644d8..cea2be63ae 100644 --- a/rts/sm/Evac.h +++ b/rts/sm/Evac.h @@ -35,7 +35,7 @@  REGPARM1 void evacuate  (StgClosure **p);  REGPARM1 void evacuate1 (StgClosure **p); -extern lnat thunk_selector_depth; +extern W_ thunk_selector_depth;  #include "EndPrivate.h" diff --git a/rts/sm/GC.c b/rts/sm/GC.c index 6098e7955c..52bf199175 100644 --- a/rts/sm/GC.c +++ b/rts/sm/GC.c @@ -102,7 +102,7 @@ rtsBool major_gc;  /* Data used for allocation area sizing.   */ -static lnat g0_pcnt_kept = 30; // percentage of g0 live at last minor GC  +static W_ g0_pcnt_kept = 30; // percentage of g0 live at last minor GC   /* Mut-list stats */  #ifdef DEBUG @@ -149,7 +149,7 @@ static StgWord dec_running          (void);  static void wakeup_gc_threads       (nat me);  static void shutdown_gc_threads     (nat me);  static void collect_gct_blocks      (void); -static lnat collect_pinned_object_blocks (void); +static StgWord collect_pinned_object_blocks (void);  #if 0 && defined(DEBUG)  static void gcCAFs                  (void); @@ -179,7 +179,7 @@ GarbageCollect (nat collect_gen,  {    bdescr *bd;    generation *gen; -  lnat live_blocks, live_words, allocated, par_max_copied, par_tot_copied; +  StgWord live_blocks, live_words, allocated, par_max_copied, par_tot_copied;  #if defined(THREADED_RTS)    gc_thread *saved_gct;  #endif @@ -1275,14 +1275,14 @@ prepare_collected_gen (generation *gen)      // for a compacted generation, we need to allocate the bitmap      if (gen->mark) { -        lnat bitmap_size; // in bytes +        StgWord bitmap_size; // in bytes          bdescr *bitmap_bdescr;          StgWord *bitmap;          bitmap_size = gen->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE); -	 +          if (bitmap_size > 0) { -            bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)  +            bitmap_bdescr = allocGroup((StgWord)BLOCK_ROUND_UP(bitmap_size)                                         / BLOCK_SIZE);              gen->bitmap = bitmap_bdescr;              bitmap = bitmap_bdescr->start; @@ -1405,12 +1405,12 @@ collect_gct_blocks (void)     purposes.     
-------------------------------------------------------------------------- */ -static lnat +static StgWord  collect_pinned_object_blocks (void)  {      nat n;      bdescr *bd, *prev; -    lnat allocated = 0; +    StgWord allocated = 0;      for (n = 0; n < n_capabilities; n++) {          prev = NULL; @@ -1510,8 +1510,8 @@ resize_generations (void)      nat g;      if (major_gc && RtsFlags.GcFlags.generations > 1) { -        lnat live, size, min_alloc, words; -	const nat max  = RtsFlags.GcFlags.maxHeapSize; +        W_ live, size, min_alloc, words; +        const nat max  = RtsFlags.GcFlags.maxHeapSize;  	const nat gens = RtsFlags.GcFlags.generations;  	// live in the oldest generations @@ -1604,7 +1604,7 @@ resize_generations (void)  static void  resize_nursery (void)  { -    const lnat min_nursery = RtsFlags.GcFlags.minAllocAreaSize * n_capabilities; +    const StgWord min_nursery = RtsFlags.GcFlags.minAllocAreaSize * n_capabilities;      if (RtsFlags.GcFlags.generations == 1)      {   // Two-space collector: @@ -1664,7 +1664,7 @@ resize_nursery (void)  	if (RtsFlags.GcFlags.heapSizeSuggestion)  	{  	    long blocks; -            lnat needed; +            StgWord needed;              calcNeeded(rtsFalse, &needed); // approx blocks needed at next GC diff --git a/rts/sm/GCThread.h b/rts/sm/GCThread.h index 1b811e43fc..7d163cb48a 100644 --- a/rts/sm/GCThread.h +++ b/rts/sm/GCThread.h @@ -134,7 +134,7 @@ typedef struct gc_thread_ {      StgClosure* static_objects;      // live static objects      StgClosure* scavenged_static_objects;   // static objects scavenged so far -    lnat gc_count;                 // number of GCs this thread has done +    W_ gc_count;                 // number of GCs this thread has done      // block that is currently being scanned      bdescr *     scan_bd; @@ -166,7 +166,7 @@ typedef struct gc_thread_ {                                     // instead of the to-space                                     // corresponding to the object -    lnat thunk_selector_depth;     // used to avoid unbounded recursion in  +    W_ thunk_selector_depth;     // used to avoid unbounded recursion in                                      // evacuate() for THUNK_SELECTOR  #ifdef USE_PAPI @@ -176,17 +176,17 @@ typedef struct gc_thread_ {      // -------------------      // stats -    lnat allocated;          // result of clearNursery() -    lnat copied; -    lnat scanned; -    lnat any_work; -    lnat no_work; -    lnat scav_find_work; +    W_ allocated;          // result of clearNursery() +    W_ copied; +    W_ scanned; +    W_ any_work; +    W_ no_work; +    W_ scav_find_work;      Time gc_start_cpu;   // process CPU time      Time gc_start_elapsed;  // process elapsed time      Time gc_start_thread_cpu; // thread CPU time -    lnat gc_start_faults; +    W_ gc_start_faults;      // -------------------      // workspaces diff --git a/rts/sm/GCUtils.c b/rts/sm/GCUtils.c index 677998ff14..996b5f6280 100644 --- a/rts/sm/GCUtils.c +++ b/rts/sm/GCUtils.c @@ -263,7 +263,7 @@ alloc_todo_block (gen_workspace *ws, nat size)  //        bd = hd;          if (size > BLOCK_SIZE_W) { -            bd = allocGroup_sync((lnat)BLOCK_ROUND_UP(size*sizeof(W_)) +            bd = allocGroup_sync((W_)BLOCK_ROUND_UP(size*sizeof(W_))                                   / BLOCK_SIZE);          } else {              bd = allocBlock_sync(); diff --git a/rts/sm/MBlock.c b/rts/sm/MBlock.c index 1801086c2a..6bc4049959 100644 --- a/rts/sm/MBlock.c +++ b/rts/sm/MBlock.c @@ -18,9 +18,9 @@  #include <string.h> -lnat 
peak_mblocks_allocated = 0; -lnat mblocks_allocated = 0; -lnat mpc_misses = 0; +W_ peak_mblocks_allocated = 0; +W_ mblocks_allocated = 0; +W_ mpc_misses = 0;  /* -----------------------------------------------------------------------------     The MBlock Map: provides our implementation of HEAP_ALLOCED() diff --git a/rts/sm/OSMem.h b/rts/sm/OSMem.h index b3003edd1e..a0d615b424 100644 --- a/rts/sm/OSMem.h +++ b/rts/sm/OSMem.h @@ -16,8 +16,8 @@ void *osGetMBlocks(nat n);  void osFreeMBlocks(char *addr, nat n);  void osReleaseFreeMemory(void);  void osFreeAllMBlocks(void); -lnat getPageSize (void); -void setExecutable (void *p, lnat len, rtsBool exec); +W_ getPageSize (void); +void setExecutable (void *p, W_ len, rtsBool exec);  #include "EndPrivate.h" diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c index ec8921cb43..ffd5d30551 100644 --- a/rts/sm/Sanity.c +++ b/rts/sm/Sanity.c @@ -830,7 +830,7 @@ checkRunQueue(Capability *cap)  void findSlop(bdescr *bd);  void findSlop(bdescr *bd)  { -    lnat slop; +    W_ slop;      for (; bd != NULL; bd = bd->link) {          slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start); @@ -841,7 +841,7 @@ void findSlop(bdescr *bd)      }  } -static lnat +static W_  genBlocks (generation *gen)  {      ASSERT(countBlocks(gen->blocks) == gen->n_blocks); @@ -854,10 +854,10 @@ void  memInventory (rtsBool show)  {    nat g, i; -  lnat gen_blocks[RtsFlags.GcFlags.generations]; -  lnat nursery_blocks, retainer_blocks, +  W_ gen_blocks[RtsFlags.GcFlags.generations]; +  W_ nursery_blocks, retainer_blocks,         arena_blocks, exec_blocks; -  lnat live_blocks = 0, free_blocks = 0; +  W_ live_blocks = 0, free_blocks = 0;    rtsBool leak;    // count the blocks we current have diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c index e7e02e6c99..cbdf01b720 100644 --- a/rts/sm/Scav.c +++ b/rts/sm/Scav.c @@ -98,7 +98,7 @@ scavengeTSO (StgTSO *tso)  static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)  { -    lnat m; +    W_ m;      rtsBool any_failed;      StgPtr p, q; @@ -140,7 +140,7 @@ static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)  // scavenge only the marked areas of a MUT_ARR_PTRS  static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)  { -    lnat m; +    W_ m;      StgPtr p, q;      rtsBool any_failed; @@ -322,8 +322,8 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap)  	  //   	  // If the SRT entry hasn't got bit 0 set, the SRT entry points to a  	  // closure that's fixed at link-time, and no extra magic is required. 
-	  if ( (lnat)(*srt) & 0x1 ) { -	      evacuate( (StgClosure**) ((lnat) (*srt) & ~0x1)); +	  if ( (W_)(*srt) & 0x1 ) { +	      evacuate( (StgClosure**) ((W_) (*srt) & ~0x1));  	  } else {  	      evacuate(p);  	  } diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c index 1345705046..5bdaeee443 100644 --- a/rts/sm/Storage.c +++ b/rts/sm/Storage.c @@ -235,7 +235,7 @@ void storageAddCapabilities (nat from, nat to)  void  exitStorage (void)  { -    lnat allocated = updateNurseriesStats(); +    W_ allocated = updateNurseriesStats();      stat_exit(allocated);  } @@ -504,15 +504,15 @@ allocNurseries (nat from, nat to)      assignNurseriesToCapabilities(from, to);  } -lnat +W_  clearNursery (Capability *cap)  {      bdescr *bd; -    lnat allocated = 0; +    W_ allocated = 0;      for (bd = nurseries[cap->no].blocks; bd; bd = bd->link) { -        allocated            += (lnat)(bd->free - bd->start); -        cap->total_allocated += (lnat)(bd->free - bd->start); +        allocated            += (W_)(bd->free - bd->start); +        cap->total_allocated += (W_)(bd->free - bd->start);          bd->free = bd->start;          ASSERT(bd->gen_no == 0);          ASSERT(bd->gen == g0); @@ -528,11 +528,11 @@ resetNurseries (void)      assignNurseriesToCapabilities(0, n_capabilities);  } -lnat +W_  countNurseryBlocks (void)  {      nat i; -    lnat blocks = 0; +    W_ blocks = 0;      for (i = 0; i < n_capabilities; i++) {          blocks += nurseries[i].n_blocks; @@ -633,7 +633,7 @@ move_STACK (StgStack *src, StgStack *dest)     -------------------------------------------------------------------------- */  StgPtr -allocate (Capability *cap, lnat n) +allocate (Capability *cap, W_ n)  {      bdescr *bd;      StgPtr p; @@ -642,7 +642,7 @@ allocate (Capability *cap, lnat n)      CCS_ALLOC(cap->r.rCCCS,n);      if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { -        lnat req_blocks =  (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE; +        W_ req_blocks =  (W_)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;          // Attempting to allocate an object larger than maxHeapSize          // should definitely be disallowed.  (bug #1791) @@ -740,7 +740,7 @@ allocate (Capability *cap, lnat n)     ------------------------------------------------------------------------- */  StgPtr -allocatePinned (Capability *cap, lnat n) +allocatePinned (Capability *cap, W_ n)  {      StgPtr p;      bdescr *bd; @@ -920,10 +920,10 @@ dirty_MVAR(StgRegTable *reg, StgClosure *p)   * need this function for the final stats when the RTS is shutting down.   
* -------------------------------------------------------------------------- */ -lnat +W_  updateNurseriesStats (void)  { -    lnat allocated = 0; +    W_ allocated = 0;      nat i;      for (i = 0; i < n_capabilities; i++) { @@ -935,15 +935,15 @@ updateNurseriesStats (void)      return allocated;  } -lnat +W_  countLargeAllocated (void)  {      return g0->n_new_large_words;  } -lnat countOccupied (bdescr *bd) +W_ countOccupied (bdescr *bd)  { -    lnat words; +    W_ words;      words = 0;      for (; bd != NULL; bd = bd->link) { @@ -953,19 +953,19 @@ lnat countOccupied (bdescr *bd)      return words;  } -lnat genLiveWords (generation *gen) +W_ genLiveWords (generation *gen)  {      return gen->n_words + countOccupied(gen->large_objects);  } -lnat genLiveBlocks (generation *gen) +W_ genLiveBlocks (generation *gen)  {      return gen->n_blocks + gen->n_large_blocks;  } -lnat gcThreadLiveWords (nat i, nat g) +W_ gcThreadLiveWords (nat i, nat g)  { -    lnat words; +    W_ words;      words   = countOccupied(gc_threads[i]->gens[g].todo_bd);      words  += countOccupied(gc_threads[i]->gens[g].part_list); @@ -974,9 +974,9 @@ lnat gcThreadLiveWords (nat i, nat g)      return words;  } -lnat gcThreadLiveBlocks (nat i, nat g) +W_ gcThreadLiveBlocks (nat i, nat g)  { -    lnat blocks; +    W_ blocks;      blocks  = countBlocks(gc_threads[i]->gens[g].todo_bd);      blocks += gc_threads[i]->gens[g].n_part_blocks; @@ -987,10 +987,10 @@ lnat gcThreadLiveBlocks (nat i, nat g)  // Return an accurate count of the live data in the heap, excluding  // generation 0. -lnat calcLiveWords (void) +W_ calcLiveWords (void)  {      nat g; -    lnat live; +    W_ live;      live = 0;      for (g = 0; g < RtsFlags.GcFlags.generations; g++) { @@ -999,10 +999,10 @@ lnat calcLiveWords (void)      return live;  } -lnat calcLiveBlocks (void) +W_ calcLiveBlocks (void)  {      nat g; -    lnat live; +    W_ live;      live = 0;      for (g = 0; g < RtsFlags.GcFlags.generations; g++) { @@ -1021,10 +1021,10 @@ lnat calcLiveBlocks (void)   * that will be collected next time will therefore need twice as many   * blocks since all the data will be copied.   
*/ -extern lnat  -calcNeeded (rtsBool force_major, lnat *blocks_needed) +extern W_  +calcNeeded (rtsBool force_major, memcount *blocks_needed)  { -    lnat needed = 0, blocks; +    W_ needed = 0, blocks;      nat g, N;      generation *gen; @@ -1137,7 +1137,7 @@ void *allocateExec (nat bytes, void **exec_ret)      if (exec_block == NULL ||           exec_block->free + n + 1 > exec_block->start + BLOCK_SIZE_W) {          bdescr *bd; -        lnat pagesize = getPageSize(); +        W_ pagesize = getPageSize();          bd = allocGroup(stg_max(1, pagesize / BLOCK_SIZE));          debugTrace(DEBUG_gc, "allocate exec block %p", bd->start);          bd->gen_no = 0; diff --git a/rts/sm/Storage.h b/rts/sm/Storage.h index b87a32ce09..251e951edc 100644 --- a/rts/sm/Storage.h +++ b/rts/sm/Storage.h @@ -81,28 +81,28 @@ void dirty_MVAR(StgRegTable *reg, StgClosure *p);  extern nursery *nurseries;  void     resetNurseries       ( void ); -lnat     clearNursery         ( Capability *cap ); +W_     clearNursery         ( Capability *cap );  void     resizeNurseries      ( nat blocks );  void     resizeNurseriesFixed ( nat blocks ); -lnat     countNurseryBlocks   ( void ); +W_     countNurseryBlocks   ( void );  /* -----------------------------------------------------------------------------     Stats 'n' DEBUG stuff     -------------------------------------------------------------------------- */ -lnat    updateNurseriesStats (void); -lnat    countLargeAllocated  (void); -lnat    countOccupied        (bdescr *bd); -lnat    calcNeeded           (rtsBool force_major, lnat *blocks_needed); +W_    updateNurseriesStats (void); +W_    countLargeAllocated  (void); +W_    countOccupied        (bdescr *bd); +W_    calcNeeded           (rtsBool force_major, W_ *blocks_needed); -lnat    gcThreadLiveWords  (nat i, nat g); -lnat    gcThreadLiveBlocks (nat i, nat g); +W_    gcThreadLiveWords  (nat i, nat g); +W_    gcThreadLiveBlocks (nat i, nat g); -lnat    genLiveWords  (generation *gen); -lnat    genLiveBlocks (generation *gen); +W_    genLiveWords  (generation *gen); +W_    genLiveBlocks (generation *gen); -lnat    calcLiveBlocks (void); -lnat    calcLiveWords  (void); +W_    calcLiveBlocks (void); +W_    calcLiveWords  (void);  /* ----------------------------------------------------------------------------     Storage manager internal APIs and globals diff --git a/rts/win32/OSMem.c b/rts/win32/OSMem.c index d9a6459f3d..218b25df13 100644 --- a/rts/win32/OSMem.c +++ b/rts/win32/OSMem.c @@ -16,13 +16,13 @@  typedef struct alloc_rec_ {      char* base;     /* non-aligned base address, directly from VirtualAlloc */ -    lnat size;       /* Size in bytes */ +    W_ size;       /* Size in bytes */      struct alloc_rec_* next;  } alloc_rec;  typedef struct block_rec_ {      char* base;         /* base address, non-MBLOCK-aligned */ -    lnat size;           /* size in bytes */ +    W_ size;           /* size in bytes */      struct block_rec_* next;  } block_rec; @@ -46,7 +46,7 @@ alloc_rec*  allocNew(nat n) {      alloc_rec* rec;      rec = (alloc_rec*)stgMallocBytes(sizeof(alloc_rec),"getMBlocks: allocNew"); -    rec->size = ((lnat)n+1)*MBLOCK_SIZE; +    rec->size = ((W_)n+1)*MBLOCK_SIZE;      rec->base =          VirtualAlloc(NULL, rec->size, MEM_RESERVE, PAGE_READWRITE);      if(rec->base==0) { @@ -76,7 +76,7 @@ allocNew(nat n) {  static  void -insertFree(char* alloc_base, lnat alloc_size) { +insertFree(char* alloc_base, W_ alloc_size) {      block_rec temp;      block_rec* it;      block_rec* prev; @@ -116,7 +116,7 
@@ findFreeBlocks(nat n) {      block_rec temp;      block_rec* prev; -    lnat required_size; +    W_ required_size;      it=free_blocks;      required_size = n*MBLOCK_SIZE;      temp.next=free_blocks; temp.base=0; temp.size=0; @@ -124,7 +124,7 @@ findFreeBlocks(nat n) {      /* TODO: Don't just take first block, find smallest sufficient block */      for( ; it!=0 && it->size<required_size; prev=it, it=it->next ) {}      if(it!=0) { -        if( (((lnat)it->base) & MBLOCK_MASK) == 0) { /* MBlock aligned */ +        if( (((W_)it->base) & MBLOCK_MASK) == 0) { /* MBlock aligned */              ret = (void*)it->base;              if(it->size==required_size) {                  prev->next=it->next; @@ -137,7 +137,7 @@ findFreeBlocks(nat n) {              char* need_base;              block_rec* next;              int new_size; -            need_base = (char*)(((lnat)it->base) & ((lnat)~MBLOCK_MASK)) + MBLOCK_SIZE; +            need_base = (char*)(((W_)it->base) & ((W_)~MBLOCK_MASK)) + MBLOCK_SIZE;              next = (block_rec*)stgMallocBytes(                      sizeof(block_rec)                      , "getMBlocks: findFreeBlocks: splitting"); @@ -158,12 +158,12 @@ findFreeBlocks(nat n) {     so we might need to do many VirtualAlloc MEM_COMMITs.  We simply walk the     (ordered) allocated blocks. */  static void -commitBlocks(char* base, lnat size) { +commitBlocks(char* base, W_ size) {      alloc_rec* it;      it=allocs;      for( ; it!=0 && (it->base+it->size)<=base; it=it->next ) {}      for( ; it!=0 && size>0; it=it->next ) { -        lnat size_delta; +        W_ size_delta;          void* temp;          size_delta = it->size - (base-it->base);          if(size_delta>size) size_delta=size; @@ -199,7 +199,7 @@ osGetMBlocks(nat n) {              barf("getMBlocks: misaligned block returned");          } -        commitBlocks(ret, (lnat)MBLOCK_SIZE*n); +        commitBlocks(ret, (W_)MBLOCK_SIZE*n);      }      return ret; @@ -208,7 +208,7 @@ osGetMBlocks(nat n) {  void osFreeMBlocks(char *addr, nat n)  {      alloc_rec *p; -    lnat nBytes = (lnat)n * MBLOCK_SIZE; +    W_ nBytes = (W_)n * MBLOCK_SIZE;      insertFree(addr, nBytes); @@ -229,7 +229,7 @@ void osFreeMBlocks(char *addr, nat n)              nBytes = 0;          }          else { -            lnat bytesToFree = p->base + p->size - addr; +            W_ bytesToFree = p->base + p->size - addr;              if (!VirtualFree(addr, bytesToFree, MEM_DECOMMIT)) {                  sysErrorBelch("osFreeMBlocks: VirtualFree MEM_DECOMMIT failed");                  stg_exit(EXIT_FAILURE); @@ -365,9 +365,9 @@ osFreeAllMBlocks(void)      }  } -lnat getPageSize (void) +W_ getPageSize (void)  { -    static lnat pagesize = 0; +    static W_ pagesize = 0;      if (pagesize) {          return pagesize;      } else { @@ -378,7 +378,7 @@ lnat getPageSize (void)      }  } -void setExecutable (void *p, lnat len, rtsBool exec) +void setExecutable (void *p, W_ len, rtsBool exec)  {      DWORD dwOldProtect = 0;      if (VirtualProtect (p, len,  | 
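The hook prototypes in includes/rts/Hooks.h now take W_ rather than lnat, so a program that links its own definitions of these hooks (in the style of ghc/hschooks.c at the top of this diff) has to match the new signatures. A minimal sketch of such an override, assuming that Rts.h supplies the W_ type and the hook declarations:

    #include <stdio.h>
    #include "Rts.h"   /* assumed to provide W_ and the hook prototypes */

    /* Matches the post-change declaration in includes/rts/Hooks.h:
     *   extern void OutOfHeapHook (W_ request_size, W_ heap_size);
     */
    void
    OutOfHeapHook (W_ request_size, W_ heap_size)   /* both sizes in bytes */
    {
        fprintf(stderr,
                "heap exhausted: request of %zu bytes in a %zu-byte heap\n",
                (size_t)request_size, (size_t)heap_size);
    }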
