author    Lorry Tar Creator <lorry-tar-importer@lorry>    2017-06-27 06:07:23 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>    2017-06-27 06:07:23 +0000
commit    1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree      46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/WTF/wtf
parent    32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
download  WebKitGtk-tarball-master.tar.gz
Diffstat (limited to 'Source/WTF/wtf')
-rw-r--r--Source/WTF/wtf/ASCIICType.h220
-rw-r--r--Source/WTF/wtf/AVLTree.h960
-rw-r--r--Source/WTF/wtf/Assertions.cpp246
-rw-r--r--Source/WTF/wtf/Assertions.h232
-rw-r--r--Source/WTF/wtf/Atomics.cpp53
-rw-r--r--Source/WTF/wtf/Atomics.h585
-rw-r--r--Source/WTF/wtf/AutodrainedPool.h8
-rw-r--r--Source/WTF/wtf/AutomaticThread.cpp235
-rw-r--r--Source/WTF/wtf/AutomaticThread.h193
-rw-r--r--Source/WTF/wtf/BackwardsGraph.h295
-rw-r--r--Source/WTF/wtf/Bag.h46
-rw-r--r--Source/WTF/wtf/BagToHashMap.h6
-rw-r--r--Source/WTF/wtf/BitVector.cpp44
-rw-r--r--Source/WTF/wtf/BitVector.h162
-rw-r--r--Source/WTF/wtf/Bitmap.h208
-rw-r--r--Source/WTF/wtf/BlockObjCExceptions.h32
-rw-r--r--Source/WTF/wtf/BlockPtr.h169
-rw-r--r--Source/WTF/wtf/BloomFilter.h184
-rw-r--r--Source/WTF/wtf/BoundsCheckedPointer.h286
-rw-r--r--Source/WTF/wtf/Box.h81
-rw-r--r--Source/WTF/wtf/Brigand.h2489
-rw-r--r--Source/WTF/wtf/BubbleSort.h102
-rw-r--r--Source/WTF/wtf/ByteOrder.h2
-rw-r--r--Source/WTF/wtf/CMakeLists.txt356
-rw-r--r--Source/WTF/wtf/CONTRIBUTORS.pthreads-win32137
-rw-r--r--Source/WTF/wtf/CheckedArithmetic.h173
-rw-r--r--Source/WTF/wtf/ClockType.cpp (renamed from Source/WTF/wtf/ByteSpinLock.h)54
-rw-r--r--Source/WTF/wtf/ClockType.h (renamed from Source/WTF/wtf/Decoder.h)37
-rw-r--r--Source/WTF/wtf/CommaPrinter.h17
-rw-r--r--Source/WTF/wtf/CompilationThread.cpp6
-rw-r--r--Source/WTF/wtf/CompilationThread.h2
-rw-r--r--Source/WTF/wtf/Compiler.h149
-rw-r--r--Source/WTF/wtf/Compression.cpp179
-rw-r--r--Source/WTF/wtf/Compression.h149
-rw-r--r--Source/WTF/wtf/Condition.h193
-rw-r--r--Source/WTF/wtf/CrossThreadCopier.cpp83
-rw-r--r--Source/WTF/wtf/CrossThreadCopier.h139
-rw-r--r--Source/WTF/wtf/CrossThreadQueue.h96
-rw-r--r--Source/WTF/wtf/CrossThreadTask.h102
-rw-r--r--Source/WTF/wtf/CryptographicUtilities.cpp (renamed from Source/WTF/wtf/PossiblyNull.h)41
-rw-r--r--Source/WTF/wtf/CryptographicUtilities.h38
-rw-r--r--Source/WTF/wtf/CryptographicallyRandomNumber.cpp7
-rw-r--r--Source/WTF/wtf/CryptographicallyRandomNumber.h4
-rw-r--r--Source/WTF/wtf/CurrentTime.cpp86
-rw-r--r--Source/WTF/wtf/CurrentTime.h25
-rw-r--r--Source/WTF/wtf/DataLog.cpp108
-rw-r--r--Source/WTF/wtf/DataLog.h96
-rw-r--r--Source/WTF/wtf/DateMath.cpp80
-rw-r--r--Source/WTF/wtf/DeferrableRefCounted.h38
-rw-r--r--Source/WTF/wtf/DeprecatedOptional.h (renamed from Source/WTF/wtf/PageAllocationAligned.h)49
-rw-r--r--Source/WTF/wtf/Deque.h1189
-rw-r--r--Source/WTF/wtf/DisallowCType.h5
-rw-r--r--Source/WTF/wtf/Dominators.h752
-rw-r--r--Source/WTF/wtf/DynamicAnnotations.cpp61
-rw-r--r--Source/WTF/wtf/DynamicAnnotations.h98
-rw-r--r--Source/WTF/wtf/EnumTraits.h64
-rw-r--r--Source/WTF/wtf/Expected.h456
-rw-r--r--Source/WTF/wtf/ExportMacros.h26
-rw-r--r--Source/WTF/wtf/FastBitVector.cpp48
-rw-r--r--Source/WTF/wtf/FastBitVector.h561
-rw-r--r--Source/WTF/wtf/FastMalloc.cpp5111
-rw-r--r--Source/WTF/wtf/FastMalloc.h338
-rw-r--r--Source/WTF/wtf/FeatureDefines.h433
-rw-r--r--Source/WTF/wtf/FilePrintStream.h5
-rw-r--r--Source/WTF/wtf/FlipBytes.h2
-rw-r--r--Source/WTF/wtf/ForbidHeapAllocation.h37
-rw-r--r--Source/WTF/wtf/Forward.h33
-rw-r--r--Source/WTF/wtf/Function.h100
-rw-r--r--Source/WTF/wtf/FunctionDispatcher.h8
-rw-r--r--Source/WTF/wtf/Functional.h782
-rw-r--r--Source/WTF/wtf/GetPtr.h59
-rw-r--r--Source/WTF/wtf/GraphNodeWorklist.h223
-rw-r--r--Source/WTF/wtf/GregorianDateTime.cpp23
-rw-r--r--Source/WTF/wtf/HashCountedSet.h104
-rw-r--r--Source/WTF/wtf/HashFunctions.h52
-rw-r--r--Source/WTF/wtf/HashIterators.h26
-rw-r--r--Source/WTF/wtf/HashMap.h187
-rw-r--r--Source/WTF/wtf/HashMethod.h (renamed from Source/WTF/wtf/Encoder.h)36
-rw-r--r--Source/WTF/wtf/HashSet.h104
-rw-r--r--Source/WTF/wtf/HashTable.cpp15
-rw-r--r--Source/WTF/wtf/HashTable.h356
-rw-r--r--Source/WTF/wtf/HashTraits.h143
-rw-r--r--Source/WTF/wtf/Hasher.h (renamed from Source/WTF/wtf/StringHasher.h)185
-rw-r--r--Source/WTF/wtf/HexNumber.h43
-rw-r--r--Source/WTF/wtf/Indenter.h67
-rw-r--r--Source/WTF/wtf/IndexMap.h82
-rw-r--r--Source/WTF/wtf/IndexSet.h163
-rw-r--r--Source/WTF/wtf/IndexSparseSet.h147
-rw-r--r--Source/WTF/wtf/IndexedContainerIterator.h81
-rw-r--r--Source/WTF/wtf/InlineASM.h11
-rw-r--r--Source/WTF/wtf/Insertion.h31
-rw-r--r--Source/WTF/wtf/IteratorAdaptors.h20
-rw-r--r--Source/WTF/wtf/IteratorRange.h4
-rw-r--r--Source/WTF/wtf/LEBDecoder.h107
-rw-r--r--Source/WTF/wtf/ListDump.h67
-rw-r--r--Source/WTF/wtf/ListHashSet.h404
-rw-r--r--Source/WTF/wtf/Lock.cpp52
-rw-r--r--Source/WTF/wtf/Lock.h153
-rw-r--r--Source/WTF/wtf/LockAlgorithm.h238
-rw-r--r--Source/WTF/wtf/LockedPrintStream.cpp64
-rw-r--r--Source/WTF/wtf/LockedPrintStream.h58
-rw-r--r--Source/WTF/wtf/Locker.h81
-rw-r--r--Source/WTF/wtf/LoggingAccumulator.h38
-rw-r--r--Source/WTF/wtf/MD5.cpp22
-rw-r--r--Source/WTF/wtf/MD5.h8
-rw-r--r--Source/WTF/wtf/MainThread.cpp180
-rw-r--r--Source/WTF/wtf/MainThread.h46
-rw-r--r--Source/WTF/wtf/MallocPtr.h19
-rw-r--r--Source/WTF/wtf/MathExtras.h184
-rw-r--r--Source/WTF/wtf/MediaTime.cpp243
-rw-r--r--Source/WTF/wtf/MediaTime.h58
-rw-r--r--Source/WTF/wtf/MemoryFootprint.cpp50
-rw-r--r--Source/WTF/wtf/MemoryFootprint.h37
-rw-r--r--Source/WTF/wtf/MessageQueue.h75
-rw-r--r--Source/WTF/wtf/MetaAllocator.cpp41
-rw-r--r--Source/WTF/wtf/MetaAllocator.h13
-rw-r--r--Source/WTF/wtf/MetaAllocatorHandle.h7
-rw-r--r--Source/WTF/wtf/MonotonicTime.cpp53
-rw-r--r--Source/WTF/wtf/MonotonicTime.h144
-rw-r--r--Source/WTF/wtf/NakedPtr.h119
-rw-r--r--Source/WTF/wtf/NeverDestroyed.h62
-rw-r--r--Source/WTF/wtf/NumberOfCores.cpp21
-rw-r--r--Source/WTF/wtf/OSAllocator.h20
-rw-r--r--Source/WTF/wtf/OSAllocatorPosix.cpp18
-rw-r--r--Source/WTF/wtf/OSAllocatorWin.cpp8
-rw-r--r--Source/WTF/wtf/OSObjectPtr.h136
-rw-r--r--Source/WTF/wtf/OSRandomSource.cpp43
-rw-r--r--Source/WTF/wtf/OSRandomSource.h2
-rw-r--r--Source/WTF/wtf/OptionSet.h144
-rw-r--r--Source/WTF/wtf/Optional.h1109
-rw-r--r--Source/WTF/wtf/OrderMaker.h143
-rw-r--r--Source/WTF/wtf/OwnPtr.h193
-rw-r--r--Source/WTF/wtf/OwnPtrCommon.h70
-rw-r--r--Source/WTF/wtf/PageAllocationAligned.cpp85
-rw-r--r--Source/WTF/wtf/PageBlock.h1
-rw-r--r--Source/WTF/wtf/PageReservation.h7
-rw-r--r--Source/WTF/wtf/ParallelHelperPool.cpp237
-rw-r--r--Source/WTF/wtf/ParallelHelperPool.h220
-rw-r--r--Source/WTF/wtf/ParallelJobsGeneric.cpp10
-rw-r--r--Source/WTF/wtf/ParallelJobsGeneric.h10
-rw-r--r--Source/WTF/wtf/ParallelVectorIterator.h82
-rw-r--r--Source/WTF/wtf/ParkingLot.cpp813
-rw-r--r--Source/WTF/wtf/ParkingLot.h182
-rw-r--r--Source/WTF/wtf/PassOwnPtr.h170
-rw-r--r--Source/WTF/wtf/PassRef.h162
-rw-r--r--Source/WTF/wtf/PassRefPtr.h39
-rw-r--r--Source/WTF/wtf/Platform.h764
-rw-r--r--Source/WTF/wtf/PlatformGTK.cmake26
-rw-r--r--Source/WTF/wtf/PlatformJSCOnly.cmake32
-rw-r--r--Source/WTF/wtf/PlatformUserPreferredLanguages.h45
-rw-r--r--Source/WTF/wtf/PlatformUserPreferredLanguagesUnix.cpp52
-rw-r--r--Source/WTF/wtf/PlatformUserPreferredLanguagesWin.cpp83
-rw-r--r--Source/WTF/wtf/PointerComparison.h40
-rw-r--r--Source/WTF/wtf/PrintStream.cpp35
-rw-r--r--Source/WTF/wtf/PrintStream.h319
-rw-r--r--Source/WTF/wtf/ProcessID.h2
-rw-r--r--Source/WTF/wtf/RAMSize.cpp57
-rw-r--r--Source/WTF/wtf/RandomNumber.cpp4
-rw-r--r--Source/WTF/wtf/RandomNumber.h4
-rw-r--r--Source/WTF/wtf/RandomNumberSeed.h7
-rw-r--r--Source/WTF/wtf/RangeSet.h195
-rw-r--r--Source/WTF/wtf/RecursiveLockAdapter.h93
-rw-r--r--Source/WTF/wtf/RedBlackTree.h2
-rw-r--r--Source/WTF/wtf/Ref.h184
-rw-r--r--Source/WTF/wtf/RefCounted.h30
-rw-r--r--Source/WTF/wtf/RefCountedArray.h13
-rw-r--r--Source/WTF/wtf/RefCountedLeakCounter.cpp4
-rw-r--r--Source/WTF/wtf/RefCounter.h132
-rw-r--r--Source/WTF/wtf/RefPtr.h389
-rw-r--r--Source/WTF/wtf/RefPtrHashMap.h334
-rw-r--r--Source/WTF/wtf/RetainPtr.h487
-rw-r--r--Source/WTF/wtf/RunLoop.cpp50
-rw-r--r--Source/WTF/wtf/RunLoop.h123
-rw-r--r--Source/WTF/wtf/RunLoopTimer.h83
-rw-r--r--Source/WTF/wtf/RunLoopTimerCF.cpp92
-rw-r--r--Source/WTF/wtf/SHA1.cpp68
-rw-r--r--Source/WTF/wtf/SHA1.h14
-rw-r--r--Source/WTF/wtf/SaturatedArithmetic.h54
-rw-r--r--Source/WTF/wtf/SchedulePair.h94
-rw-r--r--Source/WTF/wtf/SchedulePairCF.cpp (renamed from Source/WTF/wtf/gtk/MainThreadGtk.cpp)29
-rw-r--r--Source/WTF/wtf/Scope.h79
-rw-r--r--Source/WTF/wtf/ScopedLambda.h190
-rw-r--r--Source/WTF/wtf/Seconds.cpp78
-rw-r--r--Source/WTF/wtf/Seconds.h265
-rw-r--r--Source/WTF/wtf/SegmentedVector.h144
-rw-r--r--Source/WTF/wtf/SentinelLinkedList.h109
-rw-r--r--Source/WTF/wtf/SetForScope.h (renamed from Source/WTF/wtf/TemporaryChange.h)35
-rw-r--r--Source/WTF/wtf/SharedTask.h131
-rw-r--r--Source/WTF/wtf/SimpleStats.h8
-rw-r--r--Source/WTF/wtf/SixCharacterHash.cpp24
-rw-r--r--Source/WTF/wtf/SizeLimits.cpp83
-rw-r--r--Source/WTF/wtf/SmallPtrSet.h253
-rw-r--r--Source/WTF/wtf/Spectrum.h41
-rw-r--r--Source/WTF/wtf/StackBounds.cpp13
-rw-r--r--Source/WTF/wtf/StackBounds.h42
-rw-r--r--Source/WTF/wtf/StackStats.cpp302
-rw-r--r--Source/WTF/wtf/StackStats.h7
-rw-r--r--Source/WTF/wtf/StaticConstructors.h2
-rw-r--r--Source/WTF/wtf/StdLibExtras.h286
-rw-r--r--Source/WTF/wtf/Stopwatch.h95
-rw-r--r--Source/WTF/wtf/StreamBuffer.h6
-rw-r--r--Source/WTF/wtf/StringExtras.h57
-rw-r--r--Source/WTF/wtf/StringPrintStream.cpp34
-rw-r--r--Source/WTF/wtf/StringPrintStream.h56
-rw-r--r--Source/WTF/wtf/SynchronizedFixedQueue.h121
-rw-r--r--Source/WTF/wtf/SystemTracing.h119
-rw-r--r--Source/WTF/wtf/TCPackedCache.h234
-rw-r--r--Source/WTF/wtf/TCPageMap.h361
-rw-r--r--Source/WTF/wtf/TCSpinLock.h133
-rw-r--r--Source/WTF/wtf/TCSystemAlloc.cpp513
-rw-r--r--Source/WTF/wtf/TCSystemAlloc.h75
-rw-r--r--Source/WTF/wtf/ThreadFunctionInvocation.h2
-rw-r--r--Source/WTF/wtf/ThreadIdentifierDataPthreads.cpp7
-rw-r--r--Source/WTF/wtf/ThreadSafeRefCounted.h88
-rw-r--r--Source/WTF/wtf/ThreadSpecific.h160
-rw-r--r--Source/WTF/wtf/ThreadSpecificWin.cpp101
-rw-r--r--Source/WTF/wtf/Threading.cpp145
-rw-r--r--Source/WTF/wtf/Threading.h66
-rw-r--r--Source/WTF/wtf/ThreadingPrimitives.h4
-rw-r--r--Source/WTF/wtf/ThreadingPthreads.cpp61
-rw-r--r--Source/WTF/wtf/ThreadingWin.cpp56
-rw-r--r--Source/WTF/wtf/TimeWithDynamicClockType.cpp147
-rw-r--r--Source/WTF/wtf/TimeWithDynamicClockType.h145
-rw-r--r--Source/WTF/wtf/TinyLRUCache.h84
-rw-r--r--Source/WTF/wtf/TinyPtrSet.h521
-rw-r--r--Source/WTF/wtf/TypeCasts.h112
-rw-r--r--Source/WTF/wtf/UniStdExtras.cpp67
-rw-r--r--Source/WTF/wtf/UniStdExtras.h5
-rw-r--r--Source/WTF/wtf/UniqueRef.h79
-rw-r--r--Source/WTF/wtf/VMTags.h7
-rw-r--r--Source/WTF/wtf/Variant.h2079
-rw-r--r--Source/WTF/wtf/Vector.h575
-rw-r--r--Source/WTF/wtf/VectorTraits.h24
-rw-r--r--Source/WTF/wtf/WTFThreadData.cpp54
-rw-r--r--Source/WTF/wtf/WTFThreadData.h92
-rw-r--r--Source/WTF/wtf/WallTime.cpp53
-rw-r--r--Source/WTF/wtf/WallTime.h143
-rw-r--r--Source/WTF/wtf/WeakPtr.h84
-rw-r--r--Source/WTF/wtf/WeakRandom.h113
-rw-r--r--Source/WTF/wtf/WindowsExtras.h17
-rw-r--r--Source/WTF/wtf/WordLock.cpp267
-rw-r--r--Source/WTF/wtf/WordLock.h118
-rw-r--r--Source/WTF/wtf/WorkQueue.cpp158
-rw-r--r--Source/WTF/wtf/WorkQueue.h117
-rw-r--r--Source/WTF/wtf/dtoa.cpp15
-rw-r--r--Source/WTF/wtf/dtoa.h15
-rw-r--r--Source/WTF/wtf/dtoa/COPYING26
-rw-r--r--Source/WTF/wtf/dtoa/LICENSE26
-rw-r--r--Source/WTF/wtf/dtoa/README11
-rw-r--r--Source/WTF/wtf/dtoa/bignum.cc18
-rw-r--r--Source/WTF/wtf/dtoa/double-conversion.cc13
-rw-r--r--Source/WTF/wtf/dtoa/strtod.cc13
-rw-r--r--Source/WTF/wtf/dtoa/utils.h6
-rw-r--r--Source/WTF/wtf/generic/MainThreadGeneric.cpp43
-rw-r--r--Source/WTF/wtf/generic/RunLoopGeneric.cpp288
-rw-r--r--Source/WTF/wtf/generic/WorkQueueGeneric.cpp73
-rw-r--r--Source/WTF/wtf/glib/GLibUtilities.cpp (renamed from Source/WTF/wtf/gobject/GlibUtilities.cpp)2
-rw-r--r--Source/WTF/wtf/glib/GLibUtilities.h (renamed from Source/WTF/wtf/gobject/GlibUtilities.h)13
-rw-r--r--Source/WTF/wtf/glib/GMutexLocker.h (renamed from Source/WTF/wtf/gobject/GMutexLocker.h)74
-rw-r--r--Source/WTF/wtf/glib/GRefPtr.cpp (renamed from Source/WTF/wtf/gobject/GRefPtr.cpp)29
-rw-r--r--Source/WTF/wtf/glib/GRefPtr.h (renamed from Source/WTF/wtf/gobject/GRefPtr.h)60
-rw-r--r--Source/WTF/wtf/glib/GTypedefs.h (renamed from Source/WTF/wtf/gobject/GTypedefs.h)8
-rw-r--r--Source/WTF/wtf/glib/GUniquePtr.h (renamed from Source/WTF/wtf/gobject/GUniquePtr.h)7
-rw-r--r--Source/WTF/wtf/glib/MainThreadGLib.cpp81
-rw-r--r--Source/WTF/wtf/glib/RunLoopGLib.cpp215
-rw-r--r--Source/WTF/wtf/gtk/RunLoopGtk.cpp169
-rw-r--r--Source/WTF/wtf/mbmalloc.cpp58
-rw-r--r--Source/WTF/wtf/persistence/Coder.h47
-rw-r--r--Source/WTF/wtf/persistence/Coders.cpp156
-rw-r--r--Source/WTF/wtf/persistence/Coders.h302
-rw-r--r--Source/WTF/wtf/persistence/Decoder.cpp133
-rw-r--r--Source/WTF/wtf/persistence/Decoder.h100
-rw-r--r--Source/WTF/wtf/persistence/Encoder.cpp126
-rw-r--r--Source/WTF/wtf/persistence/Encoder.h114
-rw-r--r--Source/WTF/wtf/spi/darwin/CommonCryptoSPI.h46
-rw-r--r--Source/WTF/wtf/spi/darwin/SandboxSPI.h55
-rw-r--r--Source/WTF/wtf/spi/darwin/XPCSPI.h162
-rw-r--r--Source/WTF/wtf/spi/darwin/dyldSPI.h69
-rw-r--r--Source/WTF/wtf/text/ASCIIFastPath.h16
-rw-r--r--Source/WTF/wtf/text/AtomicString.cpp467
-rw-r--r--Source/WTF/wtf/text/AtomicString.h250
-rw-r--r--Source/WTF/wtf/text/AtomicStringHash.h21
-rw-r--r--Source/WTF/wtf/text/AtomicStringImpl.cpp540
-rw-r--r--Source/WTF/wtf/text/AtomicStringImpl.h91
-rw-r--r--Source/WTF/wtf/text/AtomicStringTable.cpp15
-rw-r--r--Source/WTF/wtf/text/AtomicStringTable.h1
-rw-r--r--Source/WTF/wtf/text/Base64.cpp76
-rw-r--r--Source/WTF/wtf/text/Base64.h134
-rw-r--r--Source/WTF/wtf/text/CString.cpp14
-rw-r--r--Source/WTF/wtf/text/CString.h8
-rw-r--r--Source/WTF/wtf/text/IntegerToStringConversion.h84
-rw-r--r--Source/WTF/wtf/text/LChar.h8
-rw-r--r--Source/WTF/wtf/text/LineBreakIteratorPoolICU.h132
-rw-r--r--Source/WTF/wtf/text/OrdinalNumber.h54
-rw-r--r--Source/WTF/wtf/text/StringBuffer.h2
-rw-r--r--Source/WTF/wtf/text/StringBuilder.cpp153
-rw-r--r--Source/WTF/wtf/text/StringBuilder.h55
-rw-r--r--Source/WTF/wtf/text/StringCommon.h656
-rw-r--r--Source/WTF/wtf/text/StringConcatenate.h893
-rw-r--r--Source/WTF/wtf/text/StringConcatenateNumbers.h175
-rw-r--r--Source/WTF/wtf/text/StringHash.h37
-rw-r--r--Source/WTF/wtf/text/StringImpl.cpp942
-rw-r--r--Source/WTF/wtf/text/StringImpl.h974
-rw-r--r--Source/WTF/wtf/text/StringOperators.h8
-rw-r--r--Source/WTF/wtf/text/StringStatics.cpp36
-rw-r--r--Source/WTF/wtf/text/StringView.cpp285
-rw-r--r--Source/WTF/wtf/text/StringView.h952
-rw-r--r--Source/WTF/wtf/text/SymbolImpl.cpp59
-rw-r--r--Source/WTF/wtf/text/SymbolImpl.h126
-rw-r--r--Source/WTF/wtf/text/SymbolRegistry.cpp63
-rw-r--r--Source/WTF/wtf/text/SymbolRegistry.h113
-rw-r--r--Source/WTF/wtf/text/TextBreakIterator.cpp448
-rw-r--r--Source/WTF/wtf/text/TextBreakIterator.h191
-rw-r--r--Source/WTF/wtf/text/TextBreakIteratorInternalICU.h37
-rw-r--r--Source/WTF/wtf/text/TextPosition.h37
-rw-r--r--Source/WTF/wtf/text/UniquedStringImpl.h65
-rw-r--r--Source/WTF/wtf/text/WTFString.cpp273
-rw-r--r--Source/WTF/wtf/text/WTFString.h352
-rw-r--r--Source/WTF/wtf/text/icu/UTextProvider.cpp72
-rw-r--r--Source/WTF/wtf/text/icu/UTextProvider.h111
-rw-r--r--Source/WTF/wtf/text/icu/UTextProviderLatin1.cpp394
-rw-r--r--Source/WTF/wtf/text/icu/UTextProviderLatin1.h46
-rw-r--r--Source/WTF/wtf/text/icu/UTextProviderUTF16.cpp184
-rw-r--r--Source/WTF/wtf/text/icu/UTextProviderUTF16.h37
-rw-r--r--Source/WTF/wtf/text/unix/TextBreakIteratorInternalICUUnix.cpp41
-rw-r--r--Source/WTF/wtf/threads/BinarySemaphore.cpp11
-rw-r--r--Source/WTF/wtf/threads/BinarySemaphore.h13
-rw-r--r--Source/WTF/wtf/unicode/CharacterNames.h36
-rw-r--r--Source/WTF/wtf/unicode/Collator.h53
-rw-r--r--Source/WTF/wtf/unicode/CollatorDefault.cpp48
-rw-r--r--Source/WTF/wtf/unicode/ScriptCodesFromICU.h153
-rw-r--r--Source/WTF/wtf/unicode/UTF8.cpp42
-rw-r--r--Source/WTF/wtf/unicode/UTF8.h11
-rw-r--r--Source/WTF/wtf/unicode/Unicode.h35
-rw-r--r--Source/WTF/wtf/unicode/UnicodeMacrosFromICU.h100
-rw-r--r--Source/WTF/wtf/unicode/icu/CollatorICU.cpp267
-rw-r--r--Source/WTF/wtf/unicode/icu/UnicodeIcu.h32
-rw-r--r--Source/WTF/wtf/win/GDIObject.h131
338 files changed, 35890 insertions, 19031 deletions
diff --git a/Source/WTF/wtf/ASCIICType.h b/Source/WTF/wtf/ASCIICType.h
index 18e108e1b..f5540b834 100644
--- a/Source/WTF/wtf/ASCIICType.h
+++ b/Source/WTF/wtf/ASCIICType.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2007-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,6 +30,7 @@
#define WTF_ASCIICType_h
#include <wtf/Assertions.h>
+#include <wtf/text/LChar.h>
// The behavior of many of the functions in the <ctype.h> header is dependent
// on the current locale. But in the WebKit project, all uses of those functions
@@ -43,126 +44,216 @@
namespace WTF {
-template<typename CharType> inline bool isASCII(CharType c)
+template<typename CharacterType> bool isASCII(CharacterType);
+template<typename CharacterType> bool isASCIIAlpha(CharacterType);
+template<typename CharacterType> bool isASCIIAlphanumeric(CharacterType);
+template<typename CharacterType> bool isASCIIBinaryDigit(CharacterType);
+template<typename CharacterType> bool isASCIIDigit(CharacterType);
+template<typename CharacterType> bool isASCIIHexDigit(CharacterType);
+template<typename CharacterType> bool isASCIILower(CharacterType);
+template<typename CharacterType> bool isASCIIOctalDigit(CharacterType);
+template<typename CharacterType> bool isASCIIPrintable(CharacterType);
+template<typename CharacterType> bool isASCIISpace(CharacterType);
+template<typename CharacterType> bool isASCIIUpper(CharacterType);
+
+template<typename CharacterType> CharacterType toASCIILower(CharacterType);
+template<typename CharacterType> CharacterType toASCIIUpper(CharacterType);
+
+template<typename CharacterType> uint8_t toASCIIHexValue(CharacterType);
+template<typename CharacterType> uint8_t toASCIIHexValue(CharacterType firstCharacter, CharacterType secondCharacter);
+
+char lowerNibbleToASCIIHexDigit(uint8_t);
+char upperNibbleToASCIIHexDigit(uint8_t);
+char lowerNibbleToLowercaseASCIIHexDigit(uint8_t);
+char upperNibbleToLowercaseASCIIHexDigit(uint8_t);
+
+template<typename CharacterType> bool isASCIIAlphaCaselessEqual(CharacterType, char expectedASCIILowercaseLetter);
+
+// The toASCIILowerUnchecked function can be used for comparing any input character
+// to a lowercase English character. The isASCIIAlphaCaselessEqual function should
+// be used for regular comparison of ASCII alpha characters, but switch statements
+// in the CSS tokenizer, for example, instead make direct use of toASCIILowerUnchecked.
+template<typename CharacterType> CharacterType toASCIILowerUnchecked(CharacterType);
+
+const unsigned char asciiCaseFoldTable[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+};
+
+template<typename CharacterType> inline bool isASCII(CharacterType character)
+{
+ return !(character & ~0x7F);
+}
+
+template<typename CharacterType> inline bool isASCIILower(CharacterType character)
{
- return !(c & ~0x7F);
+ return character >= 'a' && character <= 'z';
}
-template<typename CharType> inline bool isASCIIAlpha(CharType c)
+template<typename CharacterType> inline CharacterType toASCIILowerUnchecked(CharacterType character)
{
- return (c | 0x20) >= 'a' && (c | 0x20) <= 'z';
+ // This function can be used for comparing any input character
+ // to a lowercase English character. The isASCIIAlphaCaselessEqual
+ // below should be used for regular comparison of ASCII alpha
+ // characters, but switch statements in CSS tokenizer instead make
+ // direct use of this function.
+ return character | 0x20;
}
-template<typename CharType> inline bool isASCIIDigit(CharType c)
+template<typename CharacterType> inline bool isASCIIAlpha(CharacterType character)
{
- return c >= '0' && c <= '9';
+ return isASCIILower(toASCIILowerUnchecked(character));
}
-template<typename CharType> inline bool isASCIIAlphanumeric(CharType c)
+template<typename CharacterType> inline bool isASCIIDigit(CharacterType character)
{
- return isASCIIDigit(c) || isASCIIAlpha(c);
+ return character >= '0' && character <= '9';
}
-template<typename CharType> inline bool isASCIIHexDigit(CharType c)
+template<typename CharacterType> inline bool isASCIIAlphanumeric(CharacterType character)
{
- return isASCIIDigit(c) || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f');
+ return isASCIIDigit(character) || isASCIIAlpha(character);
}
-template<typename CharType> inline bool isASCIILower(CharType c)
+template<typename CharacterType> inline bool isASCIIHexDigit(CharacterType character)
{
- return c >= 'a' && c <= 'z';
+ return isASCIIDigit(character) || (toASCIILowerUnchecked(character) >= 'a' && toASCIILowerUnchecked(character) <= 'f');
}
-template<typename CharType> inline bool isASCIIOctalDigit(CharType c)
+template<typename CharacterType> inline bool isASCIIBinaryDigit(CharacterType character)
{
- return (c >= '0') & (c <= '7');
+ return character == '0' || character == '1';
}
-template<typename CharType> inline bool isASCIIPrintable(CharType c)
+template<typename CharacterType> inline bool isASCIIOctalDigit(CharacterType character)
{
- return c >= ' ' && c <= '~';
+ return character >= '0' && character <= '7';
+}
+
+template<typename CharacterType> inline bool isASCIIPrintable(CharacterType character)
+{
+ return character >= ' ' && character <= '~';
}
/*
- Statistics from a run of Apple's page load test for callers of isASCIISpace:
-
- character count
- --------- -----
- non-spaces 689383
- 20 space 294720
- 0A \n 89059
- 09 \t 28320
- 0D \r 0
- 0C \f 0
- 0B \v 0
- */
-template<typename CharType> inline bool isASCIISpace(CharType c)
+ Statistics from a run of Apple's page load test for callers of isASCIISpace:
+
+ character count
+ --------- -----
+ non-spaces 689383
+ 20 space 294720
+ 0A \n 89059
+ 09 \t 28320
+ 0D \r 0
+ 0C \f 0
+ 0B \v 0
+
+ Because of those, we first check to quickly return false for non-control characters,
+ then check for space itself to quickly return true for that case, then do the rest.
+*/
+template<typename CharacterType> inline bool isASCIISpace(CharacterType character)
{
- return c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9));
+ return character <= ' ' && (character == ' ' || (character <= 0xD && character >= 0x9));
}
-template<typename CharType> inline bool isASCIIUpper(CharType c)
+template<typename CharacterType> inline bool isASCIIUpper(CharacterType character)
{
- return c >= 'A' && c <= 'Z';
+ return character >= 'A' && character <= 'Z';
}
-template<typename CharType> inline CharType toASCIILower(CharType c)
+template<typename CharacterType> inline CharacterType toASCIILower(CharacterType character)
{
- return c | ((c >= 'A' && c <= 'Z') << 5);
+ return character | (isASCIIUpper(character) << 5);
}
-template<typename CharType> inline CharType toASCIILowerUnchecked(CharType character)
+template<> inline char toASCIILower(char character)
{
- // This function can be used for comparing any input character
- // to a lowercase English character. The isASCIIAlphaCaselessEqual
- // below should be used for regular comparison of ASCII alpha
- // characters, but switch statements in CSS tokenizer require
- // direct use of this function.
- return character | 0x20;
+ return static_cast<char>(asciiCaseFoldTable[static_cast<uint8_t>(character)]);
}
-template<typename CharType> inline CharType toASCIIUpper(CharType c)
+template<> inline LChar toASCIILower(LChar character)
{
- return c & ~((c >= 'a' && c <= 'z') << 5);
+ return asciiCaseFoldTable[character];
}
-template<typename CharType> inline int toASCIIHexValue(CharType c)
+template<typename CharacterType> inline CharacterType toASCIIUpper(CharacterType character)
{
- ASSERT(isASCIIHexDigit(c));
- return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF;
+ return character & ~(isASCIILower(character) << 5);
}
-template<typename CharType> inline int toASCIIHexValue(CharType upperValue, CharType lowerValue)
+template<typename CharacterType> inline uint8_t toASCIIHexValue(CharacterType character)
{
- ASSERT(isASCIIHexDigit(upperValue) && isASCIIHexDigit(lowerValue));
- return ((toASCIIHexValue(upperValue) << 4) & 0xF0) | toASCIIHexValue(lowerValue);
+ ASSERT(isASCIIHexDigit(character));
+ return character < 'A' ? character - '0' : (character - 'A' + 10) & 0xF;
}
-inline char lowerNibbleToASCIIHexDigit(char c)
+template<typename CharacterType> inline uint8_t toASCIIHexValue(CharacterType firstCharacter, CharacterType secondCharacter)
{
- char nibble = c & 0xF;
- return nibble < 10 ? '0' + nibble : 'A' + nibble - 10;
+ return toASCIIHexValue(firstCharacter) << 4 | toASCIIHexValue(secondCharacter);
}
-inline char upperNibbleToASCIIHexDigit(char c)
+inline char lowerNibbleToASCIIHexDigit(uint8_t value)
{
- char nibble = (c >> 4) & 0xF;
- return nibble < 10 ? '0' + nibble : 'A' + nibble - 10;
+ uint8_t nibble = value & 0xF;
+ return nibble + (nibble < 10 ? '0' : 'A' - 10);
}
-template<typename CharType> inline bool isASCIIAlphaCaselessEqual(CharType cssCharacter, char character)
+inline char upperNibbleToASCIIHexDigit(uint8_t value)
{
- // This function compares a (preferrably) constant ASCII
- // lowercase letter to any input character.
- ASSERT(character >= 'a' && character <= 'z');
- return LIKELY(toASCIILowerUnchecked(cssCharacter) == character);
+ uint8_t nibble = value >> 4;
+ return nibble + (nibble < 10 ? '0' : 'A' - 10);
+}
+
+inline char lowerNibbleToLowercaseASCIIHexDigit(uint8_t value)
+{
+ uint8_t nibble = value & 0xF;
+ return nibble + (nibble < 10 ? '0' : 'a' - 10);
+}
+
+inline char upperNibbleToLowercaseASCIIHexDigit(uint8_t value)
+{
+ uint8_t nibble = value >> 4;
+ return nibble + (nibble < 10 ? '0' : 'a' - 10);
+}
+
+template<typename CharacterType> inline bool isASCIIAlphaCaselessEqual(CharacterType inputCharacter, char expectedASCIILowercaseLetter)
+{
+ // Name of this argument says this must be a lowercase letter, but it can actually be:
+ // - a lowercase letter
+ // - a numeric digit
+ // - a space
+ // - punctuation in the range 0x21-0x3F, including "-", "/", and "+"
+ // It cannot be:
+ // - an uppercase letter
+ // - a non-ASCII character
+ // - other punctuation, such as underscore and backslash
+ // - a control character such as "\n"
+ // FIXME: Would be nice to make both the function name and expectedASCIILowercaseLetter argument name clearer.
+ ASSERT(toASCIILowerUnchecked(expectedASCIILowercaseLetter) == expectedASCIILowercaseLetter);
+ return LIKELY(toASCIILowerUnchecked(inputCharacter) == expectedASCIILowercaseLetter);
}
}
using WTF::isASCII;
using WTF::isASCIIAlpha;
+using WTF::isASCIIAlphaCaselessEqual;
using WTF::isASCIIAlphanumeric;
+using WTF::isASCIIBinaryDigit;
using WTF::isASCIIDigit;
using WTF::isASCIIHexDigit;
using WTF::isASCIILower;
@@ -170,12 +261,13 @@ using WTF::isASCIIOctalDigit;
using WTF::isASCIIPrintable;
using WTF::isASCIISpace;
using WTF::isASCIIUpper;
+using WTF::lowerNibbleToASCIIHexDigit;
+using WTF::lowerNibbleToLowercaseASCIIHexDigit;
using WTF::toASCIIHexValue;
using WTF::toASCIILower;
using WTF::toASCIILowerUnchecked;
using WTF::toASCIIUpper;
-using WTF::lowerNibbleToASCIIHexDigit;
using WTF::upperNibbleToASCIIHexDigit;
-using WTF::isASCIIAlphaCaselessEqual;
+using WTF::upperNibbleToLowercaseASCIIHexDigit;
#endif
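
[Editor's note on the hunks above] The reworked helpers are meant to compose without any locale dependence, unlike their <ctype.h> counterparts. A minimal usage sketch, assuming nothing beyond the functions declared in this header after including <wtf/ASCIICType.h>; the percent-escape helper names below are illustrative, not part of WTF:

// Illustrative sketch only: a tiny percent-escape round trip built on the helpers above.
// Assumes <wtf/ASCIICType.h>; decodePercentByte/encodePercentByte are hypothetical names.
#include <cstdint>
#include <wtf/ASCIICType.h>

static bool decodePercentByte(char high, char low, uint8_t& result)
{
    // toASCIIHexValue() asserts on non-hex input, so validate with the predicate first.
    if (!WTF::isASCIIHexDigit(high) || !WTF::isASCIIHexDigit(low))
        return false;
    result = WTF::toASCIIHexValue(high, low); // (high nibble << 4) | low nibble
    return true;
}

static void encodePercentByte(uint8_t byte, char out[3])
{
    out[0] = '%';
    out[1] = WTF::upperNibbleToASCIIHexDigit(byte); // 0x2f -> '2'
    out[2] = WTF::lowerNibbleToASCIIHexDigit(byte); // 0x2f -> 'F'
}

This checked-then-convert pattern is why the header pairs each assertion-guarded conversion (toASCIIHexValue) with a matching predicate (isASCIIHexDigit).
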
diff --git a/Source/WTF/wtf/AVLTree.h b/Source/WTF/wtf/AVLTree.h
deleted file mode 100644
index 61d76fb98..000000000
--- a/Source/WTF/wtf/AVLTree.h
+++ /dev/null
@@ -1,960 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Based on Abstract AVL Tree Template v1.5 by Walt Karas
- * <http://geocities.com/wkaras/gen_cpp/avl_tree.html>.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef AVL_TREE_H_
-#define AVL_TREE_H_
-
-#include <array>
-#include <wtf/Assertions.h>
-
-namespace WTF {
-
-// Here is the reference class for BSet.
-//
-// class BSet
-// {
-// public:
-//
-// class ANY_bitref
-// {
-// public:
-// operator bool ();
-// void operator = (bool b);
-// };
-//
-// // Does not have to initialize bits.
-// BSet();
-//
-// // Must return a valid value for index when 0 <= index < maxDepth
-// ANY_bitref operator [] (unsigned index);
-//
-// // Set all bits to 1.
-// void set();
-//
-// // Set all bits to 0.
-// void reset();
-// };
-
-template<unsigned maxDepth>
-class AVLTreeDefaultBSet {
-public:
- bool& operator[](unsigned i) { ASSERT_WITH_SECURITY_IMPLICATION(i < maxDepth); return m_data[i]; }
- void set() { for (unsigned i = 0; i < maxDepth; ++i) m_data[i] = true; }
- void reset() { for (unsigned i = 0; i < maxDepth; ++i) m_data[i] = false; }
-
-private:
- std::array<bool, maxDepth> m_data;
-};
-
-// How to determine maxDepth:
-// d Minimum number of nodes
-// 2 2
-// 3 4
-// 4 7
-// 5 12
-// 6 20
-// 7 33
-// 8 54
-// 9 88
-// 10 143
-// 11 232
-// 12 376
-// 13 609
-// 14 986
-// 15 1,596
-// 16 2,583
-// 17 4,180
-// 18 6,764
-// 19 10,945
-// 20 17,710
-// 21 28,656
-// 22 46,367
-// 23 75,024
-// 24 121,392
-// 25 196,417
-// 26 317,810
-// 27 514,228
-// 28 832,039
-// 29 1,346,268
-// 30 2,178,308
-// 31 3,524,577
-// 32 5,702,886
-// 33 9,227,464
-// 34 14,930,351
-// 35 24,157,816
-// 36 39,088,168
-// 37 63,245,985
-// 38 102,334,154
-// 39 165,580,140
-// 40 267,914,295
-// 41 433,494,436
-// 42 701,408,732
-// 43 1,134,903,169
-// 44 1,836,311,902
-// 45 2,971,215,072
-//
-// E.g., if, in a particular instantiation, the maximum number of nodes in a tree instance is 1,000,000, the maximum depth should be 28.
-// You pick 28 because MN(28) is 832,039, which is less than or equal to 1,000,000, and MN(29) is 1,346,268, which is strictly greater than 1,000,000.
-
-template <class Abstractor, unsigned maxDepth = 32, class BSet = AVLTreeDefaultBSet<maxDepth>>
-class AVLTree {
-public:
-
- typedef typename Abstractor::key key;
- typedef typename Abstractor::handle handle;
- typedef typename Abstractor::size size;
-
- enum SearchType {
- EQUAL = 1,
- LESS = 2,
- GREATER = 4,
- LESS_EQUAL = EQUAL | LESS,
- GREATER_EQUAL = EQUAL | GREATER
- };
-
-
- Abstractor& abstractor() { return abs; }
-
- inline handle insert(handle h);
-
- inline handle search(key k, SearchType st = EQUAL);
- inline handle search_least();
- inline handle search_greatest();
-
- inline handle remove(key k);
-
- inline handle subst(handle new_node);
-
- void purge() { abs.root = null(); }
-
- bool is_empty() { return abs.root == null(); }
-
- AVLTree() { abs.root = null(); }
-
- class Iterator {
- public:
-
- // Initialize depth to invalid value, to indicate iterator is
- // invalid. (Depth is zero-base.)
- Iterator() { depth = ~0U; }
-
- void start_iter(AVLTree &tree, key k, SearchType st = EQUAL)
- {
- // Mask of high bit in an int.
- const int MASK_HIGH_BIT = (int) ~ ((~ (unsigned) 0) >> 1);
-
- // Save the tree that we're going to iterate through in a
- // member variable.
- tree_ = &tree;
-
- int cmp, target_cmp;
- handle h = tree_->abs.root;
- unsigned d = 0;
-
- depth = ~0U;
-
- if (h == null())
- // Tree is empty.
- return;
-
- if (st & LESS)
- // Key can be greater than key of starting node.
- target_cmp = 1;
- else if (st & GREATER)
- // Key can be less than key of starting node.
- target_cmp = -1;
- else
- // Key must be same as key of starting node.
- target_cmp = 0;
-
- for (;;) {
- cmp = cmp_k_n(k, h);
- if (cmp == 0) {
- if (st & EQUAL) {
- // Equal node was sought and found as starting node.
- depth = d;
- break;
- }
- cmp = -target_cmp;
- } else if (target_cmp != 0) {
- if (!((cmp ^ target_cmp) & MASK_HIGH_BIT)) {
- // cmp and target_cmp are both negative or both positive.
- depth = d;
- }
- }
- h = cmp < 0 ? get_lt(h) : get_gt(h);
- if (h == null())
- break;
- branch[d] = cmp > 0;
- path_h[d++] = h;
- }
- }
-
- void start_iter_least(AVLTree &tree)
- {
- tree_ = &tree;
-
- handle h = tree_->abs.root;
-
- depth = ~0U;
-
- branch.reset();
-
- while (h != null()) {
- if (depth != ~0U)
- path_h[depth] = h;
- depth++;
- h = get_lt(h);
- }
- }
-
- void start_iter_greatest(AVLTree &tree)
- {
- tree_ = &tree;
-
- handle h = tree_->abs.root;
-
- depth = ~0U;
-
- branch.set();
-
- while (h != null()) {
- if (depth != ~0U)
- path_h[depth] = h;
- depth++;
- h = get_gt(h);
- }
- }
-
- handle operator*()
- {
- if (depth == ~0U)
- return null();
-
- return depth == 0 ? tree_->abs.root : path_h[depth - 1];
- }
-
- void operator++()
- {
- if (depth != ~0U) {
- handle h = get_gt(**this);
- if (h == null()) {
- do {
- if (depth == 0) {
- depth = ~0U;
- break;
- }
- depth--;
- } while (branch[depth]);
- } else {
- branch[depth] = true;
- path_h[depth++] = h;
- for (;;) {
- h = get_lt(h);
- if (h == null())
- break;
- branch[depth] = false;
- path_h[depth++] = h;
- }
- }
- }
- }
-
- void operator--()
- {
- if (depth != ~0U) {
- handle h = get_lt(**this);
- if (h == null())
- do {
- if (depth == 0) {
- depth = ~0U;
- break;
- }
- depth--;
- } while (!branch[depth]);
- else {
- branch[depth] = false;
- path_h[depth++] = h;
- for (;;) {
- h = get_gt(h);
- if (h == null())
- break;
- branch[depth] = true;
- path_h[depth++] = h;
- }
- }
- }
- }
-
- void operator++(int) { ++(*this); }
- void operator--(int) { --(*this); }
-
- protected:
-
- // Tree being iterated over.
- AVLTree *tree_;
-
- // Records a path into the tree. If branch[n] is true, indicates
- // take greater branch from the nth node in the path, otherwise
- // take the less branch. branch[0] gives branch from root, and
- // so on.
- BSet branch;
-
- // Zero-based depth of path into tree.
- unsigned depth;
-
- // Handles of nodes in path from root to current node (returned by *).
- handle path_h[maxDepth - 1];
-
- int cmp_k_n(key k, handle h) { return tree_->abs.compare_key_node(k, h); }
- int cmp_n_n(handle h1, handle h2) { return tree_->abs.compare_node_node(h1, h2); }
- handle get_lt(handle h) { return tree_->abs.get_less(h); }
- handle get_gt(handle h) { return tree_->abs.get_greater(h); }
- handle null() { return tree_->abs.null(); }
- };
-
- template<typename fwd_iter>
- bool build(fwd_iter p, size num_nodes)
- {
- if (num_nodes == 0) {
- abs.root = null();
- return true;
- }
-
- // Gives path to subtree being built. If branch[N] is false, branch
- // less from the node at depth N, if true branch greater.
- BSet branch;
-
- // If rem[N] is true, then for the current subtree at depth N, it's
- // greater subtree has one more node than it's less subtree.
- BSet rem;
-
- // Depth of root node of current subtree.
- unsigned depth = 0;
-
- // Number of nodes in current subtree.
- size num_sub = num_nodes;
-
- // The algorithm relies on a stack of nodes whose less subtree has
- // been built, but whose right subtree has not yet been built. The
- // stack is implemented as linked list. The nodes are linked
- // together by having the "greater" handle of a node set to the
- // next node in the list. "less_parent" is the handle of the first
- // node in the list.
- handle less_parent = null();
-
- // h is root of current subtree, child is one of its children.
- handle h, child;
-
- for (;;) {
- while (num_sub > 2) {
- // Subtract one for root of subtree.
- num_sub--;
- rem[depth] = !!(num_sub & 1);
- branch[depth++] = false;
- num_sub >>= 1;
- }
-
- if (num_sub == 2) {
- // Build a subtree with two nodes, slanting to greater.
- // I arbitrarily chose to always have the extra node in the
- // greater subtree when there is an odd number of nodes to
- // split between the two subtrees.
-
- h = *p;
- p++;
- child = *p;
- p++;
- set_lt(child, null());
- set_gt(child, null());
- set_bf(child, 0);
- set_gt(h, child);
- set_lt(h, null());
- set_bf(h, 1);
- } else { // num_sub == 1
- // Build a subtree with one node.
-
- h = *p;
- p++;
- set_lt(h, null());
- set_gt(h, null());
- set_bf(h, 0);
- }
-
- while (depth) {
- depth--;
- if (!branch[depth])
- // We've completed a less subtree.
- break;
-
- // We've completed a greater subtree, so attach it to
- // its parent (that is less than it). We pop the parent
- // off the stack of less parents.
- child = h;
- h = less_parent;
- less_parent = get_gt(h);
- set_gt(h, child);
- // num_sub = 2 * (num_sub - rem[depth]) + rem[depth] + 1
- num_sub <<= 1;
- num_sub += 1 - rem[depth];
- if (num_sub & (num_sub - 1))
- // num_sub is not a power of 2
- set_bf(h, 0);
- else
- // num_sub is a power of 2
- set_bf(h, 1);
- }
-
- if (num_sub == num_nodes)
- // We've completed the full tree.
- break;
-
- // The subtree we've completed is the less subtree of the
- // next node in the sequence.
-
- child = h;
- h = *p;
- p++;
- set_lt(h, child);
-
- // Put h into stack of less parents.
- set_gt(h, less_parent);
- less_parent = h;
-
- // Proceed to creating greater than subtree of h.
- branch[depth] = true;
- num_sub += rem[depth++];
-
- } // end for (;;)
-
- abs.root = h;
-
- return true;
- }
-
-protected:
-
- friend class Iterator;
-
- // Create a class whose sole purpose is to take advantage of
- // the "empty member" optimization.
- struct abs_plus_root : public Abstractor {
- // The handle of the root element in the AVL tree.
- handle root;
- };
-
- abs_plus_root abs;
-
-
- handle get_lt(handle h) { return abs.get_less(h); }
- void set_lt(handle h, handle lh) { abs.set_less(h, lh); }
-
- handle get_gt(handle h) { return abs.get_greater(h); }
- void set_gt(handle h, handle gh) { abs.set_greater(h, gh); }
-
- int get_bf(handle h) { return abs.get_balance_factor(h); }
- void set_bf(handle h, int bf) { abs.set_balance_factor(h, bf); }
-
- int cmp_k_n(key k, handle h) { return abs.compare_key_node(k, h); }
- int cmp_n_n(handle h1, handle h2) { return abs.compare_node_node(h1, h2); }
-
- handle null() { return abs.null(); }
-
-private:
-
- // Balances subtree, returns handle of root node of subtree
- // after balancing.
- handle balance(handle bal_h)
- {
- handle deep_h;
-
- // Either the "greater than" or the "less than" subtree of
- // this node has to be 2 levels deeper (or else it wouldn't
- // need balancing).
-
- if (get_bf(bal_h) > 0) {
- // "Greater than" subtree is deeper.
-
- deep_h = get_gt(bal_h);
-
- if (get_bf(deep_h) < 0) {
- handle old_h = bal_h;
- bal_h = get_lt(deep_h);
-
- set_gt(old_h, get_lt(bal_h));
- set_lt(deep_h, get_gt(bal_h));
- set_lt(bal_h, old_h);
- set_gt(bal_h, deep_h);
-
- int bf = get_bf(bal_h);
- if (bf != 0) {
- if (bf > 0) {
- set_bf(old_h, -1);
- set_bf(deep_h, 0);
- } else {
- set_bf(deep_h, 1);
- set_bf(old_h, 0);
- }
- set_bf(bal_h, 0);
- } else {
- set_bf(old_h, 0);
- set_bf(deep_h, 0);
- }
- } else {
- set_gt(bal_h, get_lt(deep_h));
- set_lt(deep_h, bal_h);
- if (get_bf(deep_h) == 0) {
- set_bf(deep_h, -1);
- set_bf(bal_h, 1);
- } else {
- set_bf(deep_h, 0);
- set_bf(bal_h, 0);
- }
- bal_h = deep_h;
- }
- } else {
- // "Less than" subtree is deeper.
-
- deep_h = get_lt(bal_h);
-
- if (get_bf(deep_h) > 0) {
- handle old_h = bal_h;
- bal_h = get_gt(deep_h);
- set_lt(old_h, get_gt(bal_h));
- set_gt(deep_h, get_lt(bal_h));
- set_gt(bal_h, old_h);
- set_lt(bal_h, deep_h);
-
- int bf = get_bf(bal_h);
- if (bf != 0) {
- if (bf < 0) {
- set_bf(old_h, 1);
- set_bf(deep_h, 0);
- } else {
- set_bf(deep_h, -1);
- set_bf(old_h, 0);
- }
- set_bf(bal_h, 0);
- } else {
- set_bf(old_h, 0);
- set_bf(deep_h, 0);
- }
- } else {
- set_lt(bal_h, get_gt(deep_h));
- set_gt(deep_h, bal_h);
- if (get_bf(deep_h) == 0) {
- set_bf(deep_h, 1);
- set_bf(bal_h, -1);
- } else {
- set_bf(deep_h, 0);
- set_bf(bal_h, 0);
- }
- bal_h = deep_h;
- }
- }
-
- return bal_h;
- }
-
-};
-
-template <class Abstractor, unsigned maxDepth, class BSet>
-inline typename AVLTree<Abstractor, maxDepth, BSet>::handle
-AVLTree<Abstractor, maxDepth, BSet>::insert(handle h)
-{
- set_lt(h, null());
- set_gt(h, null());
- set_bf(h, 0);
-
- if (abs.root == null())
- abs.root = h;
- else {
- // Last unbalanced node encountered in search for insertion point.
- handle unbal = null();
- // Parent of last unbalanced node.
- handle parent_unbal = null();
- // Balance factor of last unbalanced node.
- int unbal_bf;
-
- // Zero-based depth in tree.
- unsigned depth = 0, unbal_depth = 0;
-
- // Records a path into the tree. If branch[n] is true, indicates
- // take greater branch from the nth node in the path, otherwise
- // take the less branch. branch[0] gives branch from root, and
- // so on.
- BSet branch;
-
- handle hh = abs.root;
- handle parent = null();
- int cmp;
-
- do {
- if (get_bf(hh) != 0) {
- unbal = hh;
- parent_unbal = parent;
- unbal_depth = depth;
- }
- cmp = cmp_n_n(h, hh);
- if (cmp == 0)
- // Duplicate key.
- return hh;
- parent = hh;
- hh = cmp < 0 ? get_lt(hh) : get_gt(hh);
- branch[depth++] = cmp > 0;
- } while (hh != null());
-
- // Add node to insert as leaf of tree.
- if (cmp < 0)
- set_lt(parent, h);
- else
- set_gt(parent, h);
-
- depth = unbal_depth;
-
- if (unbal == null())
- hh = abs.root;
- else {
- cmp = branch[depth++] ? 1 : -1;
- unbal_bf = get_bf(unbal);
- if (cmp < 0)
- unbal_bf--;
- else // cmp > 0
- unbal_bf++;
- hh = cmp < 0 ? get_lt(unbal) : get_gt(unbal);
- if ((unbal_bf != -2) && (unbal_bf != 2)) {
- // No rebalancing of tree is necessary.
- set_bf(unbal, unbal_bf);
- unbal = null();
- }
- }
-
- if (hh != null())
- while (h != hh) {
- cmp = branch[depth++] ? 1 : -1;
- if (cmp < 0) {
- set_bf(hh, -1);
- hh = get_lt(hh);
- } else { // cmp > 0
- set_bf(hh, 1);
- hh = get_gt(hh);
- }
- }
-
- if (unbal != null()) {
- unbal = balance(unbal);
- if (parent_unbal == null())
- abs.root = unbal;
- else {
- depth = unbal_depth - 1;
- cmp = branch[depth] ? 1 : -1;
- if (cmp < 0)
- set_lt(parent_unbal, unbal);
- else // cmp > 0
- set_gt(parent_unbal, unbal);
- }
- }
- }
-
- return h;
-}
-
-template <class Abstractor, unsigned maxDepth, class BSet>
-inline typename AVLTree<Abstractor, maxDepth, BSet>::handle
-AVLTree<Abstractor, maxDepth, BSet>::search(key k, typename AVLTree<Abstractor, maxDepth, BSet>::SearchType st)
-{
- const int MASK_HIGH_BIT = (int) ~ ((~ (unsigned) 0) >> 1);
-
- int cmp, target_cmp;
- handle match_h = null();
- handle h = abs.root;
-
- if (st & LESS)
- target_cmp = 1;
- else if (st & GREATER)
- target_cmp = -1;
- else
- target_cmp = 0;
-
- while (h != null()) {
- cmp = cmp_k_n(k, h);
- if (cmp == 0) {
- if (st & EQUAL) {
- match_h = h;
- break;
- }
- cmp = -target_cmp;
- } else if (target_cmp != 0)
- if (!((cmp ^ target_cmp) & MASK_HIGH_BIT))
- // cmp and target_cmp are both positive or both negative.
- match_h = h;
- h = cmp < 0 ? get_lt(h) : get_gt(h);
- }
-
- return match_h;
-}
-
-template <class Abstractor, unsigned maxDepth, class BSet>
-inline typename AVLTree<Abstractor, maxDepth, BSet>::handle
-AVLTree<Abstractor, maxDepth, BSet>::search_least()
-{
- handle h = abs.root, parent = null();
-
- while (h != null()) {
- parent = h;
- h = get_lt(h);
- }
-
- return parent;
-}
-
-template <class Abstractor, unsigned maxDepth, class BSet>
-inline typename AVLTree<Abstractor, maxDepth, BSet>::handle
-AVLTree<Abstractor, maxDepth, BSet>::search_greatest()
-{
- handle h = abs.root, parent = null();
-
- while (h != null()) {
- parent = h;
- h = get_gt(h);
- }
-
- return parent;
-}
-
-template <class Abstractor, unsigned maxDepth, class BSet>
-inline typename AVLTree<Abstractor, maxDepth, BSet>::handle
-AVLTree<Abstractor, maxDepth, BSet>::remove(key k)
-{
- // Zero-based depth in tree.
- unsigned depth = 0, rm_depth;
-
- // Records a path into the tree. If branch[n] is true, indicates
- // take greater branch from the nth node in the path, otherwise
- // take the less branch. branch[0] gives branch from root, and
- // so on.
- BSet branch;
-
- handle h = abs.root;
- handle parent = null(), child;
- int cmp, cmp_shortened_sub_with_path = 0;
-
- for (;;) {
- if (h == null())
- // No node in tree with given key.
- return null();
- cmp = cmp_k_n(k, h);
- if (cmp == 0)
- // Found node to remove.
- break;
- parent = h;
- h = cmp < 0 ? get_lt(h) : get_gt(h);
- branch[depth++] = cmp > 0;
- cmp_shortened_sub_with_path = cmp;
- }
- handle rm = h;
- handle parent_rm = parent;
- rm_depth = depth;
-
- // If the node to remove is not a leaf node, we need to get a
- // leaf node, or a node with a single leaf as its child, to put
- // in the place of the node to remove. We will get the greatest
- // node in the less subtree (of the node to remove), or the least
- // node in the greater subtree. We take the leaf node from the
- // deeper subtree, if there is one.
-
- if (get_bf(h) < 0) {
- child = get_lt(h);
- branch[depth] = false;
- cmp = -1;
- } else {
- child = get_gt(h);
- branch[depth] = true;
- cmp = 1;
- }
- depth++;
-
- if (child != null()) {
- cmp = -cmp;
- do {
- parent = h;
- h = child;
- if (cmp < 0) {
- child = get_lt(h);
- branch[depth] = false;
- } else {
- child = get_gt(h);
- branch[depth] = true;
- }
- depth++;
- } while (child != null());
-
- if (parent == rm)
- // Only went through do loop once. Deleted node will be replaced
- // in the tree structure by one of its immediate children.
- cmp_shortened_sub_with_path = -cmp;
- else
- cmp_shortened_sub_with_path = cmp;
-
- // Get the handle of the opposite child, which may not be null.
- child = cmp > 0 ? get_lt(h) : get_gt(h);
- }
-
- if (parent == null())
- // There were only 1 or 2 nodes in this tree.
- abs.root = child;
- else if (cmp_shortened_sub_with_path < 0)
- set_lt(parent, child);
- else
- set_gt(parent, child);
-
- // "path" is the parent of the subtree being eliminated or reduced
- // from a depth of 2 to 1. If "path" is the node to be removed, we
- // set path to the node we're about to poke into the position of the
- // node to be removed.
- handle path = parent == rm ? h : parent;
-
- if (h != rm) {
- // Poke in the replacement for the node to be removed.
- set_lt(h, get_lt(rm));
- set_gt(h, get_gt(rm));
- set_bf(h, get_bf(rm));
- if (parent_rm == null())
- abs.root = h;
- else {
- depth = rm_depth - 1;
- if (branch[depth])
- set_gt(parent_rm, h);
- else
- set_lt(parent_rm, h);
- }
- }
-
- if (path != null()) {
- // Create a temporary linked list from the parent of the path node
- // to the root node.
- h = abs.root;
- parent = null();
- depth = 0;
- while (h != path) {
- if (branch[depth++]) {
- child = get_gt(h);
- set_gt(h, parent);
- } else {
- child = get_lt(h);
- set_lt(h, parent);
- }
- parent = h;
- h = child;
- }
-
- // Climb from the path node to the root node using the linked
- // list, restoring the tree structure and rebalancing as necessary.
- bool reduced_depth = true;
- int bf;
- cmp = cmp_shortened_sub_with_path;
- for (;;) {
- if (reduced_depth) {
- bf = get_bf(h);
- if (cmp < 0)
- bf++;
- else // cmp > 0
- bf--;
- if ((bf == -2) || (bf == 2)) {
- h = balance(h);
- bf = get_bf(h);
- } else
- set_bf(h, bf);
- reduced_depth = (bf == 0);
- }
- if (parent == null())
- break;
- child = h;
- h = parent;
- cmp = branch[--depth] ? 1 : -1;
- if (cmp < 0) {
- parent = get_lt(h);
- set_lt(h, child);
- } else {
- parent = get_gt(h);
- set_gt(h, child);
- }
- }
- abs.root = h;
- }
-
- return rm;
-}
-
-template <class Abstractor, unsigned maxDepth, class BSet>
-inline typename AVLTree<Abstractor, maxDepth, BSet>::handle
-AVLTree<Abstractor, maxDepth, BSet>::subst(handle new_node)
-{
- handle h = abs.root;
- handle parent = null();
- int cmp, last_cmp;
-
- /* Search for node already in tree with same key. */
- for (;;) {
- if (h == null())
- /* No node in tree with same key as new node. */
- return null();
- cmp = cmp_n_n(new_node, h);
- if (cmp == 0)
- /* Found the node to substitute new one for. */
- break;
- last_cmp = cmp;
- parent = h;
- h = cmp < 0 ? get_lt(h) : get_gt(h);
- }
-
- /* Copy tree housekeeping fields from node in tree to new node. */
- set_lt(new_node, get_lt(h));
- set_gt(new_node, get_gt(h));
- set_bf(new_node, get_bf(h));
-
- if (parent == null())
- /* New node is also new root. */
- abs.root = new_node;
- else {
- /* Make parent point to new node. */
- if (last_cmp < 0)
- set_lt(parent, new_node);
- else
- set_gt(parent, new_node);
- }
-
- return h;
-}
-
-}
-
-#endif
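
[Editor's note on the deleted file above] AVLTree was a policy-based container: every node access went through a caller-supplied Abstractor, and the minimum-node table in its header follows the usual AVL recurrence MN(d) = MN(d-1) + MN(d-2) + 1 (for example MN(6) = 12 + 7 + 1 = 20). A hedged sketch of the Abstractor contract the template expected, reconstructed only from the members the deleted code calls; the Node layout and integer key are illustrative:

// Sketch of the policy the removed AVLTree<Abstractor> relied on, inferred from the
// calls in the deleted code (get_less, set_balance_factor, compare_key_node, ...).
// The Node type and int key below are hypothetical examples, not WTF code.
struct Node {
    Node* less { nullptr };
    Node* greater { nullptr };
    int balanceFactor { 0 };
    int key { 0 };
};

struct ExampleAbstractor {
    typedef int key;       // Abstractor::key
    typedef Node* handle;  // Abstractor::handle
    typedef unsigned size; // Abstractor::size

    static handle get_less(handle h) { return h->less; }
    static void set_less(handle h, handle lh) { h->less = lh; }
    static handle get_greater(handle h) { return h->greater; }
    static void set_greater(handle h, handle gh) { h->greater = gh; }
    static int get_balance_factor(handle h) { return h->balanceFactor; }
    static void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; }
    static int compare_key_node(key k, handle h) { return k < h->key ? -1 : k > h->key; }
    static int compare_node_node(handle a, handle b) { return compare_key_node(a->key, b); }
    static handle null() { return nullptr; }
};

// Usage, while the header still existed: WTF::AVLTree<ExampleAbstractor> tree; tree.insert(&node);
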
diff --git a/Source/WTF/wtf/Assertions.cpp b/Source/WTF/wtf/Assertions.cpp
index a37030245..8613df5de 100644
--- a/Source/WTF/wtf/Assertions.cpp
+++ b/Source/WTF/wtf/Assertions.cpp
@@ -12,10 +12,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -35,27 +35,31 @@
#include "Assertions.h"
#include "Compiler.h"
+#include <mutex>
+#include <stdio.h>
+#include <string.h>
+#include <wtf/Lock.h>
+#include <wtf/Locker.h>
+#include <wtf/LoggingAccumulator.h>
#include <wtf/StdLibExtras.h>
#include <wtf/StringExtras.h>
#include <wtf/text/CString.h>
+#include <wtf/text/StringBuilder.h>
#include <wtf/text/WTFString.h>
-#include <stdio.h>
-#include <string.h>
-
#if HAVE(SIGNAL_H)
#include <signal.h>
#endif
#if USE(CF)
#include <CoreFoundation/CFString.h>
-#if PLATFORM(IOS) || __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
-#define WTF_USE_APPLE_SYSTEM_LOG 1
+#if PLATFORM(COCOA)
+#define USE_APPLE_SYSTEM_LOG 1
#include <asl.h>
#endif
#endif // USE(CF)
-#if COMPILER(MSVC) && !OS(WINCE)
+#if COMPILER(MSVC)
#include <crtdbg.h>
#endif
@@ -63,7 +67,12 @@
#include <windows.h>
#endif
-#if OS(DARWIN) || (OS(LINUX) && !defined(__UCLIBC__))
+#if OS(DARWIN)
+#include <sys/sysctl.h>
+#include <unistd.h>
+#endif
+
+#if OS(DARWIN) || (OS(LINUX) && defined(__GLIBC__) && !defined(__UCLIBC__))
#include <cxxabi.h>
#include <dlfcn.h>
#include <execinfo.h>
@@ -71,6 +80,17 @@
extern "C" {
+static void logToStderr(const char* buffer)
+{
+#if USE(APPLE_SYSTEM_LOG)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+ asl_log(0, 0, ASL_LEVEL_NOTICE, "%s", buffer);
+#pragma clang diagnostic pop
+#endif
+ fputs(buffer, stderr);
+}
+
WTF_ATTRIBUTE_PRINTF(1, 0)
static void vprintf_stderr_common(const char* format, va_list args)
{
@@ -91,10 +111,7 @@ static void vprintf_stderr_common(const char* format, va_list args)
CFStringGetCString(str, buffer, length, kCFStringEncodingUTF8);
-#if USE(APPLE_SYSTEM_LOG)
- asl_log(0, 0, ASL_LEVEL_NOTICE, "%s", buffer);
-#endif
- fputs(buffer, stderr);
+ logToStderr(buffer);
free(buffer);
CFRelease(str);
@@ -103,10 +120,13 @@ static void vprintf_stderr_common(const char* format, va_list args)
}
#if USE(APPLE_SYSTEM_LOG)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
va_list copyOfArgs;
va_copy(copyOfArgs, args);
asl_vlog(0, 0, ASL_LEVEL_NOTICE, format, copyOfArgs);
va_end(copyOfArgs);
+#pragma clang diagnostic pop
#endif
// Fall through to write to stderr in the same manner as other platforms.
@@ -121,21 +141,8 @@ static void vprintf_stderr_common(const char* format, va_list args)
if (buffer == NULL)
break;
- if (_vsnprintf(buffer, size, format, args) != -1) {
-#if OS(WINCE)
- // WinCE only supports wide chars
- wchar_t* wideBuffer = (wchar_t*)malloc(size * sizeof(wchar_t));
- if (wideBuffer == NULL)
- break;
- for (unsigned int i = 0; i < size; ++i) {
- if (!(wideBuffer[i] = buffer[i]))
- break;
- }
- OutputDebugStringW(wideBuffer);
- free(wideBuffer);
-#else
+ if (vsnprintf(buffer, size, format, args) != -1) {
OutputDebugStringA(buffer);
-#endif
free(buffer);
break;
}
@@ -148,7 +155,7 @@ static void vprintf_stderr_common(const char* format, va_list args)
vfprintf(stderr, format, args);
}
-#if COMPILER(CLANG) || (COMPILER(GCC) && GCC_VERSION_AT_LEAST(4, 6, 0))
+#if COMPILER(GCC_OR_CLANG)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-nonliteral"
#endif
@@ -181,7 +188,7 @@ static void vprintf_stderr_with_trailing_newline(const char* format, va_list arg
vprintf_stderr_common(formatWithNewline.get(), args);
}
-#if COMPILER(CLANG) || (COMPILER(GCC) && GCC_VERSION_AT_LEAST(4, 6, 0))
+#if COMPILER(GCC_OR_CLANG)
#pragma GCC diagnostic pop
#endif
@@ -196,7 +203,7 @@ static void printf_stderr_common(const char* format, ...)
static void printCallSite(const char* file, int line, const char* function)
{
-#if OS(WINDOWS) && !OS(WINCE) && defined(_DEBUG)
+#if OS(WINDOWS) && defined(_DEBUG)
_CrtDbgReport(_CRT_WARN, file, line, NULL, "%s\n", function);
#else
// By using this format, which matches the format used by MSVC for compiler errors, developers
@@ -233,24 +240,10 @@ void WTFReportArgumentAssertionFailure(const char* file, int line, const char* f
void WTFGetBacktrace(void** stack, int* size)
{
-#if OS(DARWIN) || (OS(LINUX) && !defined(__UCLIBC__))
+#if OS(DARWIN) || (OS(LINUX) && defined(__GLIBC__) && !defined(__UCLIBC__))
*size = backtrace(stack, *size);
-#elif OS(WINDOWS) && !OS(WINCE)
- // The CaptureStackBackTrace function is available in XP, but it is not defined
- // in the Windows Server 2003 R2 Platform SDK. So, we'll grab the function
- // through GetProcAddress.
- typedef WORD (NTAPI* RtlCaptureStackBackTraceFunc)(DWORD, DWORD, PVOID*, PDWORD);
- HMODULE kernel32 = ::GetModuleHandleW(L"Kernel32.dll");
- if (!kernel32) {
- *size = 0;
- return;
- }
- RtlCaptureStackBackTraceFunc captureStackBackTraceFunc = reinterpret_cast<RtlCaptureStackBackTraceFunc>(
- ::GetProcAddress(kernel32, "RtlCaptureStackBackTrace"));
- if (captureStackBackTraceFunc)
- *size = captureStackBackTraceFunc(0, *size, stack, 0);
- else
- *size = 0;
+#elif OS(WINDOWS)
+ *size = RtlCaptureStackBackTrace(0, *size, stack, 0);
#else
*size = 0;
#endif
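
As a rough usage sketch (not part of this patch), WTFGetBacktrace() takes a caller-provided buffer whose capacity is passed in through *size and overwritten with the number of frames actually captured; the result can then be handed to WTFPrintBacktrace():

    // Sketch only, loosely modeled on how WTFReportBacktrace() drives these helpers.
    static void dumpCurrentStack()
    {
        const int capacity = 32;
        void* frames[capacity];
        int count = capacity;
        WTFGetBacktrace(frames, &count); // count now holds the number of captured frames
        if (count > 1)
            WTFPrintBacktrace(frames + 1, count - 1); // skip this helper's own frame
    }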
@@ -270,10 +263,10 @@ void WTFReportBacktrace()
#if OS(DARWIN) || OS(LINUX)
# if PLATFORM(GTK)
# if defined(__GLIBC__) && !defined(__UCLIBC__)
-# define WTF_USE_BACKTRACE_SYMBOLS 1
+# define USE_BACKTRACE_SYMBOLS 1
# endif
# else
-# define WTF_USE_DLADDR 1
+# define USE_DLADDR 1
# endif
#endif
@@ -310,8 +303,8 @@ void WTFPrintBacktrace(void** stack, int size)
#endif
}
-#undef WTF_USE_BACKTRACE_SYMBOLS
-#undef WTF_USE_DLADDR
+#undef USE_BACKTRACE_SYMBOLS
+#undef USE_DLADDR
static WTFCrashHookFunction globalHook = 0;
@@ -320,10 +313,7 @@ void WTFSetCrashHook(WTFCrashHookFunction function)
globalHook = function;
}
-void WTFInvokeCrashHook()
-{
-}
-
+#if !defined(NDEBUG) || !OS(DARWIN)
void WTFCrash()
{
if (globalHook)
@@ -332,25 +322,25 @@ void WTFCrash()
WTFReportBacktrace();
*(int *)(uintptr_t)0xbbadbeef = 0;
// More reliable, but doesn't say BBADBEEF.
-#if COMPILER(CLANG)
+#if COMPILER(GCC_OR_CLANG)
__builtin_trap();
#else
((void(*)())0)();
#endif
}
-
+#else
+// We need to keep WTFCrash() around (even on non-debug OS(DARWIN) builds) as a workaround
+// for presently shipping (circa early 2016) SafariForWebKitDevelopment binaries, which still
+// expect to link to it.
+void WTFCrash()
+{
+ CRASH();
+}
+#endif // !defined(NDEBUG) || !OS(DARWIN)
+
void WTFCrashWithSecurityImplication()
{
- if (globalHook)
- globalHook();
- WTFReportBacktrace();
- *(int *)(uintptr_t)0xfbadbeef = 0;
- // More reliable, but doesn't say fbadbeef.
-#if COMPILER(CLANG)
- __builtin_trap();
-#else
- ((void(*)())0)();
-#endif
+ CRASH();
}
#if HAVE(SIGNAL_H)
@@ -389,6 +379,20 @@ void WTFInstallReportBacktraceOnCrashHook()
#endif
}
+bool WTFIsDebuggerAttached()
+{
+#if OS(DARWIN)
+ struct kinfo_proc info;
+ int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
+ size_t size = sizeof(info);
+ if (sysctl(mib, sizeof(mib) / sizeof(mib[0]), &info, &size, nullptr, 0) == -1)
+ return false;
+ return info.kp_proc.p_flag & P_TRACED;
+#else
+ return false;
+#endif
+}
+
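
A hedged example of how a caller might use WTFIsDebuggerAttached() (the helper below is illustrative, not part of this patch): report a failure but avoid tearing the process down while a developer is attached.

    // Sketch only.
    static void reportFailure(const char* message)
    {
        WTFLogAlways("Failure: %s", message);
        WTFReportBacktrace();
        if (!WTFIsDebuggerAttached())
            CRASH();
        // With a debugger attached, fall through so the developer can inspect state.
    }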
void WTFReportFatalError(const char* file, int line, const char* function, const char* format, ...)
{
va_list args;
@@ -409,15 +413,83 @@ void WTFReportError(const char* file, int line, const char* function, const char
printCallSite(file, line, function);
}
+class WTFLoggingAccumulator {
+public:
+ void accumulate(const String&);
+ void resetAccumulatedLogs();
+ String getAndResetAccumulatedLogs();
+
+private:
+ Lock accumulatorLock;
+ StringBuilder loggingAccumulator;
+};
+
+void WTFLoggingAccumulator::accumulate(const String& log)
+{
+ Locker<Lock> locker(accumulatorLock);
+ loggingAccumulator.append(log);
+}
+
+void WTFLoggingAccumulator::resetAccumulatedLogs()
+{
+ Locker<Lock> locker(accumulatorLock);
+ loggingAccumulator.clear();
+}
+
+String WTFLoggingAccumulator::getAndResetAccumulatedLogs()
+{
+ Locker<Lock> locker(accumulatorLock);
+ String result = loggingAccumulator.toString();
+ loggingAccumulator.clear();
+ return result;
+}
+
+static WTFLoggingAccumulator& loggingAccumulator()
+{
+ static WTFLoggingAccumulator* accumulator;
+ static std::once_flag initializeAccumulatorOnce;
+ std::call_once(initializeAccumulatorOnce, [] {
+ accumulator = new WTFLoggingAccumulator;
+ });
+
+ return *accumulator;
+}
+
void WTFLog(WTFLogChannel* channel, const char* format, ...)
{
- if (channel->state != WTFLogChannelOn)
+ if (channel->state == WTFLogChannelOff)
return;
+ if (channel->state == WTFLogChannelOn) {
+ va_list args;
+ va_start(args, format);
+ vprintf_stderr_with_trailing_newline(format, args);
+ va_end(args);
+ return;
+ }
+
+ ASSERT(channel->state == WTFLogChannelOnWithAccumulation);
+
va_list args;
va_start(args, format);
- vprintf_stderr_with_trailing_newline(format, args);
+
+#if COMPILER(CLANG)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wformat-nonliteral"
+#endif
+ String loggingString = String::format(format, args);
+#if COMPILER(CLANG)
+#pragma clang diagnostic pop
+#endif
+
va_end(args);
+
+ if (!loggingString.endsWith('\n'))
+ loggingString.append('\n');
+
+ loggingAccumulator().accumulate(loggingString);
+
+ logToStderr(loggingString.utf8().data());
}
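
A hedged usage sketch of the accumulation path above (the channel and message are illustrative, and it assumes the WTF::getAndResetAccumulatedLogs()/resetAccumulatedLogs() declarations that accompany this patch): a channel in the WTFLogChannelOnWithAccumulation state both logs to stderr and appends to the process-wide accumulator.

    // Sketch only: a hypothetical channel defined directly for illustration.
    static WTFLogChannel exampleChannel = { WTFLogChannelOnWithAccumulation, "Example" };

    static void demonstrateAccumulation()
    {
        WTFLog(&exampleChannel, "loaded %d resources", 3);
        String logs = WTF::getAndResetAccumulatedLogs(); // "loaded 3 resources\n"
        // ... hand `logs` to a test harness or crash report, then keep logging as usual.
    }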
void WTFLogVerbose(const char* file, int line, const char* function, WTFLogChannel* channel, const char* format, ...)
@@ -427,7 +499,16 @@ void WTFLogVerbose(const char* file, int line, const char* function, WTFLogChann
va_list args;
va_start(args, format);
- vprintf_stderr_with_trailing_newline(format, args);
+
+#if COMPILER(CLANG)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wformat-nonliteral"
+#endif
+ WTFLog(channel, format, args);
+#if COMPILER(CLANG)
+#pragma clang diagnostic pop
+#endif
+
va_end(args);
printCallSite(file, line, function);
@@ -452,7 +533,7 @@ void WTFLogAlwaysAndCrash(const char* format, ...)
va_start(args, format);
WTFLogAlwaysV(format, args);
va_end(args);
- WTFCrash();
+ CRASH();
}
WTFLogChannel* WTFLogChannelByName(WTFLogChannel* channels[], size_t count, const char* name)
@@ -474,6 +555,13 @@ static void setStateOfAllChannels(WTFLogChannel* channels[], size_t channelCount
void WTFInitializeLogChannelStatesFromString(WTFLogChannel* channels[], size_t count, const char* logLevel)
{
+#if !RELEASE_LOG_DISABLED
+ for (size_t i = 0; i < count; ++i) {
+ WTFLogChannel* channel = channels[i];
+ channel->osLogChannel = os_log_create(channel->subsystem, channel->name);
+ }
+#endif
+
String logLevelString = logLevel;
Vector<String> components;
logLevelString.split(',', components);
@@ -487,7 +575,7 @@ void WTFInitializeLogChannelStatesFromString(WTFLogChannel* channels[], size_t c
component = component.substring(1);
}
- if (equalIgnoringCase(component, "all")) {
+ if (equalLettersIgnoringASCIICase(component, "all")) {
setStateOfAllChannels(channels, count, logChannelState);
continue;
}
@@ -500,3 +588,17 @@ void WTFInitializeLogChannelStatesFromString(WTFLogChannel* channels[], size_t c
}
} // extern "C"
+
+namespace WTF {
+
+void resetAccumulatedLogs()
+{
+ loggingAccumulator().resetAccumulatedLogs();
+}
+
+String getAndResetAccumulatedLogs()
+{
+ return loggingAccumulator().getAndResetAccumulatedLogs();
+}
+
+} // namespace WTF
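
For reference, a hedged example of the state string accepted by WTFInitializeLogChannelStatesFromString(), based on the parsing visible above: a comma-separated list of channel names, where a leading '-' turns a channel off, "all" (matched case-insensitively) applies to every channel, and later entries override earlier ones. The channel names here are hypothetical.

    // Sketch only.
    static WTFLogChannel channelA = { WTFLogChannelOff, "ChannelA" };
    static WTFLogChannel channelB = { WTFLogChannelOff, "ChannelB" };

    static void configureLogging()
    {
        WTFLogChannel* channels[] = { &channelA, &channelB };
        // Turn everything on, then switch ChannelB back off.
        WTFInitializeLogChannelStatesFromString(channels, 2, "all,-ChannelB");
    }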
diff --git a/Source/WTF/wtf/Assertions.h b/Source/WTF/wtf/Assertions.h
index 4d968b865..3158c1039 100644
--- a/Source/WTF/wtf/Assertions.h
+++ b/Source/WTF/wtf/Assertions.h
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -26,20 +26,30 @@
#ifndef WTF_Assertions_h
#define WTF_Assertions_h
+#include <wtf/Platform.h>
+
/*
no namespaces because this file has to be includable from C and Objective-C
Note, this file uses many GCC extensions, but it should be compatible with
C, Objective C, C++, and Objective C++.
- For non-debug builds, everything is disabled by default.
- Defining any of the symbols explicitly prevents this from having any effect.
+ For non-debug builds, everything is disabled by default except for "always
+ on" logging. Defining any of the symbols explicitly prevents this from
+ having any effect.
*/
+#undef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
#include <inttypes.h>
#include <stdarg.h>
+#include <stdbool.h>
#include <stddef.h>
-#include <wtf/Platform.h>
+#include <wtf/ExportMacros.h>
+
+#if USE(OS_LOG)
+#include <os/log.h>
+#endif
#ifdef NDEBUG
/* Disable ASSERT* macros in release mode. */
@@ -76,16 +86,23 @@
#define LOG_DISABLED ASSERTIONS_DISABLED_DEFAULT
#endif
-#if COMPILER(GCC)
+#ifndef RELEASE_LOG_DISABLED
+#define RELEASE_LOG_DISABLED !(USE(OS_LOG))
+#endif
+
+#if COMPILER(GCC_OR_CLANG)
#define WTF_PRETTY_FUNCTION __PRETTY_FUNCTION__
#else
#define WTF_PRETTY_FUNCTION __FUNCTION__
#endif
+#if COMPILER(MINGW)
+/* By default MinGW emits warnings when C99 format attributes are used, even if __USE_MINGW_ANSI_STDIO is defined */
+#define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments) __attribute__((__format__(gnu_printf, formatStringArgument, extraArguments)))
+#elif COMPILER(GCC_OR_CLANG) && !defined(__OBJC__)
/* WTF logging functions can process %@ in the format string to log a NSObject* but the printf format attribute
emits a warning when %@ is used in the format string. Until <rdar://problem/5195437> is resolved we can't include
the attribute when being used from Objective-C code in case it decides to use %@. */
-#if COMPILER(GCC) && !defined(__OBJC__)
#define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments) __attribute__((__format__(printf, formatStringArgument, extraArguments)))
#else
#define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments)
@@ -112,19 +129,43 @@ extern "C" {
Signals are ignored by the crash reporter on OS X so we must do better.
*/
-#if COMPILER(CLANG)
+#if COMPILER(GCC_OR_CLANG) || COMPILER(MSVC)
#define NO_RETURN_DUE_TO_CRASH NO_RETURN
#else
#define NO_RETURN_DUE_TO_CRASH
#endif
-typedef enum { WTFLogChannelOff, WTFLogChannelOn } WTFLogChannelState;
+typedef enum { WTFLogChannelOff, WTFLogChannelOn, WTFLogChannelOnWithAccumulation } WTFLogChannelState;
typedef struct {
WTFLogChannelState state;
const char* name;
+#if !RELEASE_LOG_DISABLED
+ const char* subsystem;
+ __unsafe_unretained os_log_t osLogChannel;
+#endif
} WTFLogChannel;
+#define LOG_CHANNEL(name) JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, name)
+#define LOG_CHANNEL_ADDRESS(name) &LOG_CHANNEL(name),
+#define JOIN_LOG_CHANNEL_WITH_PREFIX(prefix, channel) JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel)
+#define JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel) prefix ## channel
+
+#define LOG_CHANNEL_WEBKIT_SUBSYSTEM "com.apple.WebKit"
+
+#define DECLARE_LOG_CHANNEL(name) \
+ extern WTFLogChannel LOG_CHANNEL(name);
+
+#if !defined(DEFINE_LOG_CHANNEL)
+#if RELEASE_LOG_DISABLED
+#define DEFINE_LOG_CHANNEL(name, subsystem) \
+ WTFLogChannel LOG_CHANNEL(name) = { WTFLogChannelOff, #name };
+#else
+#define DEFINE_LOG_CHANNEL(name, subsystem) \
+ WTFLogChannel LOG_CHANNEL(name) = { WTFLogChannelOff, #name, subsystem, OS_LOG_DEFAULT };
+#endif
+#endif
+
WTF_EXPORT_PRIVATE void WTFReportAssertionFailure(const char* file, int line, const char* function, const char* assertion);
WTF_EXPORT_PRIVATE void WTFReportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* assertion, const char* format, ...) WTF_ATTRIBUTE_PRINTF(5, 6);
WTF_EXPORT_PRIVATE void WTFReportArgumentAssertionFailure(const char* file, int line, const char* function, const char* argName, const char* assertion);
@@ -134,7 +175,7 @@ WTF_EXPORT_PRIVATE void WTFLog(WTFLogChannel*, const char* format, ...) WTF_ATTR
WTF_EXPORT_PRIVATE void WTFLogVerbose(const char* file, int line, const char* function, WTFLogChannel*, const char* format, ...) WTF_ATTRIBUTE_PRINTF(5, 6);
WTF_EXPORT_PRIVATE void WTFLogAlwaysV(const char* format, va_list);
WTF_EXPORT_PRIVATE void WTFLogAlways(const char* format, ...) WTF_ATTRIBUTE_PRINTF(1, 2);
-WTF_EXPORT_PRIVATE void WTFLogAlwaysAndCrash(const char* format, ...) WTF_ATTRIBUTE_PRINTF(1, 2) NO_RETURN_DUE_TO_CRASH;
+WTF_EXPORT_PRIVATE NO_RETURN_DUE_TO_CRASH void WTFLogAlwaysAndCrash(const char* format, ...) WTF_ATTRIBUTE_PRINTF(1, 2);
WTF_EXPORT_PRIVATE WTFLogChannel* WTFLogChannelByName(WTFLogChannel*[], size_t count, const char*);
WTF_EXPORT_PRIVATE void WTFInitializeLogChannelStatesFromString(WTFLogChannel*[], size_t count, const char*);
@@ -146,32 +187,42 @@ typedef void (*WTFCrashHookFunction)();
WTF_EXPORT_PRIVATE void WTFSetCrashHook(WTFCrashHookFunction);
WTF_EXPORT_PRIVATE void WTFInstallReportBacktraceOnCrashHook();
-// Exist for binary compatibility with older Safari. Do not use.
-WTF_EXPORT_PRIVATE void WTFInvokeCrashHook();
-#ifdef __cplusplus
-}
-#endif
+WTF_EXPORT_PRIVATE bool WTFIsDebuggerAttached();
#ifndef CRASH
-#define CRASH() WTFCrash()
-#endif
-#ifdef __cplusplus
-extern "C" {
+#if defined(NDEBUG) && OS(DARWIN)
+#if CPU(X86_64) || CPU(X86)
+#define WTFBreakpointTrap() __asm__ volatile ("int3")
+#elif CPU(ARM_THUMB2)
+#define WTFBreakpointTrap() __asm__ volatile ("bkpt #0")
+#elif CPU(ARM64)
+#define WTFBreakpointTrap() __asm__ volatile ("brk #0")
+#else
+#error "Unsupported CPU"
#endif
-WTF_EXPORT_PRIVATE void WTFCrash() NO_RETURN_DUE_TO_CRASH;
-#ifdef __cplusplus
-}
+
+// Crash with a SIGTRAP, i.e. EXC_BREAKPOINT.
+// We are not using __builtin_trap because it is only guaranteed to abort, but not necessarily
+// trigger a SIGTRAP. Instead, we use inline asm to ensure that we trigger the SIGTRAP.
+#define CRASH() do { \
+ WTFBreakpointTrap(); \
+ __builtin_unreachable(); \
+} while (0)
+#else
+#define CRASH() WTFCrash()
#endif
+#endif // CRASH
+
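
For illustration, a hedged hand-expansion of what CRASH() reduces to on a release Darwin x86_64 build under the definitions above (written out by hand, not preprocessor output):

    // Sketch only: effective expansion of CRASH() with NDEBUG on Darwin/x86_64.
    do {
        __asm__ volatile ("int3"); // WTFBreakpointTrap(): raises SIGTRAP (EXC_BREAKPOINT on Mach)
        __builtin_unreachable();   // the compiler does not otherwise know the trap never returns
    } while (0);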
+WTF_EXPORT_PRIVATE NO_RETURN_DUE_TO_CRASH void WTFCrash();
+
#ifndef CRASH_WITH_SECURITY_IMPLICATION
#define CRASH_WITH_SECURITY_IMPLICATION() WTFCrashWithSecurityImplication()
#endif
-#ifdef __cplusplus
-extern "C" {
-#endif
- WTF_EXPORT_PRIVATE void WTFCrashWithSecurityImplication() NO_RETURN_DUE_TO_CRASH;
+WTF_EXPORT_PRIVATE NO_RETURN_DUE_TO_CRASH void WTFCrashWithSecurityImplication();
+
#ifdef __cplusplus
}
#endif
@@ -199,14 +250,6 @@ extern "C" {
Expressions inside them are evaluated in debug builds only.
*/
-#if OS(WINCE)
-/* FIXME: We include this here only to avoid a conflict with the ASSERT macro. */
-#include <windows.h>
-#undef min
-#undef max
-#undef ERROR
-#endif
-
#if OS(WINDOWS)
/* FIXME: Change to use something other than ASSERT to avoid this conflict with the underlying platform */
#undef ASSERT
@@ -217,11 +260,12 @@ extern "C" {
#define ASSERT(assertion) ((void)0)
#define ASSERT_AT(assertion, file, line, function) ((void)0)
#define ASSERT_NOT_REACHED() ((void)0)
+#define ASSERT_IMPLIES(condition, assertion) ((void)0)
#define NO_RETURN_DUE_TO_ASSERT
#define ASSERT_UNUSED(variable, assertion) ((void)variable)
-#ifdef ADDRESS_SANITIZER
+#if ENABLE(SECURITY_ASSERTIONS)
#define ASSERT_WITH_SECURITY_IMPLICATION(assertion) \
(!(assertion) ? \
(WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion), \
@@ -236,23 +280,32 @@ extern "C" {
#else
-#define ASSERT(assertion) \
- (!(assertion) ? \
- (WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion), \
- CRASH()) : \
- (void)0)
+#define ASSERT(assertion) do { \
+ if (!(assertion)) { \
+ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
+ CRASH(); \
+ } \
+} while (0)
-#define ASSERT_AT(assertion, file, line, function) \
- (!(assertion) ? \
- (WTFReportAssertionFailure(file, line, function, #assertion), \
- CRASH()) : \
- (void)0)
+#define ASSERT_AT(assertion, file, line, function) do { \
+ if (!(assertion)) { \
+ WTFReportAssertionFailure(file, line, function, #assertion); \
+ CRASH(); \
+ } \
+} while (0)
#define ASSERT_NOT_REACHED() do { \
WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, 0); \
CRASH(); \
} while (0)
+#define ASSERT_IMPLIES(condition, assertion) do { \
+ if ((condition) && !(assertion)) { \
+ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #condition " => " #assertion); \
+ CRASH(); \
+ } \
+} while (0)
+
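
A hedged usage sketch of the new ASSERT_IMPLIES() (the objects are hypothetical): the assertion only fires when the condition holds and the implied property does not, which reads more directly than ASSERT(!condition || assertion).

    // Sketch only: a buffer that is marked shared must have an owner.
    ASSERT_IMPLIES(buffer->isShared(), buffer->owner());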
#define ASSERT_UNUSED(variable, assertion) ASSERT(assertion)
#define NO_RETURN_DUE_TO_ASSERT NO_RETURN_DUE_TO_CRASH
@@ -278,12 +331,12 @@ extern "C" {
#if ASSERT_MSG_DISABLED
#define ASSERT_WITH_MESSAGE(assertion, ...) ((void)0)
#else
-#define ASSERT_WITH_MESSAGE(assertion, ...) do \
+#define ASSERT_WITH_MESSAGE(assertion, ...) do { \
if (!(assertion)) { \
WTFReportAssertionFailureWithMessage(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion, __VA_ARGS__); \
CRASH(); \
} \
-while (0)
+} while (0)
#endif
/* ASSERT_WITH_MESSAGE_UNUSED */
@@ -291,12 +344,12 @@ while (0)
#if ASSERT_MSG_DISABLED
#define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion, ...) ((void)variable)
#else
-#define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion, ...) do \
+#define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion, ...) do { \
if (!(assertion)) { \
WTFReportAssertionFailureWithMessage(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion, __VA_ARGS__); \
CRASH(); \
} \
-while (0)
+} while (0)
#endif
@@ -308,12 +361,12 @@ while (0)
#else
-#define ASSERT_ARG(argName, assertion) do \
+#define ASSERT_ARG(argName, assertion) do { \
if (!(assertion)) { \
WTFReportArgumentAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #argName, #assertion); \
CRASH(); \
} \
-while (0)
+} while (0)
#endif
@@ -351,9 +404,7 @@ while (0)
#if LOG_DISABLED
#define LOG(channel, ...) ((void)0)
#else
-#define LOG(channel, ...) WTFLog(&JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, channel), __VA_ARGS__)
-#define JOIN_LOG_CHANNEL_WITH_PREFIX(prefix, channel) JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel)
-#define JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel) prefix ## channel
+#define LOG(channel, ...) WTFLog(&LOG_CHANNEL(channel), __VA_ARGS__)
#endif
/* LOG_VERBOSE */
@@ -361,7 +412,39 @@ while (0)
#if LOG_DISABLED
#define LOG_VERBOSE(channel, ...) ((void)0)
#else
-#define LOG_VERBOSE(channel, ...) WTFLogVerbose(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, &JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, channel), __VA_ARGS__)
+#define LOG_VERBOSE(channel, ...) WTFLogVerbose(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, &LOG_CHANNEL(channel), __VA_ARGS__)
+#endif
+
+/* RELEASE_LOG */
+
+#if RELEASE_LOG_DISABLED
+#define RELEASE_LOG( channel, format, ...) ((void)0)
+#define RELEASE_LOG_ERROR(channel, format, ...) LOG_ERROR(format, ##__VA_ARGS__)
+
+#define RELEASE_LOG_IF( isAllowed, channel, format, ...) ((void)0)
+#define RELEASE_LOG_ERROR_IF(isAllowed, channel, format, ...) do { if (isAllowed) RELEASE_LOG_ERROR(channel, format, ##__VA_ARGS__); } while (0)
+#else
+#define RELEASE_LOG( channel, format, ...) os_log( LOG_CHANNEL(channel).osLogChannel, format, ##__VA_ARGS__)
+#define RELEASE_LOG_ERROR(channel, format, ...) os_log_error(LOG_CHANNEL(channel).osLogChannel, format, ##__VA_ARGS__)
+
+#define RELEASE_LOG_IF( isAllowed, channel, format, ...) do { if (isAllowed) RELEASE_LOG( channel, format, ##__VA_ARGS__); } while (0)
+#define RELEASE_LOG_ERROR_IF(isAllowed, channel, format, ...) do { if (isAllowed) RELEASE_LOG_ERROR(channel, format, ##__VA_ARGS__); } while (0)
+#endif
+
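
A hedged sketch of declaring and using a channel with the release-logging macros (the channel name, prefix, and call site are illustrative; the declaration would normally live in a header and the definition in a single translation unit):

    /* Sketch only. In a header, after defining the prefix used by LOG_CHANNEL(): */
    #define LOG_CHANNEL_PREFIX Log
    DECLARE_LOG_CHANNEL(ExampleLoading)

    /* In one .cpp file: */
    DEFINE_LOG_CHANNEL(ExampleLoading, LOG_CHANNEL_WEBKIT_SUBSYSTEM)

    /* At a call site; os_log requires the format string to be a literal: */
    static void reportLoadTime(double seconds, bool didFail, int errorCode)
    {
        RELEASE_LOG(ExampleLoading, "finished loading in %g seconds", seconds);
        RELEASE_LOG_ERROR_IF(didFail, ExampleLoading, "load failed with error %d", errorCode);
    }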
+
+/* RELEASE_ASSERT */
+
+#if ASSERT_DISABLED
+#define RELEASE_ASSERT(assertion) do { \
+ if (UNLIKELY(!(assertion))) \
+ CRASH(); \
+} while (0)
+#define RELEASE_ASSERT_WITH_MESSAGE(assertion, ...) RELEASE_ASSERT(assertion)
+#define RELEASE_ASSERT_NOT_REACHED() CRASH()
+#else
+#define RELEASE_ASSERT(assertion) ASSERT(assertion)
+#define RELEASE_ASSERT_WITH_MESSAGE(assertion, ...) ASSERT_WITH_MESSAGE(assertion, __VA_ARGS__)
+#define RELEASE_ASSERT_NOT_REACHED() ASSERT_NOT_REACHED()
#endif
/* UNREACHABLE_FOR_PLATFORM */
@@ -373,47 +456,12 @@ while (0)
#pragma clang diagnostic ignored "-Wmissing-noreturn"
static inline void UNREACHABLE_FOR_PLATFORM()
{
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
#pragma clang diagnostic pop
#else
-#define UNREACHABLE_FOR_PLATFORM() ASSERT_NOT_REACHED()
-#endif
-
-#if ASSERT_DISABLED
-#define RELEASE_ASSERT(assertion) (UNLIKELY(!(assertion)) ? (CRASH()) : (void)0)
-#define RELEASE_ASSERT_WITH_MESSAGE(assertion, ...) RELEASE_ASSERT(assertion)
-#define RELEASE_ASSERT_NOT_REACHED() CRASH()
-#else
-#define RELEASE_ASSERT(assertion) ASSERT(assertion)
-#define RELEASE_ASSERT_WITH_MESSAGE(assertion, ...) ASSERT_WITH_MESSAGE(assertion, __VA_ARGS__)
-#define RELEASE_ASSERT_NOT_REACHED() ASSERT_NOT_REACHED()
+#define UNREACHABLE_FOR_PLATFORM() RELEASE_ASSERT_NOT_REACHED()
#endif
-/* TYPE CAST */
-
-#define TYPE_CASTS_BASE(ToClassName, argumentType, argumentName, pointerPredicate, referencePredicate) \
-inline ToClassName* to##ToClassName(argumentType* argumentName) \
-{ \
- ASSERT_WITH_SECURITY_IMPLICATION(!argumentName || (pointerPredicate)); \
- return static_cast<ToClassName*>(argumentName); \
-} \
-inline const ToClassName* to##ToClassName(const argumentType* argumentName) \
-{ \
- ASSERT_WITH_SECURITY_IMPLICATION(!argumentName || (pointerPredicate)); \
- return static_cast<const ToClassName*>(argumentName); \
-} \
-inline ToClassName& to##ToClassName(argumentType& argumentName) \
-{ \
- ASSERT_WITH_SECURITY_IMPLICATION(referencePredicate); \
- return static_cast<ToClassName&>(argumentName); \
-} \
-inline const ToClassName& to##ToClassName(const argumentType& argumentName) \
-{ \
- ASSERT_WITH_SECURITY_IMPLICATION(referencePredicate); \
- return static_cast<const ToClassName&>(argumentName); \
-} \
-void to##ToClassName(const ToClassName*); \
-void to##ToClassName(const ToClassName&);
#endif /* WTF_Assertions_h */
diff --git a/Source/WTF/wtf/Atomics.cpp b/Source/WTF/wtf/Atomics.cpp
index 01a4650c4..9f3dcf7c5 100644
--- a/Source/WTF/wtf/Atomics.cpp
+++ b/Source/WTF/wtf/Atomics.cpp
@@ -1,59 +1,26 @@
/*
- * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2007, 2008, 2010, 2012, 2015 Apple Inc. All rights reserved.
* Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- *
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Note: The implementations of InterlockedIncrement and InterlockedDecrement are based
- * on atomic_increment and atomic_exchange_and_add from the Boost C++ Library. The license
- * is virtually identical to the Apple license above but is included here for completeness.
- *
- * Boost Software License - Version 1.0 - August 17th, 2003
- *
- * Permission is hereby granted, free of charge, to any person or organization
- * obtaining a copy of the software and accompanying documentation covered by
- * this license (the "Software") to use, reproduce, display, distribute,
- * execute, and transmit the Software, and to prepare derivative works of the
- * Software, and to permit third-parties to whom the Software is furnished to
- * do so, all subject to the following:
- *
- * The copyright notices in the Software and this entire statement, including
- * the above license grant, this restriction and the following disclaimer,
- * must be included in all copies of the Software, in whole or in part, and
- * all derivative works of the Software, unless such copies or derivative
- * works are solely in the form of machine-executable object code generated by
- * a source language processor.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
- * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
- * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
@@ -65,7 +32,7 @@
// (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=56296). GCC >= 4.8 will support __atomic_* builtin
// functions for this purpose for all the GCC targets, but for current compilers we have to include
// our own implementation.
-#if COMPILER(GCC) && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) && USE(PTHREADS)
+#if COMPILER(GCC_OR_CLANG) && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) && USE(PTHREADS)
#include "ThreadingPrimitives.h"
diff --git a/Source/WTF/wtf/Atomics.h b/Source/WTF/wtf/Atomics.h
index 83eb0d6f4..4a8ee48e7 100644
--- a/Source/WTF/wtf/Atomics.h
+++ b/Source/WTF/wtf/Atomics.h
@@ -1,221 +1,209 @@
/*
- * Copyright (C) 2007, 2008, 2010, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2007-2008, 2010, 2012-2016 Apple Inc. All rights reserved.
* Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- *
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Note: The implementations of InterlockedIncrement and InterlockedDecrement are based
- * on atomic_increment and atomic_exchange_and_add from the Boost C++ Library. The license
- * is virtually identical to the Apple license above but is included here for completeness.
- *
- * Boost Software License - Version 1.0 - August 17th, 2003
- *
- * Permission is hereby granted, free of charge, to any person or organization
- * obtaining a copy of the software and accompanying documentation covered by
- * this license (the "Software") to use, reproduce, display, distribute,
- * execute, and transmit the Software, and to prepare derivative works of the
- * Software, and to permit third-parties to whom the Software is furnished to
- * do so, all subject to the following:
- *
- * The copyright notices in the Software and this entire statement, including
- * the above license grant, this restriction and the following disclaimer,
- * must be included in all copies of the Software, in whole or in part, and
- * all derivative works of the Software, unless such copies or derivative
- * works are solely in the form of machine-executable object code generated by
- * a source language processor.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
- * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
- * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef Atomics_h
#define Atomics_h
-#include <wtf/Platform.h>
+#include <atomic>
#include <wtf/StdLibExtras.h>
#if OS(WINDOWS)
-#if !COMPILER(GCC)
+#if !COMPILER(GCC_OR_CLANG)
extern "C" void _ReadWriteBarrier(void);
#pragma intrinsic(_ReadWriteBarrier)
#endif
#include <windows.h>
+#include <intrin.h>
#endif
namespace WTF {
-#if OS(WINDOWS) && !COMPILER(GCC)
-inline bool weakCompareAndSwap(volatile unsigned* location, unsigned expected, unsigned newValue)
+// Atomic wraps around std::atomic with the sole purpose of making the compare_exchange
+// operations not alter the expected value. This is more in line with how we typically
+// use CAS in our code.
+//
+// Atomic is a struct without explicitly defined constructors so that it can be
+// initialized at compile time.
+
+template<typename T>
+struct Atomic {
+ // Don't pass a non-default value for the order parameter unless you really know
+ // what you are doing and have thought about it very hard. The cost of seq_cst
+    // is usually not high enough to justify the risk of a weaker ordering.
+
+ ALWAYS_INLINE T load(std::memory_order order = std::memory_order_seq_cst) const { return value.load(order); }
+
+ ALWAYS_INLINE T loadRelaxed() const { return load(std::memory_order_relaxed); }
+
+ ALWAYS_INLINE void store(T desired, std::memory_order order = std::memory_order_seq_cst) { value.store(desired, order); }
+
+ ALWAYS_INLINE bool compareExchangeWeak(T expected, T desired, std::memory_order order = std::memory_order_seq_cst)
+ {
+ T expectedOrActual = expected;
+ return value.compare_exchange_weak(expectedOrActual, desired, order);
+ }
+
+ ALWAYS_INLINE bool compareExchangeWeakRelaxed(T expected, T desired)
+ {
+ return compareExchangeWeak(expected, desired, std::memory_order_relaxed);
+ }
+
+ ALWAYS_INLINE bool compareExchangeWeak(T expected, T desired, std::memory_order order_success, std::memory_order order_failure)
+ {
+ T expectedOrActual = expected;
+ return value.compare_exchange_weak(expectedOrActual, desired, order_success, order_failure);
+ }
+
+ ALWAYS_INLINE T compareExchangeStrong(T expected, T desired, std::memory_order order = std::memory_order_seq_cst)
+ {
+ T expectedOrActual = expected;
+ value.compare_exchange_strong(expectedOrActual, desired, order);
+ return expectedOrActual;
+ }
+
+ ALWAYS_INLINE T compareExchangeStrong(T expected, T desired, std::memory_order order_success, std::memory_order order_failure)
+ {
+ T expectedOrActual = expected;
+ value.compare_exchange_strong(expectedOrActual, desired, order_success, order_failure);
+ return expectedOrActual;
+ }
+
+ template<typename U>
+ ALWAYS_INLINE T exchangeAdd(U operand, std::memory_order order = std::memory_order_seq_cst) { return value.fetch_add(operand, order); }
+
+ template<typename U>
+ ALWAYS_INLINE T exchangeAnd(U operand, std::memory_order order = std::memory_order_seq_cst) { return value.fetch_and(operand, order); }
+
+ template<typename U>
+ ALWAYS_INLINE T exchangeOr(U operand, std::memory_order order = std::memory_order_seq_cst) { return value.fetch_or(operand, order); }
+
+ template<typename U>
+ ALWAYS_INLINE T exchangeSub(U operand, std::memory_order order = std::memory_order_seq_cst) { return value.fetch_sub(operand, order); }
+
+ template<typename U>
+ ALWAYS_INLINE T exchangeXor(U operand, std::memory_order order = std::memory_order_seq_cst) { return value.fetch_xor(operand, order); }
+
+ ALWAYS_INLINE T exchange(T newValue, std::memory_order order = std::memory_order_seq_cst) { return value.exchange(newValue, order); }
+
+ template<typename Func>
+ ALWAYS_INLINE bool tryTransactionRelaxed(const Func& func)
+ {
+ T oldValue = load(std::memory_order_relaxed);
+ T newValue = oldValue;
+ func(newValue);
+ return compareExchangeWeakRelaxed(oldValue, newValue);
+ }
+
+ template<typename Func>
+ ALWAYS_INLINE void transactionRelaxed(const Func& func)
+ {
+        while (!tryTransactionRelaxed(func)) { }
+ }
+
+ template<typename Func>
+ ALWAYS_INLINE bool tryTransaction(const Func& func)
+ {
+ T oldValue = load(std::memory_order_relaxed);
+ T newValue = oldValue;
+ func(newValue);
+ return compareExchangeWeak(oldValue, newValue);
+ }
+
+ template<typename Func>
+ ALWAYS_INLINE void transaction(const Func& func)
+ {
+ while (!tryTransaction(func)) { }
+ }
+
+ std::atomic<T> value;
+};
+
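
A hedged usage sketch of the Atomic<T> wrapper (the flag word and bit values are illustrative): because compareExchangeWeak() takes `expected` by value, a failed CAS leaves the caller's copy untouched, and transaction() simply retries the read-modify-write until it succeeds.

    // Sketch only: a statically initializable flags word updated with CAS loops.
    static WTF::Atomic<unsigned> exampleFlags; // zero-initialized, no constructor needed

    inline void setExampleBit(unsigned bit)
    {
        exampleFlags.transaction([&] (unsigned& value) {
            value |= bit; // retried automatically if another thread races us
        });
    }

    inline bool tryClaimExampleBit(unsigned bit)
    {
        unsigned old = exampleFlags.load(std::memory_order_relaxed);
        while (!(old & bit)) {
            if (exampleFlags.compareExchangeWeak(old, old | bit))
                return true; // we won the race and set the bit
            old = exampleFlags.load(std::memory_order_relaxed); // reload and retry
        }
        return false; // someone else already owns the bit
    }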
+template<typename T>
+inline T atomicLoad(T* location, std::memory_order order = std::memory_order_seq_cst)
{
-#if OS(WINCE)
- return InterlockedCompareExchange(reinterpret_cast<LONG*>(const_cast<unsigned*>(location)), static_cast<LONG>(newValue), static_cast<LONG>(expected)) == static_cast<LONG>(expected);
-#else
- return InterlockedCompareExchange(reinterpret_cast<LONG volatile*>(location), static_cast<LONG>(newValue), static_cast<LONG>(expected)) == static_cast<LONG>(expected);
-#endif
+ return bitwise_cast<Atomic<T>*>(location)->load(order);
}
-inline bool weakCompareAndSwap(void*volatile* location, void* expected, void* newValue)
+template<typename T>
+inline void atomicStore(T* location, T newValue, std::memory_order order = std::memory_order_seq_cst)
{
- return InterlockedCompareExchangePointer(location, newValue, expected) == expected;
+ bitwise_cast<Atomic<T>*>(location)->store(newValue, order);
}
-#else // OS(WINDOWS) && !COMPILER(GCC) --> not windows, but maybe mingw
-inline bool weakCompareAndSwap(volatile unsigned* location, unsigned expected, unsigned newValue)
+
+template<typename T>
+inline bool atomicCompareExchangeWeak(T* location, T expected, T newValue, std::memory_order order = std::memory_order_seq_cst)
{
-#if ENABLE(COMPARE_AND_SWAP)
-#if CPU(X86) || CPU(X86_64)
- unsigned char result;
- asm volatile(
- "lock; cmpxchgl %3, %2\n\t"
- "sete %1"
- : "+a"(expected), "=q"(result), "+m"(*location)
- : "r"(newValue)
- : "memory"
- );
-#elif CPU(ARM_THUMB2)
- unsigned tmp;
- unsigned result;
- asm volatile(
- "movw %1, #1\n\t"
- "ldrex %2, %0\n\t"
- "cmp %3, %2\n\t"
- "bne.n 0f\n\t"
- "strex %1, %4, %0\n\t"
- "0:"
- : "+Q"(*location), "=&r"(result), "=&r"(tmp)
- : "r"(expected), "r"(newValue)
- : "memory");
- result = !result;
-#elif CPU(ARM64) && COMPILER(GCC)
- unsigned tmp;
- unsigned result;
- asm volatile(
- "mov %w1, #1\n\t"
- "ldxr %w2, [%0]\n\t"
- "cmp %w3, %w2\n\t"
- "b.ne 0f\n\t"
- "stxr %w1, %w4, [%0]\n\t"
- "0:"
- : "+r"(location), "=&r"(result), "=&r"(tmp)
- : "r"(expected), "r"(newValue)
- : "memory");
- result = !result;
-#elif CPU(ARM64)
- unsigned tmp;
- unsigned result;
- asm volatile(
- "mov %w1, #1\n\t"
- "ldxr %w2, %0\n\t"
- "cmp %w3, %w2\n\t"
- "b.ne 0f\n\t"
- "stxr %w1, %w4, %0\n\t"
- "0:"
- : "+m"(*location), "=&r"(result), "=&r"(tmp)
- : "r"(expected), "r"(newValue)
- : "memory");
- result = !result;
-#else
-#error "Bad architecture for compare and swap."
-#endif
- return result;
-#else
- UNUSED_PARAM(location);
- UNUSED_PARAM(expected);
- UNUSED_PARAM(newValue);
- CRASH();
- return false;
-#endif
+ return bitwise_cast<Atomic<T>*>(location)->compareExchangeWeak(expected, newValue, order);
}
-inline bool weakCompareAndSwap(void*volatile* location, void* expected, void* newValue)
+template<typename T>
+inline bool atomicCompareExchangeWeakRelaxed(T* location, T expected, T newValue)
{
-#if ENABLE(COMPARE_AND_SWAP)
-#if CPU(X86_64)
- bool result;
- asm volatile(
- "lock; cmpxchgq %3, %2\n\t"
- "sete %1"
- : "+a"(expected), "=q"(result), "+m"(*location)
- : "r"(newValue)
- : "memory"
- );
- return result;
-#elif CPU(ARM64) && COMPILER(GCC)
- bool result;
- void* tmp;
- asm volatile(
- "mov %w1, #1\n\t"
- "ldxr %x2, [%0]\n\t"
- "cmp %x3, %x2\n\t"
- "b.ne 0f\n\t"
- "stxr %w1, %x4, [%0]\n\t"
- "0:"
- : "+r"(location), "=&r"(result), "=&r"(tmp)
- : "r"(expected), "r"(newValue)
- : "memory");
- return !result;
-#elif CPU(ARM64)
- bool result;
- void* tmp;
- asm volatile(
- "mov %w1, #1\n\t"
- "ldxr %x2, %0\n\t"
- "cmp %x3, %x2\n\t"
- "b.ne 0f\n\t"
- "stxr %w1, %x4, %0\n\t"
- "0:"
- : "+m"(*location), "=&r"(result), "=&r"(tmp)
- : "r"(expected), "r"(newValue)
- : "memory");
- return !result;
-#else
- return weakCompareAndSwap(bitwise_cast<unsigned*>(location), bitwise_cast<unsigned>(expected), bitwise_cast<unsigned>(newValue));
-#endif
-#else // ENABLE(COMPARE_AND_SWAP)
- UNUSED_PARAM(location);
- UNUSED_PARAM(expected);
- UNUSED_PARAM(newValue);
- CRASH();
- return 0;
-#endif // ENABLE(COMPARE_AND_SWAP)
+ return bitwise_cast<Atomic<T>*>(location)->compareExchangeWeakRelaxed(expected, newValue);
}
-#endif // OS(WINDOWS) && !COMPILER(GCC) (end of the not-windows (but maybe mingw) case)
-inline bool weakCompareAndSwapUIntPtr(volatile uintptr_t* location, uintptr_t expected, uintptr_t newValue)
+template<typename T>
+inline T atomicCompareExchangeStrong(T* location, T expected, T newValue, std::memory_order order = std::memory_order_seq_cst)
{
- return weakCompareAndSwap(reinterpret_cast<void*volatile*>(location), reinterpret_cast<void*>(expected), reinterpret_cast<void*>(newValue));
+ return bitwise_cast<Atomic<T>*>(location)->compareExchangeStrong(expected, newValue, order);
}
-inline bool weakCompareAndSwapSize(volatile size_t* location, size_t expected, size_t newValue)
+template<typename T, typename U>
+inline T atomicExchangeAdd(T* location, U operand, std::memory_order order = std::memory_order_seq_cst)
{
- return weakCompareAndSwap(reinterpret_cast<void*volatile*>(location), reinterpret_cast<void*>(expected), reinterpret_cast<void*>(newValue));
+ return bitwise_cast<Atomic<T>*>(location)->exchangeAdd(operand, order);
+}
+
+template<typename T, typename U>
+inline T atomicExchangeAnd(T* location, U operand, std::memory_order order = std::memory_order_seq_cst)
+{
+ return bitwise_cast<Atomic<T>*>(location)->exchangeAnd(operand, order);
+}
+
+template<typename T, typename U>
+inline T atomicExchangeOr(T* location, U operand, std::memory_order order = std::memory_order_seq_cst)
+{
+ return bitwise_cast<Atomic<T>*>(location)->exchangeOr(operand, order);
+}
+
+template<typename T, typename U>
+inline T atomicExchangeSub(T* location, U operand, std::memory_order order = std::memory_order_seq_cst)
+{
+ return bitwise_cast<Atomic<T>*>(location)->exchangeSub(operand, order);
+}
+
+template<typename T, typename U>
+inline T atomicExchangeXor(T* location, U operand, std::memory_order order = std::memory_order_seq_cst)
+{
+ return bitwise_cast<Atomic<T>*>(location)->exchangeXor(operand, order);
+}
+
+template<typename T>
+inline T atomicExchange(T* location, T newValue, std::memory_order order = std::memory_order_seq_cst)
+{
+ return bitwise_cast<Atomic<T>*>(location)->exchange(newValue, order);
}
// Just a compiler fence. Has no effect on the hardware, but tells the compiler
@@ -223,7 +211,7 @@ inline bool weakCompareAndSwapSize(volatile size_t* location, size_t expected, s
// to do things like register allocation and code motion over pure operations.
inline void compilerFence()
{
-#if OS(WINDOWS) && !COMPILER(GCC)
+#if OS(WINDOWS) && !COMPILER(GCC_OR_CLANG)
_ReadWriteBarrier();
#else
asm volatile("" ::: "memory");
@@ -234,122 +222,207 @@ inline void compilerFence()
// Full memory fence. No accesses will float above this, and no accesses will sink
// below it.
-inline void armV7_dmb()
+inline void arm_dmb()
{
- asm volatile("dmb sy" ::: "memory");
+ asm volatile("dmb ish" ::: "memory");
}
// Like the above, but only affects stores.
-inline void armV7_dmb_st()
+inline void arm_dmb_st()
+{
+ asm volatile("dmb ishst" ::: "memory");
+}
+
+inline void arm_isb()
{
- asm volatile("dmb st" ::: "memory");
+ asm volatile("isb" ::: "memory");
}
-inline void loadLoadFence() { armV7_dmb(); }
-inline void loadStoreFence() { armV7_dmb(); }
-inline void storeLoadFence() { armV7_dmb(); }
-inline void storeStoreFence() { armV7_dmb_st(); }
-inline void memoryBarrierAfterLock() { armV7_dmb(); }
-inline void memoryBarrierBeforeUnlock() { armV7_dmb(); }
+inline void loadLoadFence() { arm_dmb(); }
+inline void loadStoreFence() { arm_dmb(); }
+inline void storeLoadFence() { arm_dmb(); }
+inline void storeStoreFence() { arm_dmb_st(); }
+inline void memoryBarrierAfterLock() { arm_dmb(); }
+inline void memoryBarrierBeforeUnlock() { arm_dmb(); }
+inline void crossModifyingCodeFence() { arm_isb(); }
#elif CPU(X86) || CPU(X86_64)
-inline void x86_mfence()
+inline void x86_ortop()
{
-#if OS(WINDOWS) && !COMPILER(GCC)
- // I think that this does the equivalent of a dummy interlocked instruction,
- // instead of using the 'mfence' instruction, at least according to MSDN. I
- // know that it is equivalent for our purposes, but it would be good to
- // investigate if that is actually better.
+#if OS(WINDOWS)
MemoryBarrier();
+#elif CPU(X86_64)
+    // This has acqrel semantics and is much cheaper than mfence. For example, in the JSC GC, using
+ // mfence as a store-load fence was a 9% slow-down on Octane/splay while using this was neutral.
+ asm volatile("lock; orl $0, (%%rsp)" ::: "memory");
+#else
+ asm volatile("lock; orl $0, (%%esp)" ::: "memory");
+#endif
+}
+
+inline void x86_cpuid()
+{
+#if OS(WINDOWS)
+ int info[4];
+ __cpuid(info, 0);
+#elif CPU(X86)
+ // GCC 4.9 on x86 in PIC mode can't use %ebx, so we have to save and restore it manually.
+ // But since we don't care about what cpuid returns (we use it as a serializing instruction),
+ // we can simply throw away what cpuid put in %ebx.
+ intptr_t a = 0, c, d;
+ asm volatile(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "+a"(a), "=c"(c), "=d"(d)
+ :
+ : "memory");
#else
- asm volatile("mfence" ::: "memory");
+ intptr_t a = 0, b, c, d;
+ asm volatile(
+ "cpuid"
+ : "+a"(a), "=b"(b), "=c"(c), "=d"(d)
+ :
+ : "memory");
#endif
}
inline void loadLoadFence() { compilerFence(); }
inline void loadStoreFence() { compilerFence(); }
-inline void storeLoadFence() { x86_mfence(); }
+inline void storeLoadFence() { x86_ortop(); }
inline void storeStoreFence() { compilerFence(); }
inline void memoryBarrierAfterLock() { compilerFence(); }
inline void memoryBarrierBeforeUnlock() { compilerFence(); }
+inline void crossModifyingCodeFence() { x86_cpuid(); }
#else
-inline void loadLoadFence() { compilerFence(); }
-inline void loadStoreFence() { compilerFence(); }
-inline void storeLoadFence() { compilerFence(); }
-inline void storeStoreFence() { compilerFence(); }
-inline void memoryBarrierAfterLock() { compilerFence(); }
-inline void memoryBarrierBeforeUnlock() { compilerFence(); }
+inline void loadLoadFence() { std::atomic_thread_fence(std::memory_order_seq_cst); }
+inline void loadStoreFence() { std::atomic_thread_fence(std::memory_order_seq_cst); }
+inline void storeLoadFence() { std::atomic_thread_fence(std::memory_order_seq_cst); }
+inline void storeStoreFence() { std::atomic_thread_fence(std::memory_order_seq_cst); }
+inline void memoryBarrierAfterLock() { std::atomic_thread_fence(std::memory_order_seq_cst); }
+inline void memoryBarrierBeforeUnlock() { std::atomic_thread_fence(std::memory_order_seq_cst); }
+inline void crossModifyingCodeFence() { std::atomic_thread_fence(std::memory_order_seq_cst); } // Probably not strong enough.
#endif
-inline bool weakCompareAndSwap(uint8_t* location, uint8_t expected, uint8_t newValue)
-{
-#if ENABLE(COMPARE_AND_SWAP)
-#if !COMPILER(MSVC) && (CPU(X86) || CPU(X86_64))
- // !COMPILER(MSVC) here means "ASM_SYNTAX(AT_AND_T)"
- unsigned char result;
- asm volatile(
- "lock; cmpxchgb %3, %2\n\t"
- "sete %1"
- : "+a"(expected), "=q"(result), "+m"(*location)
- : "q"(newValue)
- : "memory"
- );
- return result;
-#elif COMPILER(MSVC) && CPU(X86)
- // COMPILER(MSVC) here means "ASM_SYNTAX(INTEL)"
- // FIXME: We need a 64-bit ASM implementation, but this cannot be inline due to
- // Microsoft's decision to exclude it from the compiler.
- bool result = false;
-
- __asm {
- mov al, expected
- mov edx, location
- mov cl, newValue
- lock cmpxchg byte ptr[edx], cl
- setz result
- }
+typedef size_t ConsumeDependency;
- return result;
+template <typename T, typename std::enable_if<sizeof(T) == 8>::type* = nullptr>
+ALWAYS_INLINE ConsumeDependency zeroWithConsumeDependency(T value)
+{
+ uint64_t dependency;
+ uint64_t copy = bitwise_cast<uint64_t>(value);
+#if CPU(ARM64)
+ // Create a magical zero value through inline assembly, whose computation
+ // isn't visible to the optimizer. This zero is then usable as an offset in
+ // further address computations: adding zero does nothing, but the compiler
+ // doesn't know it. It's magical because it creates an address dependency
+ // from the load of `location` to the uses of the dependency, which triggers
+ // the ARM ISA's address dependency rule, a.k.a. the mythical C++ consume
+ // ordering. This forces weak memory order CPUs to observe `location` and
+ // dependent loads in their store order without the reader using a barrier
+ // or an acquire load.
+ asm volatile("eor %x[dependency], %x[in], %x[in]"
+ : [dependency] "=r"(dependency)
+ : [in] "r"(copy)
+                 // Lie about touching memory. Not strictly needed, but it is
+                 // likely to avoid unwanted load/store motion.
+ : "memory");
+#elif CPU(ARM)
+ asm volatile("eor %[dependency], %[in], %[in]"
+ : [dependency] "=r"(dependency)
+ : [in] "r"(copy)
+ : "memory");
#else
- uintptr_t locationValue = bitwise_cast<uintptr_t>(location);
- uintptr_t alignedLocationValue = locationValue & ~(sizeof(unsigned) - 1);
- uintptr_t locationOffset = locationValue - alignedLocationValue;
- ASSERT(locationOffset < sizeof(unsigned));
- unsigned* alignedLocation = bitwise_cast<unsigned*>(alignedLocationValue);
- // Make sure that this load is always issued and never optimized away.
- unsigned oldAlignedValue = *const_cast<volatile unsigned*>(alignedLocation);
-
- struct Splicer {
- static unsigned splice(unsigned value, uint8_t byte, uintptr_t byteIndex)
- {
- union {
- unsigned word;
- uint8_t bytes[sizeof(unsigned)];
- } u;
- u.word = value;
- u.bytes[byteIndex] = byte;
- return u.word;
- }
- };
-
- unsigned expectedAlignedValue = Splicer::splice(oldAlignedValue, expected, locationOffset);
- unsigned newAlignedValue = Splicer::splice(oldAlignedValue, newValue, locationOffset);
-
- return weakCompareAndSwap(alignedLocation, expectedAlignedValue, newAlignedValue);
+ // No dependency is needed for this architecture.
+ loadLoadFence();
+ dependency = 0;
+ (void)copy;
#endif
+ return static_cast<ConsumeDependency>(dependency);
+}
+
+template <typename T, typename std::enable_if<sizeof(T) == 4>::type* = nullptr>
+ALWAYS_INLINE ConsumeDependency zeroWithConsumeDependency(T value)
+{
+ uint32_t dependency;
+ uint32_t copy = bitwise_cast<uint32_t>(value);
+#if CPU(ARM64)
+ asm volatile("eor %w[dependency], %w[in], %w[in]"
+ : [dependency] "=r"(dependency)
+ : [in] "r"(copy)
+ : "memory");
+#elif CPU(ARM)
+ asm volatile("eor %[dependency], %[in], %[in]"
+ : [dependency] "=r"(dependency)
+ : [in] "r"(copy)
+ : "memory");
#else
- UNUSED_PARAM(location);
- UNUSED_PARAM(expected);
- UNUSED_PARAM(newValue);
- CRASH();
- return false;
+ loadLoadFence();
+ dependency = 0;
+ (void)copy;
#endif
+ return static_cast<ConsumeDependency>(dependency);
+}
+
+template <typename T, typename std::enable_if<sizeof(T) == 2>::type* = nullptr>
+ALWAYS_INLINE ConsumeDependency zeroWithConsumeDependency(T value)
+{
+ uint16_t copy = bitwise_cast<uint16_t>(value);
+ return zeroWithConsumeDependency(static_cast<size_t>(copy));
+}
+
+template <typename T, typename std::enable_if<sizeof(T) == 1>::type* = nullptr>
+ALWAYS_INLINE ConsumeDependency zeroWithConsumeDependency(T value)
+{
+ uint8_t copy = bitwise_cast<uint8_t>(value);
+ return zeroWithConsumeDependency(static_cast<size_t>(copy));
+}
+
+template <typename T>
+struct Consumed {
+ T value;
+ ConsumeDependency dependency;
+};
+
+// Consume load, returning the loaded `value` at `location` and a dependent-zero
+// which creates an address dependency from the `location`.
+//
+// Usage notes:
+//
+// * Regarding control dependencies: merely branching based on `value` or
+// `dependency` isn't sufficient to impose a dependency ordering: you must
+// use `dependency` in the address computation of subsequent loads which
+// should observe the store order w.r.t. `location`.
+// * Regarding memory ordering: consume load orders the `location` load with
+//   subsequent dependent loads *only*. It says nothing about ordering of other
+// loads!
+//
+// Caveat emptor.
+template <typename T>
+ALWAYS_INLINE auto consumeLoad(const T* location)
+{
+ typedef typename std::remove_cv<T>::type Returned;
+ Consumed<Returned> ret { };
+    // Force the read of `location` to occur exactly once, without fusing or
+    // forwarding, by using volatile. This is important because the compiler could
+ // otherwise rematerialize or find equivalent loads, or simply forward from
+ // a previous one, and lose the dependency we're trying so hard to
+ // create. Prevent tearing by using an atomic, but let it move around by
+ // using relaxed. We have at least a memory fence after this which prevents
+ // the load from moving too much.
+ ret.value = reinterpret_cast<const volatile std::atomic<Returned>*>(location)->load(std::memory_order_relaxed);
+ ret.dependency = zeroWithConsumeDependency(ret.value);
+ return ret;
}
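
A hedged sketch of the intended consumeLoad() pattern (the published table type is hypothetical, and it assumes the producer publishes the pointer with a release store or a storeStoreFence()): the reader folds the returned zero-valued dependency into the address of the dependent load, so on ARM the ordering is carried by the address dependency rather than by a barrier or acquire load.

    // Sketch only: reading through a pointer published by another thread.
    struct ExampleTable {
        int values[16];
    };
    static ExampleTable* g_exampleTable; // written once by a producer, then read concurrently

    static int readFirstValue()
    {
        auto consumed = WTF::consumeLoad(&g_exampleTable); // yields { value, dependency }
        if (!consumed.value)
            return 0;
        // Mix the (always zero) dependency into the pointer so the dependent load
        // cannot be reordered before the load of g_exampleTable.
        ExampleTable* table = reinterpret_cast<ExampleTable*>(
            reinterpret_cast<char*>(consumed.value) + consumed.dependency);
        return table->values[0];
    }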
} // namespace WTF
+using WTF::Atomic;
+using WTF::ConsumeDependency;
+using WTF::consumeLoad;
+
#endif // Atomics_h
diff --git a/Source/WTF/wtf/AutodrainedPool.h b/Source/WTF/wtf/AutodrainedPool.h
index 6fb0893a5..6f02c5df4 100644
--- a/Source/WTF/wtf/AutodrainedPool.h
+++ b/Source/WTF/wtf/AutodrainedPool.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,7 +31,7 @@
#include <wtf/Noncopyable.h>
-#if PLATFORM(MAC) && !defined(__OBJC__)
+#if USE(FOUNDATION) && !defined(__OBJC__)
typedef struct objc_object *id;
#endif
@@ -40,7 +40,7 @@ namespace WTF {
class AutodrainedPool {
WTF_MAKE_NONCOPYABLE(AutodrainedPool);
public:
-#if PLATFORM(MAC)
+#if USE(FOUNDATION)
WTF_EXPORT_PRIVATE AutodrainedPool();
WTF_EXPORT_PRIVATE ~AutodrainedPool();
#else
@@ -49,7 +49,7 @@ public:
#endif
private:
-#if PLATFORM(MAC)
+#if USE(FOUNDATION)
id m_pool;
#endif
};
diff --git a/Source/WTF/wtf/AutomaticThread.cpp b/Source/WTF/wtf/AutomaticThread.cpp
new file mode 100644
index 000000000..387c6e25d
--- /dev/null
+++ b/Source/WTF/wtf/AutomaticThread.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AutomaticThread.h"
+
+#include "DataLog.h"
+
+namespace WTF {
+
+static const bool verbose = false;
+
+RefPtr<AutomaticThreadCondition> AutomaticThreadCondition::create()
+{
+ return adoptRef(new AutomaticThreadCondition());
+}
+
+AutomaticThreadCondition::AutomaticThreadCondition()
+{
+}
+
+AutomaticThreadCondition::~AutomaticThreadCondition()
+{
+}
+
+void AutomaticThreadCondition::notifyOne(const AbstractLocker& locker)
+{
+ for (AutomaticThread* thread : m_threads) {
+ if (thread->isWaiting(locker)) {
+ thread->notify(locker);
+ return;
+ }
+ }
+
+ for (AutomaticThread* thread : m_threads) {
+ if (!thread->hasUnderlyingThread(locker)) {
+ thread->start(locker);
+ return;
+ }
+ }
+
+ m_condition.notifyOne();
+}
+
+void AutomaticThreadCondition::notifyAll(const AbstractLocker& locker)
+{
+ m_condition.notifyAll();
+
+ for (AutomaticThread* thread : m_threads) {
+ if (thread->isWaiting(locker))
+ thread->notify(locker);
+ else if (!thread->hasUnderlyingThread(locker))
+ thread->start(locker);
+ }
+}
+
+void AutomaticThreadCondition::wait(Lock& lock)
+{
+ m_condition.wait(lock);
+}
+
+void AutomaticThreadCondition::add(const AbstractLocker&, AutomaticThread* thread)
+{
+ ASSERT(!m_threads.contains(thread));
+ m_threads.append(thread);
+}
+
+void AutomaticThreadCondition::remove(const AbstractLocker&, AutomaticThread* thread)
+{
+ m_threads.removeFirst(thread);
+ ASSERT(!m_threads.contains(thread));
+}
+
+bool AutomaticThreadCondition::contains(const AbstractLocker&, AutomaticThread* thread)
+{
+ return m_threads.contains(thread);
+}
+
+AutomaticThread::AutomaticThread(const AbstractLocker& locker, Box<Lock> lock, RefPtr<AutomaticThreadCondition> condition)
+ : m_lock(lock)
+ , m_condition(condition)
+{
+ if (verbose)
+ dataLog(RawPointer(this), ": Allocated AutomaticThread.\n");
+ m_condition->add(locker, this);
+}
+
+AutomaticThread::~AutomaticThread()
+{
+ if (verbose)
+ dataLog(RawPointer(this), ": Deleting AutomaticThread.\n");
+ LockHolder locker(*m_lock);
+
+ // It's possible that we're in a waiting state with the thread shut down. This is a goofy way to
+ // die, but it could happen.
+ m_condition->remove(locker, this);
+}
+
+bool AutomaticThread::tryStop(const AbstractLocker&)
+{
+ if (!m_isRunning)
+ return true;
+ if (m_hasUnderlyingThread)
+ return false;
+ m_isRunning = false;
+ return true;
+}
+
+bool AutomaticThread::isWaiting(const AbstractLocker& locker)
+{
+ return hasUnderlyingThread(locker) && m_isWaiting;
+}
+
+bool AutomaticThread::notify(const AbstractLocker& locker)
+{
+ ASSERT_UNUSED(locker, hasUnderlyingThread(locker));
+ m_isWaiting = false;
+ return m_waitCondition.notifyOne();
+}
+
+void AutomaticThread::join()
+{
+ LockHolder locker(*m_lock);
+ while (m_isRunning)
+ m_isRunningCondition.wait(*m_lock);
+}
+
+void AutomaticThread::start(const AbstractLocker&)
+{
+ RELEASE_ASSERT(m_isRunning);
+
+ RefPtr<AutomaticThread> preserveThisForThread = this;
+
+ m_hasUnderlyingThread = true;
+
+ ThreadIdentifier thread = createThread(
+ "WTF::AutomaticThread",
+ [=] () {
+ if (verbose)
+ dataLog(RawPointer(this), ": Running automatic thread!\n");
+
+ RefPtr<AutomaticThread> thread = preserveThisForThread;
+ thread->threadDidStart();
+
+ if (!ASSERT_DISABLED) {
+ LockHolder locker(*m_lock);
+ ASSERT(m_condition->contains(locker, this));
+ }
+
+ auto stopImpl = [&] (const AbstractLocker& locker) {
+ thread->threadIsStopping(locker);
+ thread->m_hasUnderlyingThread = false;
+ };
+
+ auto stopPermanently = [&] (const AbstractLocker& locker) {
+ m_isRunning = false;
+ m_isRunningCondition.notifyAll();
+ stopImpl(locker);
+ };
+
+ auto stopForTimeout = [&] (const AbstractLocker& locker) {
+ stopImpl(locker);
+ };
+
+ for (;;) {
+ {
+ LockHolder locker(*m_lock);
+ for (;;) {
+ PollResult result = poll(locker);
+ if (result == PollResult::Work)
+ break;
+ if (result == PollResult::Stop)
+ return stopPermanently(locker);
+ RELEASE_ASSERT(result == PollResult::Wait);
+ // Shut the thread down after one second.
+ m_isWaiting = true;
+ bool awokenByNotify =
+ m_waitCondition.waitFor(*m_lock, 1_s);
+ if (verbose && !awokenByNotify && !m_isWaiting)
+ dataLog(RawPointer(this), ": waitFor timed out, but notified via m_isWaiting flag!\n");
+ if (m_isWaiting) {
+ m_isWaiting = false;
+ if (verbose)
+ dataLog(RawPointer(this), ": Going to sleep!\n");
+ // It's important that we don't release the lock until we have completely
+                        // indicated that the thread is kaput. Otherwise we'll have a notify
+ // race that manifests as a deadlock on VM shutdown.
+ return stopForTimeout(locker);
+ }
+ }
+ }
+
+ WorkResult result = work();
+ if (result == WorkResult::Stop) {
+ LockHolder locker(*m_lock);
+ return stopPermanently(locker);
+ }
+ RELEASE_ASSERT(result == WorkResult::Continue);
+ }
+ });
+ detachThread(thread);
+}
+
+void AutomaticThread::threadDidStart()
+{
+}
+
+void AutomaticThread::threadIsStopping(const AbstractLocker&)
+{
+}
+
+} // namespace WTF
+
diff --git a/Source/WTF/wtf/AutomaticThread.h b/Source/WTF/wtf/AutomaticThread.h
new file mode 100644
index 000000000..ec6ba438f
--- /dev/null
+++ b/Source/WTF/wtf/AutomaticThread.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_AutomaticThread_h
+#define WTF_AutomaticThread_h
+
+#include <wtf/Box.h>
+#include <wtf/Condition.h>
+#include <wtf/Lock.h>
+#include <wtf/Ref.h>
+#include <wtf/ThreadSafeRefCounted.h>
+#include <wtf/Threading.h>
+#include <wtf/Vector.h>
+
+namespace WTF {
+
+// Often, we create threads that have this as their body:
+//
+// for (;;) {
+// {
+// LockHolder locker(m_lock);
+// for (;;) {
+// [1] stuff that could break, return, or fall through;
+// m_condition.wait(m_lock);
+// }
+// }
+//
+// [2] do work;
+// }
+//
+// When we do this, we don't always do a good job of managing this thread's lifetime, which may lead
+// to this thread sitting around even when it is not needed.
+//
+// AutomaticThread is here to help you in these situations. It encapsulates a lock, a condition
+// variable, and a thread. It will automatically shut the thread down after 1 second of inactivity.
+// You use AutomaticThread by subclassing it and putting any state that is needed between [1] and [2]
+// in the subclass.
+//
+// The terminology we use is:
+//
+// [1] PollResult AutomaticThread::poll()
+//    [2] WorkResult AutomaticThread::work()
+//
+// Note that poll() and work() may not be called on the same thread every time, since this will shut
+// down the thread as necessary. This is legal since m_condition.wait(m_lock) can drop the lock, and
+// so there is no reason to keep the thread around.
+
+class AutomaticThread;
+
+class AutomaticThreadCondition : public ThreadSafeRefCounted<AutomaticThreadCondition> {
+public:
+ static WTF_EXPORT_PRIVATE RefPtr<AutomaticThreadCondition> create();
+
+ WTF_EXPORT_PRIVATE ~AutomaticThreadCondition();
+
+ WTF_EXPORT_PRIVATE void notifyOne(const AbstractLocker&);
+ WTF_EXPORT_PRIVATE void notifyAll(const AbstractLocker&);
+
+ // You can reuse this condition for other things, just as you would any other condition.
+    // However, since conflating conditions could lead to a thundering herd, it's best to avoid it.
+ // One known-good case for one-true-condition is when the communication involves just two
+ // threads. In such cases, the thread doing the notifyAll() can wake up at most one thread -
+ // its partner.
+ WTF_EXPORT_PRIVATE void wait(Lock&);
+
+private:
+ friend class AutomaticThread;
+
+ WTF_EXPORT_PRIVATE AutomaticThreadCondition();
+
+ void add(const AbstractLocker&, AutomaticThread*);
+ void remove(const AbstractLocker&, AutomaticThread*);
+ bool contains(const AbstractLocker&, AutomaticThread*);
+
+ Condition m_condition;
+ Vector<AutomaticThread*> m_threads;
+};
+
+class WTF_EXPORT_PRIVATE AutomaticThread : public ThreadSafeRefCounted<AutomaticThread> {
+public:
+ // Note that if you drop all of your references to an AutomaticThread then as soon as there is a
+ // second during which it doesn't get woken up, it will simply die on its own. This is a
+ // permanent kind of death where the AutomaticThread object goes away, rather than the temporary
+ // kind of death where AutomaticThread lives but its underlying thread dies. All you have to do
+ // to prevent permanent death is keep a ref to AutomaticThread. At time of writing, every user of
+ // AutomaticThread keeps a ref to it and does join() as part of the shutdown process, so only the
+ // temporary kind of automatic death happens in practice. We keep the permanent death feature
+ // because it leads to an easy-to-understand reference counting discipline (AutomaticThread holds
+ // strong ref to AutomaticThreadCondition and the underlying thread holds a strong ref to
+ // AutomaticThread).
+ virtual ~AutomaticThread();
+
+ // Sometimes it's possible to optimize for the case that there is no underlying thread.
+ bool hasUnderlyingThread(const AbstractLocker&) const { return m_hasUnderlyingThread; }
+
+ // This attempts to quickly stop the thread. This will succeed if the thread happens to not be
+ // running. Returns true if the thread has been stopped. A good idiom for stopping your automatic
+ // thread is to first try this, and if that doesn't work, to tell the thread using your own
+ // mechanism (set some flag and then notify the condition).
+ bool tryStop(const AbstractLocker&);
+
+ bool isWaiting(const AbstractLocker&);
+
+ bool notify(const AbstractLocker&);
+
+ void join();
+
+protected:
+ // This logically creates the thread, but in reality the thread won't be created until someone
+ // calls AutomaticThreadCondition::notifyOne() or notifyAll().
+ AutomaticThread(const AbstractLocker&, Box<Lock>, RefPtr<AutomaticThreadCondition>);
+
+ // To understand PollResult and WorkResult, imagine that poll() and work() are being called like
+ // so:
+ //
+ // void AutomaticThread::runThread()
+ // {
+ // for (;;) {
+ // {
+ // LockHolder locker(m_lock);
+ // for (;;) {
+ // PollResult result = poll();
+ // if (result == PollResult::Work)
+ // break;
+ // if (result == PollResult::Stop)
+ // return;
+ // RELEASE_ASSERT(result == PollResult::Wait);
+ // m_condition.wait(m_lock);
+ // }
+ // }
+ //
+ // WorkResult result = work();
+ // if (result == WorkResult::Stop)
+ // return;
+ // RELEASE_ASSERT(result == WorkResult::Continue);
+ // }
+ // }
+
+ enum class PollResult { Work, Stop, Wait };
+ virtual PollResult poll(const AbstractLocker&) = 0;
+
+ enum class WorkResult { Continue, Stop };
+ virtual WorkResult work() = 0;
+
+ // It's sometimes useful to allocate resources while the thread is running, and to destroy them
+ // when the thread dies. These methods let you do this. You can override these methods, and you
+ // can be sure that the default ones don't do anything (so you don't need a super call).
+ virtual void threadDidStart();
+ virtual void threadIsStopping(const AbstractLocker&);
+
+private:
+ friend class AutomaticThreadCondition;
+
+ void start(const AbstractLocker&);
+
+ Box<Lock> m_lock;
+ RefPtr<AutomaticThreadCondition> m_condition;
+ bool m_isRunning { true };
+ bool m_isWaiting { false };
+ bool m_hasUnderlyingThread { false };
+ Condition m_waitCondition;
+ Condition m_isRunningCondition;
+};
+
+} // namespace WTF
+
+using WTF::AutomaticThread;
+using WTF::AutomaticThreadCondition;
+
+#endif // WTF_AutomaticThread_h
+
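
A minimal sketch of how a client might subclass AutomaticThread, based only on the interface added above. The class name CounterThread, the pendingJobs counter, and the setup comments are hypothetical and not part of this patch:

    #include <wtf/AutomaticThread.h>
    #include <wtf/DataLog.h>

    // Hypothetical worker: drains a counter of pending jobs that is guarded by
    // the same lock that was handed to the AutomaticThread constructor.
    class CounterThread final : public AutomaticThread {
    public:
        CounterThread(const AbstractLocker& locker, Box<Lock> lock, RefPtr<AutomaticThreadCondition> condition, unsigned& pendingJobs)
            : AutomaticThread(locker, lock, condition)
            , m_pendingJobs(pendingJobs)
        {
        }

    protected:
        PollResult poll(const AbstractLocker&) override
        {
            // Runs with the lock held; decide between Work, Wait, and Stop.
            if (!m_pendingJobs)
                return PollResult::Wait; // After about one second of waiting the underlying thread exits.
            m_pendingJobs--;
            return PollResult::Work;
        }

        WorkResult work() override
        {
            // Runs with the lock released.
            dataLog("did one job\n");
            return WorkResult::Continue;
        }

    private:
        unsigned& m_pendingJobs; // Guarded by the lock passed to the constructor.
    };

    // Setup and producer side (hypothetical):
    //   Box<Lock> lock = Box<Lock>::create();
    //   RefPtr<AutomaticThreadCondition> condition = AutomaticThreadCondition::create();
    //   unsigned pendingJobs = 0;
    //   RefPtr<CounterThread> thread;
    //   {
    //       LockHolder locker(*lock);
    //       thread = adoptRef(new CounterThread(locker, lock, condition, pendingJobs));
    //       pendingJobs++;
    //       condition->notifyOne(locker); // Starts (or wakes) the underlying thread.
    //   }
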
diff --git a/Source/WTF/wtf/BackwardsGraph.h b/Source/WTF/wtf/BackwardsGraph.h
new file mode 100644
index 000000000..65337baf1
--- /dev/null
+++ b/Source/WTF/wtf/BackwardsGraph.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_BackwardsGraph_h
+#define WTF_BackwardsGraph_h
+
+#include <wtf/FastMalloc.h>
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/StdLibExtras.h>
+
+namespace WTF {
+
+template<typename Graph>
+class BackwardsGraph {
+ WTF_MAKE_NONCOPYABLE(BackwardsGraph);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ // We use "#end" to refer to the synthetic root we have created.
+    static const char* rootName() { return "#end"; }
+
+ class Node {
+ public:
+ Node(typename Graph::Node node = typename Graph::Node())
+ : m_node(node)
+ {
+ }
+
+ static Node root()
+ {
+ Node result;
+ result.m_node = 0;
+ result.m_isRoot = true;
+ return result;
+ }
+
+ bool operator==(const Node& other) const
+ {
+ return m_node == other.m_node
+ && m_isRoot == other.m_isRoot;
+ }
+
+ bool operator!=(const Node& other) const
+ {
+ return !(*this == other);
+ }
+
+ explicit operator bool() const { return *this != Node(); }
+
+ bool isRoot() const
+ {
+ return m_isRoot;
+ }
+
+ typename Graph::Node node() const { return m_node; }
+
+ private:
+ typename Graph::Node m_node;
+ bool m_isRoot { false };
+ };
+
+ class Set {
+ public:
+ Set()
+ {
+ }
+
+ bool add(const Node& node)
+ {
+ if (node.isRoot())
+ return checkAndSet(m_hasRoot, true);
+ return m_set.add(node.node());
+ }
+
+ bool remove(const Node& node)
+ {
+ if (node.isRoot())
+ return checkAndSet(m_hasRoot, false);
+ return m_set.remove(node.node());
+ }
+
+ bool contains(const Node& node)
+ {
+ if (node.isRoot())
+ return m_hasRoot;
+ return m_set.contains(node.node());
+ }
+
+ void dump(PrintStream& out) const
+ {
+ if (m_hasRoot)
+ out.print(rootName(), " ");
+ out.print(m_set);
+ }
+
+ private:
+ typename Graph::Set m_set;
+ bool m_hasRoot { false };
+ };
+
+ template<typename T>
+ class Map {
+ public:
+ Map(Graph& graph)
+ : m_map(graph.template newMap<T>())
+ {
+ }
+
+ void clear()
+ {
+ m_map.clear();
+ m_root = T();
+ }
+
+ size_t size() const { return m_map.size() + 1; }
+
+ T& operator[](size_t index)
+ {
+ if (!index)
+ return m_root;
+ return m_map[index - 1];
+ }
+
+ const T& operator[](size_t index) const
+ {
+ return (*const_cast<Map*>(this))[index];
+ }
+
+ T& operator[](const Node& node)
+ {
+ if (node.isRoot())
+ return m_root;
+ return m_map[node.node()];
+ }
+
+ const T& operator[](const Node& node) const
+ {
+ return (*const_cast<Map*>(this))[node];
+ }
+
+ private:
+ typename Graph::template Map<T> m_map;
+ T m_root;
+ };
+
+ typedef Vector<Node, 4> List;
+
+ BackwardsGraph(Graph& graph)
+ : m_graph(graph)
+ {
+ GraphNodeWorklist<typename Graph::Node, typename Graph::Set> worklist;
+
+ auto addRootSuccessor = [&] (typename Graph::Node node) {
+ if (worklist.push(node)) {
+ m_rootSuccessorList.append(node);
+ m_rootSuccessorSet.add(node);
+ while (typename Graph::Node node = worklist.pop())
+ worklist.pushAll(graph.predecessors(node));
+ }
+ };
+
+ for (unsigned i = 0; i < graph.numNodes(); ++i) {
+ if (typename Graph::Node node = graph.node(i)) {
+ if (!graph.successors(node).size())
+ addRootSuccessor(node);
+ }
+ }
+
+ // At this point there will be some nodes in the graph that aren't known to the worklist. We
+        // could add any or all of them to the root successors list. Adding all of them would be a
+ // pessimisation. Ideally we would pick the ones that have backward edges but no forward
+ // edges. That would require thinking, so we just use a rough heuristic: add the highest
+ // numbered nodes first, which is totally fine if the input program is already sorted nicely.
+ for (unsigned i = graph.numNodes(); i--;) {
+ if (typename Graph::Node node = graph.node(i))
+ addRootSuccessor(node);
+ }
+ }
+
+ Node root() { return Node::root(); }
+
+ template<typename T>
+ Map<T> newMap() { return Map<T>(m_graph); }
+
+ List successors(const Node& node) const
+ {
+ if (node.isRoot())
+ return m_rootSuccessorList;
+ List result;
+ for (typename Graph::Node predecessor : m_graph.predecessors(node.node()))
+ result.append(predecessor);
+ return result;
+ }
+
+ List predecessors(const Node& node) const
+ {
+ if (node.isRoot())
+ return { };
+
+ List result;
+
+ if (m_rootSuccessorSet.contains(node.node()))
+ result.append(Node::root());
+
+ for (typename Graph::Node successor : m_graph.successors(node.node()))
+ result.append(successor);
+
+ return result;
+ }
+
+ unsigned index(const Node& node) const
+ {
+ if (node.isRoot())
+ return 0;
+ return m_graph.index(node.node()) + 1;
+ }
+
+ Node node(unsigned index) const
+ {
+ if (!index)
+ return Node::root();
+ return m_graph.node(index - 1);
+ }
+
+ unsigned numNodes() const
+ {
+ return m_graph.numNodes() + 1;
+ }
+
+ CString dump(Node node) const
+ {
+ StringPrintStream out;
+ if (!node)
+ out.print("<null>");
+ else if (node.isRoot())
+ out.print(rootName());
+ else
+ out.print(m_graph.dump(node.node()));
+ return out.toCString();
+ }
+
+ void dump(PrintStream& out) const
+ {
+ for (unsigned i = 0; i < numNodes(); ++i) {
+ Node node = this->node(i);
+ if (!node)
+ continue;
+ out.print(dump(node), ":\n");
+ out.print(" Preds: ");
+ CommaPrinter comma;
+ for (Node predecessor : predecessors(node))
+ out.print(comma, dump(predecessor));
+ out.print("\n");
+ out.print(" Succs: ");
+ comma = CommaPrinter();
+ for (Node successor : successors(node))
+ out.print(comma, dump(successor));
+ out.print("\n");
+ }
+ }
+
+private:
+ Graph& m_graph;
+ List m_rootSuccessorList;
+ typename Graph::Set m_rootSuccessorSet;
+};
+
+} // namespace WTF
+
+using WTF::BackwardsGraph;
+
+#endif // WTF_BackwardsGraph_h
+
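
To make the synthetic "#end" root concrete: the adapter presents the client graph with every edge reversed and a root whose successors are the nodes that had no successors in the forward graph. A toy illustration of that edge inversion using std containers; this is not the WTF template and it ignores the worklist handling of nodes that only sit on cycles:

    #include <map>
    #include <string>
    #include <vector>

    using Digraph = std::map<std::string, std::vector<std::string>>;

    // Reverse every edge and attach a synthetic "#end" root to the exit nodes.
    Digraph backwards(const Digraph& graph)
    {
        Digraph result;
        std::vector<std::string> exits;
        for (const auto& entry : graph) {
            const std::string& node = entry.first;
            result[node]; // Make sure every node appears in the output.
            if (entry.second.empty())
                exits.push_back(node); // Forward exits become successors of "#end".
            for (const std::string& successor : entry.second)
                result[successor].push_back(node); // successor -> node in the backwards graph.
        }
        result["#end"] = exits;
        return result;
    }
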
diff --git a/Source/WTF/wtf/Bag.h b/Source/WTF/wtf/Bag.h
index 417402bca..01eca7472 100644
--- a/Source/WTF/wtf/Bag.h
+++ b/Source/WTF/wtf/Bag.h
@@ -30,30 +30,60 @@ namespace WTF {
template<typename T>
class Bag {
+ WTF_MAKE_NONCOPYABLE(Bag);
+ WTF_MAKE_FAST_ALLOCATED;
private:
- struct Node {
+ class Node {
+ WTF_MAKE_FAST_ALLOCATED;
+ public:
+ template<typename... Args>
+ Node(Args&&... args)
+ : m_item(std::forward<Args>(args)...)
+ {
+ }
+
T m_item;
Node* m_next;
};
public:
Bag()
- : m_head(0)
{
}
+
+ Bag(Bag<T>&& other)
+ {
+ ASSERT(!m_head);
+ m_head = other.m_head;
+ other.m_head = nullptr;
+ }
+
+ Bag& operator=(Bag<T>&& other)
+ {
+ m_head = other.m_head;
+ other.m_head = nullptr;
+ return *this;
+ }
~Bag()
{
+ clear();
+ }
+
+ void clear()
+ {
while (m_head) {
Node* current = m_head;
m_head = current->m_next;
delete current;
}
+ m_head = nullptr;
}
- T* add()
+ template<typename... Args>
+ T* add(Args&&... args)
{
- Node* newNode = new Node;
+ Node* newNode = new Node(std::forward<Args>(args)...);
newNode->m_next = m_head;
m_head = newNode;
return &newNode->m_item;
@@ -81,6 +111,12 @@ public:
{
return m_node == other.m_node;
}
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
private:
template<typename U> friend class WTF::Bag;
Node* m_node;
@@ -98,7 +134,7 @@ public:
bool isEmpty() const { return !m_head; }
private:
- Node* m_head;
+ Node* m_head { nullptr };
};
} // namespace WTF
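
With the in-place add() and the move operations added above, Bag can now hold non-default-constructible items. A short usage sketch; the Entry type and values are hypothetical:

    #include <utility>
    #include <wtf/Bag.h>

    struct Entry {
        Entry(int id, double weight)
            : id(id)
            , weight(weight)
        {
        }
        int id;
        double weight;
    };

    void example()
    {
        Bag<Entry> bag;
        Entry* entry = bag.add(7, 1.5); // Constructed in place; the pointer stays valid until clear() or destruction.
        entry->weight = 2.0;

        Bag<Entry> other = std::move(bag); // Steals the node list; 'bag' is left empty.
        other.clear();
    }
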
diff --git a/Source/WTF/wtf/BagToHashMap.h b/Source/WTF/wtf/BagToHashMap.h
index c1be7ff95..539795fde 100644
--- a/Source/WTF/wtf/BagToHashMap.h
+++ b/Source/WTF/wtf/BagToHashMap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,8 +32,8 @@
namespace WTF {
-template<typename ElementType, typename KeyType, typename KeyGetterFunctor>
-void toHashMap(Bag<ElementType>& bag, KeyGetterFunctor& getKey, HashMap<KeyType, ElementType*>& result)
+template<typename ElementType, typename KeyType, typename HashArg, typename KeyGetterFunctor>
+void toHashMap(Bag<ElementType>& bag, KeyGetterFunctor& getKey, HashMap<KeyType, ElementType*, HashArg>& result)
{
for (typename Bag<ElementType>::iterator iter = bag.begin(); !!iter; ++iter) {
ElementType* element = *iter;
diff --git a/Source/WTF/wtf/BitVector.cpp b/Source/WTF/wtf/BitVector.cpp
index f60856c39..736ff7d28 100644
--- a/Source/WTF/wtf/BitVector.cpp
+++ b/Source/WTF/wtf/BitVector.cpp
@@ -183,6 +183,50 @@ size_t BitVector::bitCountSlow() const
bool BitVector::equalsSlowCase(const BitVector& other) const
{
+ bool result = equalsSlowCaseFast(other);
+ ASSERT(result == equalsSlowCaseSimple(other));
+ return result;
+}
+
+bool BitVector::equalsSlowCaseFast(const BitVector& other) const
+{
+ if (isInline() != other.isInline())
+ return equalsSlowCaseSimple(other);
+
+ const OutOfLineBits* myBits = outOfLineBits();
+ const OutOfLineBits* otherBits = other.outOfLineBits();
+
+ size_t myNumWords = myBits->numWords();
+ size_t otherNumWords = otherBits->numWords();
+ size_t minNumWords;
+ size_t maxNumWords;
+
+ const OutOfLineBits* longerBits;
+ if (myNumWords < otherNumWords) {
+ minNumWords = myNumWords;
+ maxNumWords = otherNumWords;
+ longerBits = otherBits;
+ } else {
+ minNumWords = otherNumWords;
+ maxNumWords = myNumWords;
+ longerBits = myBits;
+ }
+
+ for (size_t i = minNumWords; i < maxNumWords; ++i) {
+ if (longerBits->bits()[i])
+ return false;
+ }
+
+ for (size_t i = minNumWords; i--;) {
+ if (myBits->bits()[i] != otherBits->bits()[i])
+ return false;
+ }
+
+ return true;
+}
+
+bool BitVector::equalsSlowCaseSimple(const BitVector& other) const
+{
// This is really cheesy, but probably good enough for now.
for (unsigned i = std::max(size(), other.size()); i--;) {
if (get(i) != other.get(i))
diff --git a/Source/WTF/wtf/BitVector.h b/Source/WTF/wtf/BitVector.h
index 77d95f6df..762cc0460 100644
--- a/Source/WTF/wtf/BitVector.h
+++ b/Source/WTF/wtf/BitVector.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2014, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,6 +28,7 @@
#include <stdio.h>
#include <wtf/Assertions.h>
+#include <wtf/DataLog.h>
#include <wtf/HashFunctions.h>
#include <wtf/HashTraits.h>
#include <wtf/PrintStream.h>
@@ -117,24 +118,31 @@ public:
return !!(bits()[bit / bitsInPointer()] & (static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1))));
}
- void quickSet(size_t bit)
+ bool quickSet(size_t bit)
{
ASSERT_WITH_SECURITY_IMPLICATION(bit < size());
- bits()[bit / bitsInPointer()] |= (static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1)));
+ uintptr_t& word = bits()[bit / bitsInPointer()];
+ uintptr_t mask = static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1));
+ bool result = !!(word & mask);
+ word |= mask;
+ return result;
}
- void quickClear(size_t bit)
+ bool quickClear(size_t bit)
{
ASSERT_WITH_SECURITY_IMPLICATION(bit < size());
- bits()[bit / bitsInPointer()] &= ~(static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1)));
+ uintptr_t& word = bits()[bit / bitsInPointer()];
+ uintptr_t mask = static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1));
+ bool result = !!(word & mask);
+ word &= ~mask;
+ return result;
}
- void quickSet(size_t bit, bool value)
+ bool quickSet(size_t bit, bool value)
{
if (value)
- quickSet(bit);
- else
- quickClear(bit);
+ return quickSet(bit);
+ return quickClear(bit);
}
bool get(size_t bit) const
@@ -143,32 +151,48 @@ public:
return false;
return quickGet(bit);
}
+
+ bool contains(size_t bit) const
+ {
+ return get(bit);
+ }
- void set(size_t bit)
+ bool set(size_t bit)
{
ensureSize(bit + 1);
- quickSet(bit);
+ return quickSet(bit);
+ }
+
+ // This works like the add methods of sets. Instead of returning the previous value, like set(),
+ // it returns whether the bit transitioned from false to true.
+ bool add(size_t bit)
+ {
+ return !set(bit);
}
- void ensureSizeAndSet(size_t bit, size_t size)
+ bool ensureSizeAndSet(size_t bit, size_t size)
{
ensureSize(size);
- quickSet(bit);
+ return quickSet(bit);
}
- void clear(size_t bit)
+ bool clear(size_t bit)
{
if (bit >= size())
- return;
- quickClear(bit);
+ return false;
+ return quickClear(bit);
+ }
+
+ bool remove(size_t bit)
+ {
+ return clear(bit);
}
- void set(size_t bit, bool value)
+ bool set(size_t bit, bool value)
{
if (value)
- set(bit);
- else
- clear(bit);
+ return set(bit);
+ return clear(bit);
}
void merge(const BitVector& other)
@@ -209,6 +233,19 @@ public:
return bitCountSlow();
}
+ size_t findBit(size_t index, bool value) const
+ {
+ size_t result = findBitFast(index, value);
+ if (!ASSERT_DISABLED) {
+ size_t expectedResult = findBitSimple(index, value);
+ if (result != expectedResult) {
+ dataLog("findBit(", index, ", ", value, ") on ", *this, " should have gotten ", expectedResult, " but got ", result, "\n");
+ ASSERT_NOT_REACHED();
+ }
+ }
+ return result;
+ }
+
WTF_EXPORT_PRIVATE void dump(PrintStream& out) const;
enum EmptyValueTag { EmptyValue };
@@ -249,6 +286,46 @@ public:
return IntHash<uintptr_t>::hash(value);
}
+ class iterator {
+ public:
+ iterator()
+ : m_bitVector(nullptr)
+ , m_index(0)
+ {
+ }
+
+ iterator(const BitVector& bitVector, size_t index)
+ : m_bitVector(&bitVector)
+ , m_index(index)
+ {
+ }
+
+ size_t operator*() const { return m_index; }
+
+ iterator& operator++()
+ {
+ m_index = m_bitVector->findBit(m_index + 1, true);
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+ private:
+ const BitVector* m_bitVector;
+ size_t m_index;
+ };
+
+ // Use this to iterate over set bits.
+ iterator begin() const { return iterator(*this, findBit(0, true)); }
+ iterator end() const { return iterator(*this, size()); }
+
private:
static unsigned bitsInPointer()
{
@@ -283,6 +360,49 @@ private:
return WTF::bitCount(static_cast<uint64_t>(bits));
}
+ size_t findBitFast(size_t startIndex, bool value) const
+ {
+ if (isInline()) {
+ size_t index = startIndex;
+ findBitInWord(m_bitsOrPointer, index, maxInlineBits(), value);
+ return index;
+ }
+
+ const OutOfLineBits* bits = outOfLineBits();
+
+ // value = true: casts to 1, then xors to 0, then negates to 0.
+ // value = false: casts to 0, then xors to 1, then negates to -1 (i.e. all one bits).
+ uintptr_t skipValue = -(static_cast<uintptr_t>(value) ^ 1);
+ size_t numWords = bits->numWords();
+
+ size_t wordIndex = startIndex / bitsInPointer();
+ size_t startIndexInWord = startIndex - wordIndex * bitsInPointer();
+
+ while (wordIndex < numWords) {
+ uintptr_t word = bits->bits()[wordIndex];
+ if (word != skipValue) {
+ size_t index = startIndexInWord;
+ if (findBitInWord(word, index, bitsInPointer(), value))
+ return wordIndex * bitsInPointer() + index;
+ }
+
+ wordIndex++;
+ startIndexInWord = 0;
+ }
+
+ return bits->numBits();
+ }
+
+ size_t findBitSimple(size_t index, bool value) const
+ {
+ while (index < size()) {
+ if (get(index) == value)
+ return index;
+ index++;
+ }
+ return size();
+ }
+
class OutOfLineBits {
public:
size_t numBits() const { return m_numBits; }
@@ -318,6 +438,8 @@ private:
WTF_EXPORT_PRIVATE size_t bitCountSlow() const;
WTF_EXPORT_PRIVATE bool equalsSlowCase(const BitVector& other) const;
+ bool equalsSlowCaseFast(const BitVector& other) const;
+ bool equalsSlowCaseSimple(const BitVector& other) const;
WTF_EXPORT_PRIVATE uintptr_t hashSlowCase() const;
uintptr_t* bits()
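
A short usage sketch of the set-like return values, findBit(), and the new set-bit iterator added above; the bit indices are illustrative:

    #include <wtf/BitVector.h>
    #include <wtf/DataLog.h>

    void example()
    {
        BitVector bits;
        bool wasSet = bits.set(10);  // Grows the vector; returns the previous value (false here).
        bool isNew = bits.add(100);  // Set-style: true because the bit went from false to true.
        bits.set(200, true);
        dataLog("wasSet = ", wasSet, ", isNew = ", isNew, "\n");

        size_t found = bits.findBit(50, true); // First set bit at or after index 50; 100 here.
        dataLog("found = ", found, "\n");

        // The iterator visits set bits in increasing order: 10, 100, 200.
        for (size_t bit : bits)
            dataLog("bit ", bit, "\n");
    }
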
diff --git a/Source/WTF/wtf/Bitmap.h b/Source/WTF/wtf/Bitmap.h
index 7b288f9ed..89b2c9da8 100644
--- a/Source/WTF/wtf/Bitmap.h
+++ b/Source/WTF/wtf/Bitmap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2016 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -27,22 +27,22 @@
namespace WTF {
-enum BitmapAtomicMode {
- // This makes concurrentTestAndSet behave just like testAndSet.
- BitmapNotAtomic,
-
- // This makes concurrentTestAndSet use compareAndSwap, so that it's
- // atomic even when used concurrently.
- BitmapAtomic
-};
-
-template<size_t size, BitmapAtomicMode atomicMode = BitmapNotAtomic, typename WordType = uint32_t>
+template<size_t bitmapSize, typename WordType = uint32_t>
class Bitmap {
+ WTF_MAKE_FAST_ALLOCATED;
+
+ static_assert(sizeof(WordType) <= sizeof(unsigned), "WordType must not be bigger than unsigned");
public:
Bitmap();
+ static constexpr size_t size()
+ {
+ return bitmapSize;
+ }
+
bool get(size_t) const;
void set(size_t);
+ void set(size_t, bool);
bool testAndSet(size_t);
bool testAndClear(size_t);
bool concurrentTestAndSet(size_t);
@@ -50,14 +50,29 @@ public:
size_t nextPossiblyUnset(size_t) const;
void clear(size_t);
void clearAll();
- int64_t findRunOfZeros(size_t) const;
- size_t count(size_t = 0) const;
+ int64_t findRunOfZeros(size_t runLength) const;
+ size_t count(size_t start = 0) const;
size_t isEmpty() const;
size_t isFull() const;
+
+ void merge(const Bitmap&);
+ void filter(const Bitmap&);
+ void exclude(const Bitmap&);
+
+ template<typename Func>
+ void forEachSetBit(const Func&) const;
+
+ void mergeAndClear(Bitmap&);
+ void setAndClear(Bitmap&);
+
+ bool operator==(const Bitmap&) const;
+ bool operator!=(const Bitmap&) const;
+
+ unsigned hash() const;
private:
static const unsigned wordSize = sizeof(WordType) * 8;
- static const unsigned words = (size + wordSize - 1) / wordSize;
+ static const unsigned words = (bitmapSize + wordSize - 1) / wordSize;
// the literal '1' is of type signed int. We want to use an unsigned
// version of the correct size when doing the calculations because if
@@ -69,26 +84,35 @@ private:
std::array<WordType, words> bits;
};
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline Bitmap<size, atomicMode, WordType>::Bitmap()
+template<size_t bitmapSize, typename WordType>
+inline Bitmap<bitmapSize, WordType>::Bitmap()
{
clearAll();
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline bool Bitmap<size, atomicMode, WordType>::get(size_t n) const
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::get(size_t n) const
{
return !!(bits[n / wordSize] & (one << (n % wordSize)));
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline void Bitmap<size, atomicMode, WordType>::set(size_t n)
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::set(size_t n)
{
bits[n / wordSize] |= (one << (n % wordSize));
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline bool Bitmap<size, atomicMode, WordType>::testAndSet(size_t n)
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::set(size_t n, bool value)
+{
+ if (value)
+ set(n);
+ else
+ clear(n);
+}
+
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::testAndSet(size_t n)
{
WordType mask = one << (n % wordSize);
size_t index = n / wordSize;
@@ -97,8 +121,8 @@ inline bool Bitmap<size, atomicMode, WordType>::testAndSet(size_t n)
return result;
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline bool Bitmap<size, atomicMode, WordType>::testAndClear(size_t n)
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::testAndClear(size_t n)
{
WordType mask = one << (n % wordSize);
size_t index = n / wordSize;
@@ -107,14 +131,9 @@ inline bool Bitmap<size, atomicMode, WordType>::testAndClear(size_t n)
return result;
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline bool Bitmap<size, atomicMode, WordType>::concurrentTestAndSet(size_t n)
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::concurrentTestAndSet(size_t n)
{
- if (atomicMode == BitmapNotAtomic)
- return testAndSet(n);
-
- ASSERT(atomicMode == BitmapAtomic);
-
WordType mask = one << (n % wordSize);
size_t index = n / wordSize;
WordType* wordPtr = bits.data() + index;
@@ -123,18 +142,13 @@ inline bool Bitmap<size, atomicMode, WordType>::concurrentTestAndSet(size_t n)
oldValue = *wordPtr;
if (oldValue & mask)
return true;
- } while (!weakCompareAndSwap(wordPtr, oldValue, oldValue | mask));
+ } while (!atomicCompareExchangeWeakRelaxed(wordPtr, oldValue, static_cast<WordType>(oldValue | mask)));
return false;
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline bool Bitmap<size, atomicMode, WordType>::concurrentTestAndClear(size_t n)
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::concurrentTestAndClear(size_t n)
{
- if (atomicMode == BitmapNotAtomic)
- return testAndClear(n);
-
- ASSERT(atomicMode == BitmapAtomic);
-
WordType mask = one << (n % wordSize);
size_t index = n / wordSize;
WordType* wordPtr = bits.data() + index;
@@ -143,37 +157,37 @@ inline bool Bitmap<size, atomicMode, WordType>::concurrentTestAndClear(size_t n)
oldValue = *wordPtr;
if (!(oldValue & mask))
return false;
- } while (!weakCompareAndSwap(wordPtr, oldValue, oldValue & ~mask));
+ } while (!atomicCompareExchangeWeakRelaxed(wordPtr, oldValue, static_cast<WordType>(oldValue & ~mask)));
return true;
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline void Bitmap<size, atomicMode, WordType>::clear(size_t n)
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::clear(size_t n)
{
bits[n / wordSize] &= ~(one << (n % wordSize));
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline void Bitmap<size, atomicMode, WordType>::clearAll()
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::clearAll()
{
memset(bits.data(), 0, sizeof(bits));
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline size_t Bitmap<size, atomicMode, WordType>::nextPossiblyUnset(size_t start) const
+template<size_t bitmapSize, typename WordType>
+inline size_t Bitmap<bitmapSize, WordType>::nextPossiblyUnset(size_t start) const
{
if (!~bits[start / wordSize])
return ((start / wordSize) + 1) * wordSize;
return start + 1;
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline int64_t Bitmap<size, atomicMode, WordType>::findRunOfZeros(size_t runLength) const
+template<size_t bitmapSize, typename WordType>
+inline int64_t Bitmap<bitmapSize, WordType>::findRunOfZeros(size_t runLength) const
{
if (!runLength)
runLength = 1;
- for (size_t i = 0; i <= (size - runLength) ; i++) {
+ for (size_t i = 0; i <= (bitmapSize - runLength) ; i++) {
bool found = true;
for (size_t j = i; j <= (i + runLength - 1) ; j++) {
if (get(j)) {
@@ -187,8 +201,8 @@ inline int64_t Bitmap<size, atomicMode, WordType>::findRunOfZeros(size_t runLeng
return -1;
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline size_t Bitmap<size, atomicMode, WordType>::count(size_t start) const
+template<size_t bitmapSize, typename WordType>
+inline size_t Bitmap<bitmapSize, WordType>::count(size_t start) const
{
size_t result = 0;
for ( ; (start % wordSize); ++start) {
@@ -200,8 +214,8 @@ inline size_t Bitmap<size, atomicMode, WordType>::count(size_t start) const
return result;
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline size_t Bitmap<size, atomicMode, WordType>::isEmpty() const
+template<size_t bitmapSize, typename WordType>
+inline size_t Bitmap<bitmapSize, WordType>::isEmpty() const
{
for (size_t i = 0; i < words; ++i)
if (bits[i])
@@ -209,8 +223,8 @@ inline size_t Bitmap<size, atomicMode, WordType>::isEmpty() const
return true;
}
-template<size_t size, BitmapAtomicMode atomicMode, typename WordType>
-inline size_t Bitmap<size, atomicMode, WordType>::isFull() const
+template<size_t bitmapSize, typename WordType>
+inline size_t Bitmap<bitmapSize, WordType>::isFull() const
{
for (size_t i = 0; i < words; ++i)
if (~bits[i])
@@ -218,5 +232,89 @@ inline size_t Bitmap<size, atomicMode, WordType>::isFull() const
return true;
}
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::merge(const Bitmap& other)
+{
+ for (size_t i = 0; i < words; ++i)
+ bits[i] |= other.bits[i];
+}
+
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::filter(const Bitmap& other)
+{
+ for (size_t i = 0; i < words; ++i)
+ bits[i] &= other.bits[i];
+}
+
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::exclude(const Bitmap& other)
+{
+ for (size_t i = 0; i < words; ++i)
+ bits[i] &= ~other.bits[i];
+}
+
+template<size_t bitmapSize, typename WordType>
+template<typename Func>
+inline void Bitmap<bitmapSize, WordType>::forEachSetBit(const Func& func) const
+{
+ for (size_t i = 0; i < words; ++i) {
+ WordType word = bits[i];
+ if (!word)
+ continue;
+ size_t base = i * wordSize;
+ for (size_t j = 0; j < wordSize; ++j) {
+ if (word & 1)
+ func(base + j);
+ word >>= 1;
+ }
+ }
+}
+
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::mergeAndClear(Bitmap& other)
+{
+ for (size_t i = 0; i < words; ++i) {
+ bits[i] |= other.bits[i];
+ other.bits[i] = 0;
+ }
+}
+
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::setAndClear(Bitmap& other)
+{
+ for (size_t i = 0; i < words; ++i) {
+ bits[i] = other.bits[i];
+ other.bits[i] = 0;
+ }
+}
+
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::operator==(const Bitmap& other) const
+{
+ for (size_t i = 0; i < words; ++i) {
+ if (bits[i] != other.bits[i])
+ return false;
+ }
+ return true;
+}
+
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::operator!=(const Bitmap& other) const
+{
+ return !(*this == other);
+}
+
+template<size_t bitmapSize, typename WordType>
+inline unsigned Bitmap<bitmapSize, WordType>::hash() const
+{
+ unsigned result = 0;
+ for (size_t i = 0; i < words; ++i)
+ result ^= IntHash<WordType>::hash(bits[i]);
+ return result;
}
+
+} // namespace WTF
+
+using WTF::Bitmap;
+
#endif
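
A sketch of the set-algebra helpers and forEachSetBit() added to Bitmap above; the size and bit indices are illustrative:

    #include <wtf/Bitmap.h>
    #include <wtf/DataLog.h>

    void example()
    {
        Bitmap<128> a;
        Bitmap<128> b;
        a.set(3);
        a.set(64);
        b.set(64);

        a.merge(b);   // a |= b, so a still holds { 3, 64 }.
        a.exclude(b); // a &= ~b, leaving only bit 3 set.
        dataLog("equal = ", a == b, ", hash = ", a.hash(), "\n");

        a.forEachSetBit([](size_t bit) {
            dataLog("bit ", bit, " is set\n");
        });
    }
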
diff --git a/Source/WTF/wtf/BlockObjCExceptions.h b/Source/WTF/wtf/BlockObjCExceptions.h
new file mode 100644
index 000000000..271bf6d79
--- /dev/null
+++ b/Source/WTF/wtf/BlockObjCExceptions.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2003, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/NSException.h>
+
+WTF_EXPORT_PRIVATE NO_RETURN_DUE_TO_ASSERT void ReportBlockedObjCException(NSException *);
+
+#define BEGIN_BLOCK_OBJC_EXCEPTIONS @try {
+#define END_BLOCK_OBJC_EXCEPTIONS } @catch(NSException *localException) { ReportBlockedObjCException(localException); }
+
diff --git a/Source/WTF/wtf/BlockPtr.h b/Source/WTF/wtf/BlockPtr.h
new file mode 100644
index 000000000..1b79d4178
--- /dev/null
+++ b/Source/WTF/wtf/BlockPtr.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <Block.h>
+#include <wtf/Assertions.h>
+
+namespace WTF {
+
+extern "C" void* _NSConcreteMallocBlock[32];
+
+template<typename> class BlockPtr;
+
+template<typename R, typename... Args>
+class BlockPtr<R (Args...)> {
+public:
+ using BlockType = R (^)(Args...);
+
+ template<typename F>
+ static BlockPtr fromCallable(F function)
+ {
+ struct Descriptor {
+ uintptr_t reserved;
+ uintptr_t size;
+ void (*copy)(void *dst, const void *src);
+ void (*dispose)(const void *);
+ };
+
+ struct Block {
+ void* isa;
+ int32_t flags;
+ int32_t reserved;
+ R (*invoke)(void *, Args...);
+ const struct Descriptor* descriptor;
+ F f;
+ };
+
+ static const Descriptor descriptor {
+ 0,
+ sizeof(Block),
+
+ // We keep the copy function null - the block is already on the heap
+ // so it should never be copied.
+ nullptr,
+
+ [](const void* ptr) {
+ static_cast<Block*>(const_cast<void*>(ptr))->f.~F();
+ }
+ };
+
+ Block* block = static_cast<Block*>(malloc(sizeof(Block)));
+ block->isa = _NSConcreteMallocBlock;
+
+ enum {
+ BLOCK_NEEDS_FREE = (1 << 24),
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ };
+ const unsigned retainCount = 1;
+
+ block->flags = BLOCK_HAS_COPY_DISPOSE | BLOCK_NEEDS_FREE | (retainCount << 1);
+ block->reserved = 0;
+ block->invoke = [](void *ptr, Args... args) -> R {
+ return static_cast<Block*>(ptr)->f(std::forward<Args>(args)...);
+ };
+ block->descriptor = &descriptor;
+
+ new (&block->f) F { std::move(function) };
+
+ BlockPtr blockPtr;
+ blockPtr.m_block = reinterpret_cast<BlockType>(block);
+
+ return blockPtr;
+ }
+
+ BlockPtr()
+ : m_block(nullptr)
+ {
+ }
+
+ BlockPtr(BlockType block)
+ : m_block(Block_copy(block))
+ {
+ }
+
+ BlockPtr(const BlockPtr& other)
+ : m_block(Block_copy(other.m_block))
+ {
+ }
+
+ BlockPtr(BlockPtr&& other)
+ : m_block(std::exchange(other.m_block, nullptr))
+ {
+ }
+
+ ~BlockPtr()
+ {
+ Block_release(m_block);
+ }
+
+ BlockPtr& operator=(const BlockPtr& other)
+ {
+ if (this != &other) {
+ Block_release(m_block);
+ m_block = Block_copy(other.m_block);
+ }
+
+ return *this;
+ }
+
+ BlockPtr& operator=(BlockPtr&& other)
+ {
+ ASSERT(this != &other);
+
+ Block_release(m_block);
+ m_block = std::exchange(other.m_block, nullptr);
+
+ return *this;
+ }
+
+ BlockType get() const { return m_block; }
+
+ explicit operator bool() const { return m_block; }
+ bool operator!() const { return !m_block; }
+
+ R operator()(Args... arguments) const
+ {
+ ASSERT(m_block);
+
+ return m_block(std::forward<Args>(arguments)...);
+ }
+
+private:
+ BlockType m_block;
+};
+
+template<typename R, typename... Args>
+inline BlockPtr<R (Args...)> makeBlockPtr(R (^block)(Args...))
+{
+ return BlockPtr<R (Args...)>(block);
+}
+
+}
+
+using WTF::BlockPtr;
+using WTF::makeBlockPtr;
+
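
A sketch of BlockPtr::fromCallable(), which wraps a capturing C++ lambda in a heap-allocated block without writing an Objective-C block literal. It assumes a toolchain with blocks support (the header itself uses block pointer types); the names and values are illustrative:

    #include <wtf/BlockPtr.h>

    void example()
    {
        int base = 10;
        auto add = BlockPtr<int (int)>::fromCallable([base](int x) {
            return base + x; // The lambda and its captures live inside the malloc'd block.
        });

        int sum = add(32); // Invokes the block; sum == 42.
        (void)sum;
    }
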
diff --git a/Source/WTF/wtf/BloomFilter.h b/Source/WTF/wtf/BloomFilter.h
index e14cb280e..afb17b4d6 100644
--- a/Source/WTF/wtf/BloomFilter.h
+++ b/Source/WTF/wtf/BloomFilter.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,31 +26,150 @@
#ifndef BloomFilter_h
#define BloomFilter_h
+#include <array>
#include <wtf/text/AtomicString.h>
namespace WTF {
-// Counting bloom filter with k=2 and 8 bit counters. Uses 2^keyBits bytes of memory.
+// Bloom filter with k=2. Uses 2^keyBits/8 bytes of memory.
// False positive rate is approximately (1-e^(-2n/m))^2, where n is the number of unique
// keys and m is the table size (==2^keyBits).
+// See http://en.wikipedia.org/wiki/Bloom_filter
template <unsigned keyBits>
class BloomFilter {
WTF_MAKE_FAST_ALLOCATED;
public:
- static_assert(keyBits <= 16, "BloomFilter key size must be less than or equal to 16!");
-
static const size_t tableSize = 1 << keyBits;
+
+ BloomFilter();
+
+ void add(unsigned hash);
+ // For example SHA1::Digest.
+ template <size_t hashSize> void add(const std::array<uint8_t, hashSize>&);
+
+ void add(const BloomFilter&);
+
+ // The filter may give false positives (claim it may contain a key it doesn't)
+ // but never false negatives (claim it doesn't contain a key it does).
+ bool mayContain(unsigned hash) const;
+ template <size_t hashSize> bool mayContain(const std::array<uint8_t, hashSize>&) const;
+
+ void clear();
+
+ void add(const AtomicString& string) { add(string.impl()->existingHash()); }
+ void add(const String& string) { add(string.impl()->hash()); }
+ bool mayContain(const AtomicString& string) const { return mayContain(string.impl()->existingHash()); }
+ bool mayContain(const String& string) const { return mayContain(string.impl()->hash()); }
+
+private:
+ static const unsigned bitsPerPosition = 8 * sizeof(unsigned);
static const unsigned keyMask = (1 << keyBits) - 1;
- static uint8_t maximumCount() { return std::numeric_limits<uint8_t>::max(); }
+ static unsigned arrayIndex(unsigned key) { return key / bitsPerPosition; }
+ static unsigned bitMask(unsigned key) { return 1 << (key % bitsPerPosition); }
+ template <size_t hashSize> static std::pair<unsigned, unsigned> keysFromHash(const std::array<uint8_t, hashSize>&);
+
+ bool isBitSet(unsigned key) const;
+ void setBit(unsigned key);
+
+ std::array<unsigned, tableSize / bitsPerPosition> m_bitArray;
+};
+
+template <unsigned keyBits>
+inline BloomFilter<keyBits>::BloomFilter()
+ : m_bitArray()
+{
+}
+
+template <unsigned keyBits>
+inline bool BloomFilter<keyBits>::mayContain(unsigned hash) const
+{
+ // The top and bottom bits of the incoming hash are treated as independent bloom filter hash functions.
+ // This works well as long as the filter size is not much above 2^16.
+ return isBitSet(hash) && isBitSet(hash >> 16);
+}
+
+template <unsigned keyBits>
+inline void BloomFilter<keyBits>::add(unsigned hash)
+{
+ setBit(hash);
+ setBit(hash >> 16);
+}
+
+template <unsigned keyBits>
+template <size_t hashSize>
+inline std::pair<unsigned, unsigned> BloomFilter<keyBits>::keysFromHash(const std::array<uint8_t, hashSize>& hash)
+{
+    // We could use a larger k value than 2 for long hashes.
+ static_assert(hashSize >= 2 * sizeof(unsigned), "Hash array too short");
+ return {
+ *reinterpret_cast<const unsigned*>(hash.data()),
+ *reinterpret_cast<const unsigned*>(hash.data() + sizeof(unsigned))
+ };
+}
+
+template <unsigned keyBits>
+template <size_t hashSize>
+inline bool BloomFilter<keyBits>::mayContain(const std::array<uint8_t, hashSize>& hash) const
+{
+ auto keys = keysFromHash(hash);
+ return isBitSet(keys.first) && isBitSet(keys.second);
+}
+
+template <unsigned keyBits>
+template <size_t hashSize>
+inline void BloomFilter<keyBits>::add(const std::array<uint8_t, hashSize>& hash)
+{
+ auto keys = keysFromHash(hash);
+ setBit(keys.first);
+ setBit(keys.second);
+}
+
+template <unsigned keyBits>
+inline void BloomFilter<keyBits>::add(const BloomFilter& other)
+{
+ for (size_t i = 0; i < m_bitArray.size(); ++i)
+ m_bitArray[i] |= other.m_bitArray[i];
+}
+
+template <unsigned keyBits>
+bool BloomFilter<keyBits>::isBitSet(unsigned key) const
+{
+ unsigned maskedKey = key & keyMask;
+ ASSERT(arrayIndex(maskedKey) < m_bitArray.size());
+ return m_bitArray[arrayIndex(maskedKey)] & bitMask(maskedKey);
+}
+
+template <unsigned keyBits>
+void BloomFilter<keyBits>::setBit(unsigned key)
+{
+ unsigned maskedKey = key & keyMask;
+ ASSERT(arrayIndex(maskedKey) < m_bitArray.size());
+ m_bitArray[arrayIndex(maskedKey)] |= bitMask(maskedKey);
+}
+
+template <unsigned keyBits>
+inline void BloomFilter<keyBits>::clear()
+{
+ m_bitArray.fill(0);
+}
+
+// Counting bloom filter with 8 bit counters. Uses 2^keyBits bytes of memory. Error rates as above.
+// See http://en.wikipedia.org/wiki/Bloom_filter#Counting_filters
+template <unsigned keyBits>
+class CountingBloomFilter {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ static const size_t tableSize = 1 << keyBits;
+ static unsigned maximumCount() { return std::numeric_limits<uint8_t>::max(); }
- BloomFilter() { clear(); }
+ CountingBloomFilter();
void add(unsigned hash);
void remove(unsigned hash);
// The filter may give false positives (claim it may contain a key it doesn't)
// but never false negatives (claim it doesn't contain a key it does).
- bool mayContain(unsigned hash) const { return firstSlot(hash) && secondSlot(hash); }
+ bool mayContain(unsigned hash) const { return firstBucket(hash) && secondBucket(hash); }
// The filter must be cleared before reuse even if all keys are removed.
// Otherwise overflowed keys will stick around.
@@ -71,19 +190,27 @@ public:
#endif
private:
- uint8_t& firstSlot(unsigned hash) { return m_table[hash & keyMask]; }
- uint8_t& secondSlot(unsigned hash) { return m_table[(hash >> 16) & keyMask]; }
- const uint8_t& firstSlot(unsigned hash) const { return m_table[hash & keyMask]; }
- const uint8_t& secondSlot(unsigned hash) const { return m_table[(hash >> 16) & keyMask]; }
+ static const unsigned keyMask = (1 << keyBits) - 1;
- uint8_t m_table[tableSize];
+ uint8_t& firstBucket(unsigned hash) { return m_buckets[hash & keyMask]; }
+ uint8_t& secondBucket(unsigned hash) { return m_buckets[(hash >> 16) & keyMask]; }
+ const uint8_t& firstBucket(unsigned hash) const { return m_buckets[hash & keyMask]; }
+ const uint8_t& secondBucket(unsigned hash) const { return m_buckets[(hash >> 16) & keyMask]; }
+
+ std::array<uint8_t, tableSize> m_buckets;
};
-
+
template <unsigned keyBits>
-inline void BloomFilter<keyBits>::add(unsigned hash)
+inline CountingBloomFilter<keyBits>::CountingBloomFilter()
+ : m_buckets()
{
- uint8_t& first = firstSlot(hash);
- uint8_t& second = secondSlot(hash);
+}
+
+template <unsigned keyBits>
+inline void CountingBloomFilter<keyBits>::add(unsigned hash)
+{
+ auto& first = firstBucket(hash);
+ auto& second = secondBucket(hash);
if (LIKELY(first < maximumCount()))
++first;
if (LIKELY(second < maximumCount()))
@@ -91,13 +218,13 @@ inline void BloomFilter<keyBits>::add(unsigned hash)
}
template <unsigned keyBits>
-inline void BloomFilter<keyBits>::remove(unsigned hash)
+inline void CountingBloomFilter<keyBits>::remove(unsigned hash)
{
- uint8_t& first = firstSlot(hash);
- uint8_t& second = secondSlot(hash);
+ auto& first = firstBucket(hash);
+ auto& second = secondBucket(hash);
ASSERT(first);
ASSERT(second);
- // In case of an overflow, the slot sticks in the table until clear().
+ // In case of an overflow, the bucket sticks in the table until clear().
if (LIKELY(first < maximumCount()))
--first;
if (LIKELY(second < maximumCount()))
@@ -105,27 +232,27 @@ inline void BloomFilter<keyBits>::remove(unsigned hash)
}
template <unsigned keyBits>
-inline void BloomFilter<keyBits>::clear()
+inline void CountingBloomFilter<keyBits>::clear()
{
- memset(m_table, 0, tableSize);
+ m_buckets.fill(0);
}
#if !ASSERT_DISABLED
template <unsigned keyBits>
-bool BloomFilter<keyBits>::likelyEmpty() const
+bool CountingBloomFilter<keyBits>::likelyEmpty() const
{
- for (size_t n = 0; n < tableSize; ++n) {
- if (m_table[n] && m_table[n] != maximumCount())
+ for (auto& bucket : m_buckets) {
+ if (bucket && bucket != maximumCount())
return false;
}
return true;
}
template <unsigned keyBits>
-bool BloomFilter<keyBits>::isClear() const
+bool CountingBloomFilter<keyBits>::isClear() const
{
- for (size_t n = 0; n < tableSize; ++n) {
- if (m_table[n])
+ for (auto& bucket : m_buckets) {
+ if (bucket)
return false;
}
return true;
@@ -135,5 +262,6 @@ bool BloomFilter<keyBits>::isClear() const
}
using WTF::BloomFilter;
+using WTF::CountingBloomFilter;
#endif
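
To put the error formula above in perspective: with keyBits = 12 (m = 4096 bits, 512 bytes) and n = 100 unique keys, (1 - e^(-2n/m))^2 works out to roughly 0.2%. A short usage sketch of both filters; the string key is illustrative:

    #include <wtf/BloomFilter.h>
    #include <wtf/text/AtomicString.h>

    void example()
    {
        BloomFilter<12> filter; // 2^12 bits = 512 bytes.
        AtomicString key("selector");
        filter.add(key);
        if (filter.mayContain(key)) {
            // Either the key was added or this is a false positive; never a false negative.
        }

        CountingBloomFilter<12> counting; // 2^12 one-byte counters = 4 KB; supports remove().
        counting.add(key.impl()->existingHash());
        counting.remove(key.impl()->existingHash());
    }
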
diff --git a/Source/WTF/wtf/BoundsCheckedPointer.h b/Source/WTF/wtf/BoundsCheckedPointer.h
deleted file mode 100644
index be0d21a2c..000000000
--- a/Source/WTF/wtf/BoundsCheckedPointer.h
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef WTF_BoundsCheckedPointer_h
-#define WTF_BoundsCheckedPointer_h
-
-#include <wtf/Assertions.h>
-
-namespace WTF {
-
-// Useful for when you'd like to do pointer arithmetic on a buffer, but
-// you'd also like to get some ASSERT()'s that prevent you from overflowing.
-// This should be performance-neutral in release builds, while providing
-// you with strong assertions in debug builds. Note that all of the
-// asserting happens when you actually access the pointer. You are allowed
-// to overflow or underflow with arithmetic so long as no accesses are
-// performed.
-
-template<typename T>
-class BoundsCheckedPointer {
-public:
- BoundsCheckedPointer()
- : m_pointer(0)
-#if !ASSERT_DISABLED
- , m_begin(0)
- , m_end(0)
-#endif
- {
- }
-
- BoundsCheckedPointer(T* pointer, size_t numElements)
- : m_pointer(pointer)
-#if !ASSERT_DISABLED
- , m_begin(pointer)
- , m_end(pointer + numElements)
-#endif
- {
- UNUSED_PARAM(numElements);
- }
-
- BoundsCheckedPointer(T* pointer, T* end)
- : m_pointer(pointer)
-#if !ASSERT_DISABLED
- , m_begin(pointer)
- , m_end(end)
-#endif
- {
- UNUSED_PARAM(end);
- }
-
- BoundsCheckedPointer(T* pointer, T* begin, size_t numElements)
- : m_pointer(pointer)
-#if !ASSERT_DISABLED
- , m_begin(begin)
- , m_end(begin + numElements)
-#endif
- {
- UNUSED_PARAM(begin);
- UNUSED_PARAM(numElements);
- }
-
- BoundsCheckedPointer(T* pointer, T* begin, T* end)
- : m_pointer(pointer)
-#if !ASSERT_DISABLED
- , m_begin(begin)
- , m_end(end)
-#endif
- {
- UNUSED_PARAM(begin);
- UNUSED_PARAM(end);
- }
-
- BoundsCheckedPointer& operator=(T* value)
- {
- m_pointer = value;
- return *this;
- }
-
- BoundsCheckedPointer& operator+=(ptrdiff_t amount)
- {
- m_pointer += amount;
- return *this;
- }
-
- BoundsCheckedPointer& operator-=(ptrdiff_t amount)
- {
- m_pointer -= amount;
- return *this;
- }
-
- BoundsCheckedPointer operator+(ptrdiff_t amount) const
- {
- BoundsCheckedPointer result = *this;
- result.m_pointer += amount;
- return result;
- }
-
- BoundsCheckedPointer operator-(ptrdiff_t amount) const
- {
- BoundsCheckedPointer result = *this;
- result.m_pointer -= amount;
- return result;
- }
-
- BoundsCheckedPointer operator++() // prefix
- {
- m_pointer++;
- return *this;
- }
-
- BoundsCheckedPointer operator--() // prefix
- {
- m_pointer--;
- return *this;
- }
-
- BoundsCheckedPointer operator++(int) // postfix
- {
- BoundsCheckedPointer result = *this;
- m_pointer++;
- return result;
- }
-
- BoundsCheckedPointer operator--(int) // postfix
- {
- BoundsCheckedPointer result = *this;
- m_pointer--;
- return result;
- }
-
- bool operator<(T* other) const
- {
- return m_pointer < other;
- }
-
- bool operator<=(T* other) const
- {
- return m_pointer <= other;
- }
-
- bool operator>(T* other) const
- {
- return m_pointer > other;
- }
-
- bool operator>=(T* other) const
- {
- return m_pointer >= other;
- }
-
- bool operator==(T* other) const
- {
- return m_pointer == other;
- }
-
- bool operator!=(T* other) const
- {
- return m_pointer != other;
- }
-
- bool operator<(BoundsCheckedPointer other) const
- {
- return m_pointer < other.m_pointer;
- }
-
- bool operator<=(BoundsCheckedPointer other) const
- {
- return m_pointer <= other.m_pointer;
- }
-
- bool operator>(BoundsCheckedPointer other) const
- {
- return m_pointer > other.m_pointer;
- }
-
- bool operator>=(BoundsCheckedPointer other) const
- {
- return m_pointer >= other.m_pointer;
- }
-
- bool operator==(BoundsCheckedPointer other) const
- {
- return m_pointer == other.m_pointer;
- }
-
- bool operator!=(BoundsCheckedPointer other) const
- {
- return m_pointer != other.m_pointer;
- }
-
- BoundsCheckedPointer operator!()
- {
- return !m_pointer;
- }
-
- T* get()
- {
- return m_pointer;
- }
-
- T& operator*()
- {
- validate();
- return *m_pointer;
- }
-
- const T& operator*() const
- {
- validate();
- return *m_pointer;
- }
-
- T& operator[](ptrdiff_t index)
- {
- validate(m_pointer + index);
- return m_pointer[index];
- }
-
- const T& operator[](ptrdiff_t index) const
- {
- validate(m_pointer + index);
- return m_pointer[index];
- }
-
- // The only thing this has in common with strcat() is that it
- // keeps appending from the given pointer until reaching 0.
- BoundsCheckedPointer& strcat(const T* source)
- {
- while (*source)
- *(*this)++ = *source++;
- return *this;
- }
-
-private:
- void validate(T* pointer) const
- {
- ASSERT_UNUSED(pointer, pointer >= m_begin);
-
- // This guard is designed to protect against the misaligned case.
- // A simple pointer < m_end would miss the case if, for example,
- // T = int16_t and pointer is 1 byte less than m_end.
- ASSERT_UNUSED(pointer, pointer + 1 <= m_end);
- }
-
- void validate() const
- {
- validate(m_pointer);
- }
-
- T* m_pointer;
-#if !ASSERT_DISABLED
- T* m_begin;
- T* m_end;
-#endif
-};
-
-} // namespace WTF
-
-using WTF::BoundsCheckedPointer;
-
-#endif // WTF_BoundsCheckedPointer_h
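
For reference, a minimal sketch of how the BoundsCheckedPointer removed above was used (illustrative only, not part of the patch; it assumes the pre-removal <wtf/BoundsCheckedPointer.h> header). As the comment in the removed header notes, arithmetic may run past the bounds freely; only an actual access asserts, and only in debug builds:

    #include <wtf/BoundsCheckedPointer.h>

    void fillBuffer(char* buffer, size_t size) // assumes size >= 3 for this sketch
    {
        WTF::BoundsCheckedPointer<char> p(buffer, size);
        p.strcat("ok");     // appends until the terminating 0, asserting each store
        *p = '\0';          // dereference is validated against [begin, end) in debug builds
        p += size;          // arithmetic may run past the end...
        // *p would ASSERT  // ...but an access there is caught in debug builds
    }
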
diff --git a/Source/WTF/wtf/Box.h b/Source/WTF/wtf/Box.h
new file mode 100644
index 000000000..b66132191
--- /dev/null
+++ b/Source/WTF/wtf/Box.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Box_h
+#define WTF_Box_h
+
+#include <wtf/RefPtr.h>
+#include <wtf/ThreadSafeRefCounted.h>
+
+namespace WTF {
+
+// Box<T> is a reference-counted pointer to T that allocates T using FastMalloc and prepends a reference
+// count to it.
+template<typename T>
+class Box {
+public:
+ Box()
+ {
+ }
+
+ Box(std::nullptr_t)
+ {
+ }
+
+ template<typename... Arguments>
+ static Box create(Arguments&&... arguments)
+ {
+ Box result;
+ result.m_data = adoptRef(new Data(std::forward<Arguments>(arguments)...));
+ return result;
+ }
+
+ T* get() const { return &m_data->value; }
+
+ T& operator*() const { return m_data->value; }
+ T* operator->() const { return &m_data->value; }
+
+ explicit operator bool() { return m_data; }
+
+private:
+ struct Data : ThreadSafeRefCounted<Data> {
+ template<typename... Arguments>
+ Data(Arguments&&... arguments)
+ : value(std::forward<Arguments>(arguments)...)
+ {
+ }
+
+ T value;
+ };
+
+ RefPtr<Data> m_data;
+};
+
+} // namespace WTF
+
+using WTF::Box;
+
+#endif // WTF_Box_h
+
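
A minimal usage sketch for the newly added Box<T> (illustrative only, not part of the patch): copies of a Box share one heap-allocated, thread-safe ref-counted T, so the payload has reference semantics.

    #include <wtf/Assertions.h>
    #include <wtf/Box.h>

    void example()
    {
        Box<int> counter = Box<int>::create(0); // allocates one ref-counted int
        Box<int> alias = counter;               // copies the RefPtr, not the payload
        *alias += 1;                            // both boxes see the same value
        ASSERT(*counter == 1);
        if (counter)                            // explicit operator bool: non-null check
            *counter += 1;
    }
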
diff --git a/Source/WTF/wtf/Brigand.h b/Source/WTF/wtf/Brigand.h
new file mode 100644
index 000000000..2000a8250
--- /dev/null
+++ b/Source/WTF/wtf/Brigand.h
@@ -0,0 +1,2489 @@
+// Brigand library
+//
+// Copyright (c) 2015 Edouard Alligand and Joel Falcou
+//
+// Boost Software License - Version 1.0 - August 17th, 2003
+//
+// Permission is hereby granted, free of charge, to any person or organization
+// obtaining a copy of the software and accompanying documentation covered by
+// this license (the "Software") to use, reproduce, display, distribute,
+// execute, and transmit the Software, and to prepare derivative works of the
+// Software, and to permit third-parties to whom the Software is furnished to
+// do so, all subject to the following:
+//
+// The copyright notices in the Software and this entire statement, including
+// the above license grant, this restriction and the following disclaimer,
+// must be included in all copies of the Software, in whole or in part, and
+// all derivative works of the Software, unless such copies or derivative
+// works are solely in the form of machine-executable object code generated by
+// a source language processor.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+// This file contains a standalone version of Edouard Alligand and Joel Falcou's
+// Brigand library, which can be found at https://github.com/edouarda/brigand
+
+#pragma once
+
+#if defined(_MSC_VER) && !defined(__GNUC__) && !defined(__clang__)
+#define BRIGAND_COMP_MSVC
+#if _MSC_VER == 1900
+#define BRIGAND_COMP_MSVC_2015
+#elif _MSC_VER == 1800
+#define BRIGAND_COMP_MSVC_2013
+#endif
+#elif __GNUC__
+#ifndef __clang__
+#define BRIGAND_COMP_GCC
+#else
+#define BRIGAND_COMP_CLANG
+#endif
+#endif
+
+#define BRIGAND_NO_BOOST_SUPPORT 1
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <functional>
+#include <initializer_list>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#if !defined(BRIGAND_NO_BOOST_SUPPORT)
+#include <boost/fusion/container/vector/vector_fwd.hpp>
+#include <boost/fusion/container/deque/deque_fwd.hpp>
+#include <boost/fusion/container/list/list_fwd.hpp>
+#include <boost/fusion/container/set/set_fwd.hpp>
+#include <boost/variant.hpp>
+#endif
+
+namespace brigand
+{
+ template <class... T> struct list {};
+ template<typename T, T... Values>
+ using integral_list = brigand::list< std::integral_constant<T,Values>...>;
+ using empty_sequence = brigand::list<>;
+}
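
As the header comment above says, this is a verbatim drop of the Brigand metaprogramming library; everything in it operates on type-level sequences like the brigand::list just defined. A minimal orientation sketch (illustrative only, not part of the patch):

    #include <type_traits>
    #include <wtf/Brigand.h>

    using Types = brigand::list<int, char, double>;      // a pure type-level sequence
    using Ones  = brigand::integral_list<int, 1, 1, 1>;  // list of integral_constant<int, 1>
    static_assert(std::is_same<brigand::empty_sequence, brigand::list<>>::value, "");
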
+namespace brigand
+{
+ namespace lazy
+ {
+ template <class A, template<class...> class B> struct wrap;
+ template<template<class...> class A, class... T, template<class...> class B>
+ struct wrap<A<T...>, B>
+ {
+ using type = B<T...>;
+ };
+ }
+ template<class A, template<class...> class B>
+ using wrap = typename lazy::wrap<A, B>::type;
+}
+namespace brigand
+{
+namespace detail
+{
+ template <typename... Ts>
+ struct append_impl
+ {
+ using type = brigand::empty_sequence;
+ };
+ template <typename T>
+ struct append_impl<T>
+ {
+ using type = T;
+ };
+ template <template <typename...> class L1, template <typename...> class L2, typename... T1s,
+ typename... T2s, typename... Ts>
+ struct append_impl<L1<T1s...>, L2<T2s...>, Ts...> : append_impl<L1<T1s..., T2s...>, Ts...>
+ {
+ };
+ template <template <typename...> class L, template <typename...> class L1,
+ template <typename...> class L2, template <typename...> class L3,
+ template <typename...> class L4, template <typename...> class L5,
+ template <typename...> class L6, template <typename...> class L7,
+ template <typename...> class L8, template <typename...> class L9,
+ template <typename...> class L10, template <typename...> class L11,
+ template <typename...> class L12, template <typename...> class L13,
+ template <typename...> class L14, template <typename...> class L15,
+ template <typename...> class L16, typename... Ts, typename... T1s, typename... T2s,
+ typename... T3s, typename... T4s, typename... T5s, typename... T6s, typename... T7s,
+ typename... T8s, typename... T9s, typename... T10s, typename... T11s,
+ typename... T12s, typename... T13s, typename... T14s, typename... T15s,
+ typename... T16s, typename... Us>
+ struct append_impl<L<Ts...>, L1<T1s...>, L2<T2s...>, L3<T3s...>, L4<T4s...>, L5<T5s...>,
+ L6<T6s...>, L7<T7s...>, L8<T8s...>, L9<T9s...>, L10<T10s...>, L11<T11s...>,
+ L12<T12s...>, L13<T13s...>, L14<T14s...>, L15<T15s...>, L16<T16s...>, Us...>
+ : append_impl<L<Ts..., T1s..., T2s..., T3s..., T4s..., T5s..., T6s..., T7s..., T8s...,
+ T9s..., T10s..., T11s..., T12s..., T13s..., T14s..., T15s..., T16s...>,
+ Us...>
+ {
+ };
+}
+namespace lazy
+{
+ template <typename... Ts>
+ using append = detail::append_impl<Ts...>;
+}
+template <typename... Ts>
+using append = typename detail::append_impl<Ts...>::type;
+namespace lazy
+{
+ template <typename T>
+ struct join;
+ template<template<typename...> class L, typename...Ts>
+ struct join<L<Ts...>> : ::brigand::detail::append_impl<L<>,Ts...>
+ {
+ };
+}
+template <typename T>
+using join = wrap<T,append>;
+}
+namespace brigand
+{
+ template <typename First, typename Second>
+ struct pair
+ {
+ using first_type = First;
+ using second_type = Second;
+ };
+}
+namespace brigand
+{
+ template<typename T> struct type_ { using type = T; };
+ template<typename T> using type_from = typename T::type;
+}
+namespace brigand
+{
+ struct no_such_type_ {};
+}
+namespace brigand
+{
+namespace lazy
+{
+ template <typename M, typename K>
+ struct lookup
+ : decltype(M::at(type_<K>{}))
+ {};
+}
+ template <typename M, typename K>
+ using lookup = typename lazy::lookup<M,K>::type;
+namespace detail
+{
+ template <class... T>
+ struct map_impl;
+ template <>
+ struct map_impl<>
+ {
+ template <typename U>
+ static type_<no_such_type_> at(U);
+ template <class K>
+ static std::false_type has_key(type_<K>);
+ template <class K>
+ static map_impl erase(type_<K>);
+ template <class P>
+ static map_impl<P> insert(type_<P>);
+ };
+ template <class... Ts>
+ struct map_impl
+ {
+ private:
+ struct Pack : pair<typename Ts::first_type, Ts>... {};
+ template<class K, class P>
+ static type_<typename P::second_type> at_impl(pair<K,P>*);
+ public:
+ template<class K>
+ static decltype(at_impl<K>(static_cast<Pack*>(nullptr))) at(type_<K>);
+ template<class K>
+ static type_<no_such_type_> at(K);
+ template <class K, class = decltype(at_impl<K>(static_cast<Pack*>(nullptr)))>
+ static std::true_type has_key(type_<K>);
+ template <class K>
+ static std::false_type has_key(K);
+ template <class K>
+ static append<map_impl<>, typename std::conditional<std::is_same<K, typename Ts::first_type>::value, list<>, list<Ts>>::type...> erase(type_<K>);
+ template <class P, class = decltype(static_cast<pair<typename P::first_type, P>*>(static_cast<Pack*>(nullptr)))>
+ static map_impl insert(type_<P>);
+ template <class P>
+ static map_impl<Ts..., typename P::type> insert(P);
+ };
+ template<class... Ts>
+ struct make_map : type_<typename Ts::first_type>... {
+ using type = map_impl<Ts...>;
+ };
+}
+ template<class... Ts>
+ using map = typename detail::make_map<Ts...>::type;
+}
+namespace brigand
+{
+namespace detail
+{
+ template<class, class>
+ struct dup_append_list;
+ template<template<class...> class List, class... Ts, class... Us>
+ struct dup_append_list<List<Ts...>, List<Us...>>
+ {
+ using type = List<Ts..., Ts..., Us...>;
+ };
+ template<class T, template<class...> class List, std::size_t N>
+ struct filled_list_impl
+ : dup_append_list<
+ typename filled_list_impl<T, List, N/2>::type,
+ typename filled_list_impl<T, List, N - N/2*2>::type
+ >
+ {};
+ template<class T, template<class...> class List>
+ struct filled_list_impl<T, List, 1>
+ {
+ using type = List<T>;
+ };
+ template<class T, template<class...> class List>
+ struct filled_list_impl<T, List, 0>
+ {
+ using type = List<>;
+ };
+}
+ template<class T, std::size_t N, template<class...> class List = list>
+ using filled_list = typename detail::filled_list_impl<T, List, N>::type;
+}
+namespace brigand
+{
+ namespace detail
+ {
+ template<class T> struct element_at;
+ template<class... Ts>
+ struct element_at<list<Ts...>>
+ {
+ template<class T> type_<T> static at(Ts..., type_<T>*, ...);
+ };
+ template<std::size_t N, typename Seq> struct at_impl;
+ template<std::size_t N, template<typename...> class L, class... Ts>
+ struct at_impl<N,L<Ts...>>
+ : decltype(element_at<brigand::filled_list<void const *, N>>::at(static_cast<type_<Ts>*>(nullptr)...))
+ {
+ };
+ }
+ template <class L, std::size_t Index>
+ using at_c = typename detail::at_impl<Index, L>::type;
+namespace detail
+{
+ template <typename T>
+ struct has_at_method
+ {
+ struct dummy {};
+ template <typename C, typename P>
+ static auto test(P * p) -> decltype(C::at(*p), std::true_type());
+ template <typename, typename>
+ static std::false_type test(...);
+ static const bool value = std::is_same<std::true_type, decltype(test<T, dummy>(nullptr))>::value;
+ };
+ template <class L, typename Index, bool>
+ struct at_dispatch
+ {
+ using type = at_c<L, Index::value>;
+ };
+ template <class L, typename Index>
+ struct at_dispatch<L, Index, true>
+ {
+ using type = lookup<L, Index>;
+ };
+}
+ template <class Seq, typename K>
+ using at = typename detail::at_dispatch<Seq, K, detail::has_at_method<Seq>::value>::type;
+}
+namespace brigand
+{
+ template <template <typename...> class, typename...>
+ struct bind
+ {
+ };
+}
+namespace brigand
+{
+ template<std::size_t Index> struct args
+ {
+ };
+ struct _1 {};
+ struct _2 {};
+ using _3 = args<2>;
+ using _4 = args<3>;
+ using _5 = args<4>;
+ using _6 = args<5>;
+ using _7 = args<6>;
+ using _8 = args<7>;
+ using _9 = args<8>;
+ using _state = _1;
+ using _element = _2;
+}
+namespace brigand
+{
+template <typename T>
+struct defer
+{
+};
+template <typename T>
+struct pin
+{
+};
+template <typename T>
+struct parent
+{
+};
+namespace detail
+{
+ template <typename T, typename... Ts>
+ struct packaged_lcall
+ {
+ };
+ template <typename T, typename... Ls>
+ struct apply {
+ using type = T;
+ };
+ template <template<typename...> class F, typename...Ts, typename... Args>
+ struct apply<bind<F,Ts...>, Args...>
+ {
+ using type = F<typename apply<Ts, Args...>::type...>;
+ };
+ template <template <typename...> class F, typename... Ts, typename L, typename... Ls>
+ struct apply<F<Ts...>, L, Ls...> : F<typename apply<Ts, L, Ls...>::type...>
+ {
+ };
+ template <typename T, typename... Args, typename...Ls>
+ struct apply<pin<T>, list<Args...>, Ls...>
+ {
+ using type = T;
+ };
+ template <std::size_t N, typename L, typename...Ls>
+ struct apply<args<N>, L, Ls...>
+ {
+ using type = at_c<L, N>;
+ };
+ template <typename T, typename...Ts, typename...Ls>
+ struct apply<_1, list<T, Ts...>, Ls...>
+ {
+ using type = T;
+ };
+ template <typename T, typename U, typename...Ts, typename...Ls>
+ struct apply<_2, list<T, U, Ts...>, Ls...>
+ {
+ using type = U;
+ };
+ template <typename T, typename L, typename...Ls>
+ struct apply<parent<T>, L, Ls...> : apply<T,Ls...>
+ {
+ };
+ template <typename Lambda, typename L, typename...Ls>
+ struct apply<defer<Lambda>, L, Ls...>
+ {
+ using type = packaged_lcall<Lambda, L, Ls...>;
+ };
+ template <template <typename...> class Lambda, typename... Ts, typename... PLs, typename L, typename...Ls>
+ struct apply<packaged_lcall<Lambda<Ts...>, PLs...>, L, Ls...> : Lambda<typename apply<Ts, L, Ls..., PLs...>::type...>
+ {
+ };
+ template <template <typename...> class Lambda, typename... Ts, typename... PLs, typename L, typename...Ls>
+ struct apply<packaged_lcall<bind<Lambda,Ts...>, PLs...>, L, Ls...>
+ {
+ using type = Lambda<typename apply<Ts, L, Ls..., PLs...>::type...>;
+ };
+ template<typename T, typename...Ts>
+ using bound_apply = typename apply<T, brigand::list<Ts...>>::type;
+}
+template <typename Lambda, typename... Args>
+using apply = typename detail::apply<Lambda, brigand::list<Args...>>::type;
+}
+namespace brigand
+{
+ template<std::size_t Index> struct args;
+ namespace detail
+ {
+ template<typename T, typename List>
+ struct substitute_impl
+ {
+ using type = T;
+ };
+ template<template<class...> class T, typename... Ts, typename List>
+ struct substitute_impl<T<Ts...>,List>
+ {
+ using type = T<typename substitute_impl<Ts,List>::type...>;
+ };
+ template<std::size_t Index, typename List>
+ struct substitute_impl<args<Index>,List>
+ {
+ using type = brigand::at_c<List,Index>;
+ };
+ }
+ template<typename T, typename List>
+ using substitute = typename detail::substitute_impl<T,List>::type;
+}
+
+namespace brigand
+{
+ template <std::int8_t V>
+ using int8_t = std::integral_constant<std::int8_t, V>;
+ template <std::uint8_t V>
+ using uint8_t = std::integral_constant<std::uint8_t, V>;
+ template <std::int16_t V>
+ using int16_t = std::integral_constant<std::int16_t, V>;
+ template <std::uint16_t V>
+ using uint16_t = std::integral_constant<std::uint16_t, V>;
+ template <std::int32_t V>
+ using int32_t = std::integral_constant<std::int32_t, V>;
+ template <std::uint32_t V>
+ using uint32_t = std::integral_constant<std::uint32_t, V>;
+ template <std::int64_t V>
+ using int64_t = std::integral_constant<std::int64_t, V>;
+ template <std::uint64_t V>
+ using uint64_t = std::integral_constant<std::uint64_t, V>;
+ template<std::size_t V>
+ using size_t = std::integral_constant<std::size_t, V>;
+ template<std::ptrdiff_t V>
+ using ptrdiff_t = std::integral_constant<std::ptrdiff_t, V>;
+}
+
+namespace brigand
+{
+namespace detail
+{
+ constexpr std::size_t count_bools(bool const * const begin, bool const * const end,
+ std::size_t n)
+ {
+ return begin == end ? n : detail::count_bools(begin + 1, end, n + *begin);
+ }
+ template <bool... Bs>
+ struct template_count_bools
+ {
+ using type = ::brigand::size_t<0>;
+ };
+ template <bool B, bool... Bs>
+ struct template_count_bools<B, Bs...>
+ {
+ using type = ::brigand::size_t<B + template_count_bools<Bs...>::type::value>;
+ };
+ template <bool B1, bool B2, bool B3, bool B4, bool B5, bool B6, bool B7, bool B8, bool B9,
+ bool B10, bool B11, bool B12, bool B13, bool B14, bool B15, bool B16, bool... Bs>
+ struct template_count_bools<B1, B2, B3, B4, B5, B6, B7, B8, B9, B10, B11, B12, B13, B14, B15,
+ B16, Bs...>
+ {
+ using type =
+ ::brigand::size_t<B1 + B2 + B3 + B4 + B5 + B6 + B7 + B8 + B9 + B10 + B11 + B12 + B13 +
+ B14 + B15 + B16 + template_count_bools<Bs...>::type::value>;
+ };
+}
+namespace lazy
+{
+ template <typename List, typename Pred>
+ struct count_if
+ {
+ };
+ template <template <typename...> class S, typename Pred>
+ struct count_if<S<>, Pred>
+ {
+ using type = ::brigand::size_t<0>;
+ };
+#if defined(BRIGAND_COMP_GCC) || defined(BRIGAND_COMP_CLANG)
+ template <template <typename...> class S, template <typename...> class F>
+ struct count_if<S<>, bind<F, _1>>
+ {
+ using type = ::brigand::size_t<0>;
+ };
+ template <template <typename...> class S, template <typename...> class F>
+ struct count_if<S<>, F<_1>>
+ {
+ using type = ::brigand::size_t<0>;
+ };
+ template <template<typename...> class S, typename... Ts, typename Pred>
+ struct count_if<S<Ts...>, Pred>
+ {
+ static constexpr bool s_v[] = { ::brigand::apply<Pred, Ts>::type::value... };
+ using type = brigand::size_t<::brigand::detail::count_bools(s_v, s_v + sizeof...(Ts), 0u)>;
+ };
+ template <template <typename...> class S, typename... Ts, template <typename...> class F>
+ struct count_if<S<Ts...>, bind<F, _1>>
+ {
+ static constexpr bool s_v[] = { F<Ts>::value... };
+ using type = brigand::size_t<::brigand::detail::count_bools(s_v, s_v + sizeof...(Ts), 0u)>;
+ };
+ template <template <typename...> class S, typename... Ts, template <typename...> class F>
+ struct count_if<S<Ts...>, F<_1>>
+ {
+ static constexpr bool s_v[] = { F<Ts>::type::value... };
+ using type = brigand::size_t<::brigand::detail::count_bools(s_v, s_v + sizeof...(Ts), 0u)>;
+ };
+#else
+ template <template <typename...> class S, typename... Ts, typename Pred>
+ struct count_if<S<Ts...>, Pred> : ::brigand::detail::template_count_bools<::brigand::apply<Pred, Ts>::value...>
+ {
+ };
+#endif
+}
+template <typename List, typename Pred>
+using count_if = typename lazy::count_if<List, Pred>::type;
+template <class... T>
+using count = std::integral_constant<std::size_t, sizeof...(T)>;
+}
+namespace brigand
+{
+ template<class L> using size = wrap<L, count>;
+}
+namespace brigand
+{
+ namespace detail
+ {
+ template<class L, class... T> struct push_front_impl;
+ template<template<class...> class L, class... U, class... T>
+ struct push_front_impl<L<U...>, T...>
+ {
+ using type = L<T..., U...>;
+ };
+ }
+ namespace lazy {
+ template<class L, class... T>
+ struct push_front : detail::push_front_impl<L, T...>{};
+ }
+ template<class L, class... T>
+ using push_front = typename detail::push_front_impl<L, T...>::type;
+ namespace detail
+ {
+ template<class L> struct front_impl;
+ template<template<class...> class L, class T, class... U>
+ struct front_impl<L<T, U...>>
+ {
+ using type = T;
+ };
+ }
+ template <class L>
+ using front = typename detail::front_impl<L>::type;
+ namespace detail
+ {
+ template <class L, std::size_t N> struct pop_front_impl;
+ template<template<class...> class L, class T, class... U>
+ struct pop_front_impl<L<T, U...>, 1>
+ {
+ using type = L<U...>;
+ };
+ template<template<class...> class L, class> struct pop_front_element;
+ template<template<class...> class L, class... Ts>
+ struct pop_front_element<L, list<Ts...>>
+ {
+ template<class... Us>
+ static L<Us...> impl(Ts..., type_<Us>*...);
+ };
+ template<template<class...> class L, class... Ts, std::size_t N>
+ struct pop_front_impl<L<Ts...>, N>
+ {
+ using type = decltype(pop_front_element<L, filled_list<
+ void const *, N
+ >>::impl(static_cast<type_<Ts>*>(nullptr)...));
+ };
+ }
+ namespace lazy {
+ template <class L, class N = std::integral_constant<std::size_t, 1>>
+ struct pop_front : detail::pop_front_impl<L, N::value> {};
+ }
+ template <class L, class N = std::integral_constant<std::size_t, 1>>
+ using pop_front = typename detail::pop_front_impl<L, N::value>::type;
+}
+
+namespace brigand
+{
+namespace detail
+{
+ template<class L>
+ struct clear_impl;
+ template<template<class...> class L, class... Ts>
+ struct clear_impl<L<Ts...>>
+ {
+ using type = L<>;
+ };
+}
+ template<class L>
+ using clear = typename detail::clear_impl<L>::type;
+}
+namespace brigand
+{
+namespace detail
+{
+ template <bool b, typename O, typename L, std::size_t I>
+ struct split_at_impl;
+ template <template <typename...> class S, typename... Os, typename T, typename... Ts>
+ struct split_at_impl<false, S<Os...>, S<T, Ts...>, 0>
+ {
+ using type = S<S<Os...>, S<T, Ts...>>;
+ };
+ template <template <typename...> class S, typename... Os, typename... Ts>
+ struct split_at_impl<false, S<Os...>, S<Ts...>, 0>
+ {
+ using type = S<S<Os...>, S<Ts...>>;
+ };
+ template <template <typename...> class S, typename... Os, typename T, typename... Ts,
+ std::size_t I>
+ struct split_at_impl<false, S<Os...>, S<T, Ts...>, I>
+ : split_at_impl<false, S<Os..., T>, S<Ts...>, (I - 1)>
+ {
+ };
+ template <template <typename...> class S, typename... Os, typename T1, typename T2, typename T3,
+ typename T4, typename T5, typename T6, typename T7, typename T8, typename T9,
+ typename T10, typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename... Ts, std::size_t I>
+ struct split_at_impl<
+ true, S<Os...>,
+ S<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, Ts...>, I>
+ : split_at_impl<((I - 16) > 16), S<Os..., T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16>,
+ S<Ts...>, (I - 16)>
+ {
+ };
+ template <typename L, typename I>
+ struct call_split_at_impl : split_at_impl<(I::value > 16), brigand::clear<L>, L, I::value>
+ {
+ };
+}
+namespace lazy
+{
+ template <typename L, typename I>
+ using split_at = ::brigand::detail::call_split_at_impl<L, I>;
+}
+template <typename L, typename I>
+using split_at = typename ::brigand::lazy::split_at<L, I>::type;
+}
+namespace brigand
+{
+ namespace detail
+ {
+ template<class L, class... T> struct push_back_impl;
+ template<template<class...> class L, class... U, class... T>
+ struct push_back_impl<L<U...>, T...>
+ {
+ using type = L<U..., T...>;
+ };
+ }
+ template<class L, class... T>
+ using push_back = typename detail::push_back_impl<L, T...>::type;
+ template <class L>
+ using back = at_c<L, size<L>::value-1>;
+ template <class L, class N = std::integral_constant<std::size_t, 1>>
+ using pop_back = front<split_at<L, std::integral_constant<std::size_t, size<L>::value - N::value>>>;
+}
+namespace brigand
+{
+ namespace detail
+ {
+ template<class L1, class L2>
+ struct rot90;
+ template<
+ class... L1,
+ template<class...> class S1, class... T1,
+ template<class...> class S2, class... T2,
+ template<class...> class S3, class... T3,
+ template<class...> class S4, class... T4,
+ template<class...> class S5, class... T5,
+ template<class...> class S6, class... T6,
+ template<class...> class S7, class... T7,
+ template<class...> class S8, class... T8,
+ class... L2>
+ struct rot90<list<L1...>, list<
+ S1<T1...>, S2<T2...>, S3<T3...>, S4<T4...>,
+ S5<T5...>, S6<T6...>, S7<T7...>, S8<T8...>, L2...>>
+ : rot90<list<push_back<L1, T1, T2, T3, T4, T5, T6, T7, T8>...>, list<L2...>>
+ {};
+ template<class... L1, template<class...> class S, class... T, class... L2>
+ struct rot90<list<L1...>, list<S<T...>, L2...>>
+ : rot90<list<push_back<L1, T>...>, list<L2...>>
+ {};
+ template<class L1>
+ struct rot90<L1, list<>>
+ {
+ using type = L1;
+ };
+ template<class Func, class Seq1, class Seq2, class Seqs>
+ struct transform_impl;
+ template<class F, class T1, class T2, class Seq>
+ struct transform_apply;
+ template<class F, class T1, class T2, class... Ts>
+ struct transform_apply<F, T1, T2, list<Ts...>>
+ {
+ using type = brigand::apply<F, T1, T2, Ts...>;
+ };
+ template<
+ class Func,
+ template<class...> class Seq1, class... T1,
+ template<class...> class Seq2, class... T2,
+ class... Seqs>
+ struct transform_impl<Func, Seq1<T1...>, Seq2<T2...>, list<Seqs...>>
+ {
+ using type = Seq1<typename transform_apply<Func, T1, T2, Seqs>::type...>;
+ };
+ template<std::size_t N, class Seq1, class Seq2, class... FuncOrSeqs>
+ struct transform
+ : transform_impl<back<list<FuncOrSeqs...>>, Seq1, Seq2,
+ typename rot90<filled_list<list<>, size<Seq1>::value>, pop_back<list<FuncOrSeqs...>>>::type>
+ {};
+ template<template<class...> class Seq, class... T, class Func>
+ struct transform<0, Seq<T...>, Func>
+ {
+ using type = Seq<brigand::apply<Func, T>...>;
+ };
+ template<template<class...> class Seq, class... T, template<typename...> class Func>
+ struct transform<0, Seq<T...>, bind<Func, _1>>
+ {
+ using type = Seq<Func<T>...>;
+ };
+ template<template<class...> class Seq, class... T, template<typename...> class Func>
+ struct transform<0, Seq<T...>, Func<_1>>
+ {
+ using type = Seq<typename Func<T>::type...>;
+ };
+ template<template<class...> class Seq1, class... T1, template<class...> class Seq2, class... T2, class Func>
+ struct transform<1, Seq1<T1...>, Seq2<T2...>, Func>
+ {
+ using type = Seq1<brigand::apply<Func, T1, T2>...>;
+ };
+ }
+ namespace lazy
+ {
+ template<typename Sequence1, typename OpSeq1, typename... OpSeq2>
+ struct transform : detail::transform<sizeof...(OpSeq2), Sequence1, OpSeq1, OpSeq2...> {};
+ }
+ template<typename Sequence1, typename OpSeq1, typename... OpSeq2>
+ using transform = typename detail::transform<sizeof...(OpSeq2), Sequence1, OpSeq1, OpSeq2...>::type;
+}
+namespace brigand
+{
+ template <typename T>
+ struct make_integral : std::integral_constant <typename T::value_type, T::value> {};
+ template <typename L>
+ using as_integral_list = transform<L, make_integral<brigand::_1>>;
+}
+namespace brigand
+{
+namespace detail
+{
+ template <typename L, template <class...> class Sequence>
+ struct as_sequence_impl
+ {
+ using type = wrap<L, Sequence>;
+ };
+}
+template <typename L, template <class...> class Sequence>
+using as_sequence = typename detail::as_sequence_impl<L, Sequence>::type;
+template <typename L>
+using as_list = as_sequence<L, brigand::list>;
+}
+namespace brigand
+{
+ template <typename... T>
+ struct pair_wrapper_
+ {
+ static_assert (sizeof...(T) == 2
+ , "as_pair requires a type list of exactly two types"
+ );
+ using type = no_such_type_;
+ };
+ template <typename T, typename U>
+ struct pair_wrapper_<T,U>
+ {
+ using type = std::pair<T,U>;
+ };
+ template <typename... T>
+ using pair_wrapper = typename pair_wrapper_<T...>::type;
+ template <typename L>
+ using as_pair = wrap<L, pair_wrapper>;
+}
+namespace brigand
+{
+ template <typename... T>
+ using tuple_wrapper = typename std::tuple<T...>;
+ template <typename L>
+ using as_tuple = wrap<L, tuple_wrapper>;
+}
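
The as_pair/as_tuple adaptors above convert a type list into the corresponding std:: container of types; as the static_assert message states, as_pair accepts exactly two types. A small illustrative check (not part of the patch; Brigand.h already pulls in <tuple>, <utility> and <type_traits>):

    static_assert(std::is_same<brigand::as_pair<brigand::list<int, char>>,
                               std::pair<int, char>>::value, "");
    static_assert(std::is_same<brigand::as_tuple<brigand::list<int, char, bool>>,
                               std::tuple<int, char, bool>>::value, "");
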
+#if !defined(BRIGAND_NO_BOOST_SUPPORT)
+namespace brigand
+{
+ template <typename... T>
+ using fusion_vector_wrapper = boost::fusion::vector<T...>;
+ template <typename... T>
+ using fusion_list_wrapper = boost::fusion::list<T...>;
+ template <typename... T>
+ using fusion_deque_wrapper = boost::fusion::deque<T...>;
+ template <typename... T>
+ using fusion_set_wrapper = boost::fusion::set<T...>;
+ template <typename L> using as_fusion_vector = wrap<L, fusion_vector_wrapper>;
+ template <typename L> using as_fusion_deque = wrap<L, fusion_deque_wrapper>;
+ template <typename L> using as_fusion_list = wrap<L, fusion_list_wrapper>;
+ template <typename L> using as_fusion_set = wrap<L, fusion_set_wrapper>;
+}
+namespace brigand
+{
+ template <typename... T>
+ using variant_wrapper = typename boost::variant<T...>;
+ template <typename L>
+ using as_variant = wrap<L, variant_wrapper>;
+}
+#endif
+
+namespace brigand
+{
+ template <bool B>
+ using bool_ = std::integral_constant<bool, B>;
+}
+namespace brigand
+{
+namespace detail
+{
+ template <typename Args>
+ struct non_null_impl : bool_<Args::value != 0>{};
+ using non_null = non_null_impl<_1>;
+}
+}
+
+namespace brigand
+{
+#ifdef BRIGAND_COMP_MSVC_2013
+ namespace detail
+ {
+ template <bool...> struct bools_ {};
+ template< typename Sequence, typename Predicate, typename... Ts> struct all_impl;
+ template< template<class...> class Sequence, typename Predicate, typename... Ts>
+ struct all_impl<Sequence<Ts...>, Predicate>
+ : std::is_same< bools_<true, ::brigand::apply<Predicate, Ts>::value...>
+ , bools_<::brigand::apply<Predicate, Ts>::value..., true>
+ >
+ {};
+ }
+#else
+ namespace detail
+ {
+ struct all_same
+ {
+ const bool value = false;
+ constexpr all_same(...) {}
+ template <typename T>
+ constexpr all_same(std::initializer_list<T *>) : value{ true }
+ {
+ }
+ };
+ template <typename Sequence, typename Predicate>
+ struct all_impl : bool_<true>{};
+ template <template <class...> class Sequence, typename Predicate, typename T, typename... Ts>
+ struct all_impl<Sequence<T,Ts...>, Predicate>
+ {
+ static constexpr all_same tester{ static_cast<::brigand::apply<Predicate, T> *>(nullptr),
+ static_cast<::brigand::apply<Predicate, Ts> *>(nullptr)... };
+ using type = bool_<(::brigand::apply<Predicate, T>::value != 0 && tester.value)>;
+ };
+ template <template <class...> class Sequence, template <typename...> class F, typename T,
+ typename... Ts>
+ struct all_impl<Sequence<T, Ts...>, bind<F, _1>>
+ {
+ static constexpr all_same tester{ static_cast<F<T> *>(nullptr),
+ static_cast<F<Ts> *>(nullptr)... };
+ using type = bool_<(F<T>::value != 0 && tester.value)>;
+ };
+ template <template <class...> class Sequence, template <typename...> class F, typename T,
+ typename... Ts>
+ struct all_impl<Sequence<T, Ts...>, F<_1>>
+ {
+ static constexpr all_same tester{ static_cast<typename F<T>::type *>(nullptr),
+ static_cast<typename F<Ts>::type *>(nullptr)... };
+ using type = bool_<(F<T>::type::value != 0 && tester.value)>;
+ };
+ }
+#endif
+ template <typename Sequence, typename Predicate = detail::non_null>
+ using all = typename detail::all_impl<Sequence, Predicate>::type;
+}
+namespace brigand
+{
+#ifdef BRIGAND_COMP_MSVC_2013
+ namespace detail
+ {
+ template<typename Sequence, typename Pred> struct none_impl
+ {
+ template<typename T>
+ struct nope
+ {
+ using that = brigand::apply<Pred, T>;
+ using type = bool_<!that::value>;
+ };
+ using type = all<Sequence, nope<_1>>;
+ };
+ }
+#else
+ namespace detail
+ {
+ template <typename Sequence, typename Predicate>
+ struct none_impl : bool_<true>{};
+ template <template <class...> class Sequence, typename Predicate, typename T, typename... Ts>
+ struct none_impl<Sequence<T,Ts...>, Predicate>
+ {
+ static constexpr all_same tester{ static_cast<::brigand::apply<Predicate, T> *>(nullptr),
+ static_cast<::brigand::apply<Predicate, Ts> *>(nullptr)... };
+ using type = bool_<(::brigand::apply<Predicate, T>::value == 0 && tester.value)>;
+ };
+ template <template <class...> class Sequence, template <typename...> class F, typename T,
+ typename... Ts>
+ struct none_impl<Sequence<T, Ts...>, bind<F, _1>>
+ {
+ static constexpr all_same tester{ static_cast<F<T> *>(nullptr),
+ static_cast<F<Ts> *>(nullptr)... };
+ using type = bool_<(F<T>::value == 0 && tester.value)>;
+ };
+ template <template <class...> class Sequence, template <typename...> class F, typename T,
+ typename... Ts>
+ struct none_impl<Sequence<T, Ts...>, F<_1>>
+ {
+ static constexpr all_same tester{ static_cast<typename F<T>::type *>(nullptr),
+ static_cast<typename F<Ts>::type *>(nullptr)... };
+ using type = bool_<(F<T>::type::value == 0 && tester.value)>;
+ };
+ }
+#endif
+ template< typename Sequence, typename Predicate = detail::non_null>
+ using none = typename detail::none_impl<Sequence,Predicate>::type;
+}
+namespace brigand
+{
+ namespace detail
+ {
+ template< typename Sequence, typename Predicate >
+ struct any_impl : bool_<!none<Sequence,Predicate>::value> {};
+ }
+ template<typename Sequence, typename Predicate = detail::non_null>
+ using any = typename detail::any_impl<Sequence,Predicate>::type;
+}
+namespace brigand
+{
+namespace detail
+{
+ template <template <typename...> class S, template <typename...> class F, typename... Ts>
+ struct finder
+ {
+ template <typename T>
+ using P = F<Ts..., T>;
+ template <bool InNext8, bool Match, typename... Ls>
+ struct find
+ {
+ using type = S<>;
+ };
+ template <typename L>
+ struct find<true, false, L>
+ {
+ using type = S<>;
+ };
+ template <typename L, typename... Ls>
+ struct find<true, true, L, Ls...>
+ {
+ using type = S<L, Ls...>;
+ };
+ template <typename L1, typename L2, typename... Ls>
+ struct find<true, false, L1, L2, Ls...> : find<true, F<Ts..., L2>::value, L2, Ls...>
+ {
+ };
+ template <typename L0, typename L1, typename L2, typename L3, typename L4, typename L5,
+ typename L6, typename L7, typename L8,
+ typename... Ls>
+ struct find<false, false, L0, L1, L2, L3, L4, L5, L6, L7, L8, Ls...>
+ : find<true, F<Ts..., L8>::value, L8, Ls...>
+ {
+ };
+ template <typename L1, typename L2, typename L3, typename L4, typename L5, typename L6,
+ typename L7, typename L8, typename L9, typename L10, typename L11, typename L12,
+ typename L13, typename L14, typename L15, typename L16,
+ typename... Ls>
+ struct find<false, false, L1, L2, L3, L4, L5, L6, L7, L8, L9, L10, L11, L12, L13, L14, L15,
+ L16, Ls...>
+ : find<(P<L9>::value || P<L10>::value || P<L11>::value || P<L12>::value ||
+ P<L13>::value || P<L14>::value || P<L15>::value || P<L16>::value),
+ P<L9>::value, L9, L10, L11, L12, L13, L14, L15, L16, Ls...>
+ {
+ };
+ };
+}
+}
+namespace brigand
+{
+namespace detail
+{
+ template <template<class...> class L, class...>
+ struct reverse_elements;
+ template <template <class...> class L>
+ struct reverse_elements<L>
+ {
+ using type = L<>;
+ };
+ template <template <class...> class L, class T0, class... Ts>
+ struct reverse_elements<L, T0, Ts...>
+ : append_impl<typename reverse_elements<L, Ts...>::type, L<T0>>
+ {
+ };
+ template <template <class...> class L, class T0, class T1, class T2, class T3, class T4, class T5, class T6, class T7, class T8, class T9, class T10, class T11, class T12, class T13, class T14, class T15, class... Ts>
+ struct reverse_elements<L, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, Ts...>
+ : append_impl<typename reverse_elements<L, Ts...>::type, L<T15, T14, T13, T12, T11, T10, T9, T8, T7, T6, T5, T4, T3, T2, T1, T0>>
+ {
+ };
+ template<class L>
+ struct reverse_impl;
+ template<template<class...> class L, class... U>
+ struct reverse_impl<L<U...>>
+ : reverse_elements<L, U...>
+ {
+ };
+}
+namespace lazy
+{
+ template <typename L>
+ using reverse = typename detail::reverse_impl<L>;
+}
+ template <typename L>
+ using reverse = typename detail::reverse_impl<L>::type;
+}
+namespace brigand
+{
+namespace lazy
+{
+ template <typename Sequence, typename Predicate = ::brigand::detail::non_null>
+ struct find;
+ template <template <typename...> class Sequence, typename... Ls, typename Pred>
+ struct find<Sequence<Ls...>, Pred>
+ : detail::finder<Sequence, detail::bound_apply, Pred>::template find<
+ false, false, void, void, void, void, void, void, void, void, Ls...>
+ {
+ };
+ template <template <typename...> class Sequence, typename... Ls, template <typename...> class F>
+ struct find<Sequence<Ls...>, bind<F, _1>>
+ : detail::finder<Sequence, F>::template find<false, false, void, void, void, void, void,
+ void, void, void, Ls...>
+ {
+ };
+}
+template <typename Sequence, typename Predicate = brigand::detail::non_null>
+using find = typename lazy::find<Sequence, Predicate>::type;
+namespace lazy
+{
+ template <typename Sequence, typename Predicate = detail::non_null>
+ using reverse_find =
+ ::brigand::lazy::reverse<::brigand::find<brigand::reverse<Sequence>, Predicate>>;
+}
+template <typename Sequence, typename Predicate = detail::non_null>
+using reverse_find = typename ::brigand::lazy::reverse_find<Sequence, Predicate>::type;
+namespace detail
+{
+ template <typename Sequence, typename Predicate>
+ using find_size = size<brigand::find<Sequence, Predicate>>;
+ template <typename Sequence, typename Predicate>
+ using empty_find = bool_<find_size<Sequence, Predicate>::value == 0>;
+ template <typename Sequence, typename Predicate>
+ using non_empty_find = bool_<find_size<Sequence, Predicate>::value != 0>;
+}
+template <typename Sequence, typename Predicate = detail::non_null>
+using not_found = typename detail::empty_find<Sequence, Predicate>;
+template <typename Sequence, typename Predicate = detail::non_null>
+using found = typename detail::non_empty_find<Sequence, Predicate>;
+}
+namespace brigand
+{
+namespace detail
+{
+ template <class L>
+ struct flatten_impl
+ {
+ using type = L;
+ };
+ template <template<class...> class L, class T>
+ struct flatten_element_impl
+ {
+ using type = L<T>;
+ };
+ template <template<class...> class L, class... Ts>
+ struct flatten_element_impl<L, L<Ts...>>
+ : append_impl<typename flatten_element_impl<L, Ts>::type...>
+ {
+ };
+ template <template<class...> class L, class... Ts>
+ struct flatten_impl<L<Ts...>>
+ : flatten_element_impl<L, L<Ts...>>
+ {
+ };
+}
+namespace lazy
+{
+ template <typename Sequence>
+ using flatten = typename detail::flatten_impl<Sequence>;
+}
+template <typename Sequence>
+using flatten = typename lazy::flatten<Sequence>::type;
+}
+namespace brigand { namespace detail
+{
+ template<class Functor, class State, class Sequence>
+ struct fold_impl
+ {
+ using type = State;
+ };
+ template<
+ class Functor, class State, template <class...> class Sequence,
+ class T0>
+ struct fold_impl<Functor, State, Sequence<T0>>
+ {
+ using type = brigand::apply<Functor, State, T0>;
+ };
+ template<
+ class Functor, class State, template <class...> class Sequence,
+ class T0, class T1>
+ struct fold_impl<Functor, State, Sequence<T0, T1>>
+ {
+ using type = brigand::apply<Functor,
+ brigand::apply<Functor,State, T0>, T1
+ >;
+ };
+ template<
+ class Functor, class State, template <class...> class Sequence,
+ class T0, class T1, class T2>
+ struct fold_impl<Functor, State, Sequence<T0, T1, T2>>
+ {
+ using type = brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor, State, T0>, T1
+ >, T2
+ >;
+ };
+ template<
+ class Functor, class State, template <class...> class Sequence,
+ class T0, class T1, class T2, class T3>
+ struct fold_impl<Functor, State, Sequence<T0, T1, T2, T3>>
+ {
+ using type = brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor, State, T0>, T1
+ >, T2
+ >, T3
+ >;
+ };
+ template<
+ class Functor, class State, template <class...> class Sequence,
+ class T0, class T1, class T2, class T3, class T4>
+ struct fold_impl<Functor, State, Sequence<T0, T1, T2, T3, T4>>
+ {
+ using type = brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor, State, T0>, T1
+ >, T2
+ >, T3
+ >, T4
+ >;
+ };
+ template<
+ class Functor, class State, template <class...> class Sequence,
+ class T0, class T1, class T2, class T3, class T4, class T5>
+ struct fold_impl<Functor, State, Sequence<T0, T1, T2, T3, T4, T5>>
+ {
+ using type = brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor, State, T0>, T1
+ >, T2
+ >, T3
+ >, T4
+ >, T5
+ >;
+ };
+ template<
+ class Functor, class State, template <class...> class Sequence,
+ class T0, class T1, class T2, class T3, class T4, class T5, class T6>
+ struct fold_impl<Functor, State, Sequence<T0, T1, T2, T3, T4, T5, T6>>
+ {
+ using type = brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor, State, T0>, T1
+ >, T2
+ >, T3
+ >, T4
+ >, T5
+ >, T6
+ >;
+ };
+ template<
+ class Functor, class State, template <class...> class Sequence,
+ class T0, class T1, class T2, class T3, class T4, class T5, class T6, class T7>
+ struct fold_impl<Functor, State, Sequence<T0, T1, T2, T3, T4, T5, T6, T7>>
+ {
+ using type = brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor, State, T0>, T1
+ >, T2
+ >, T3
+ >, T4
+ >, T5
+ >, T6
+ >, T7
+ >;
+ };
+ template<
+ class Functor, class State, template <class...> class Sequence,
+ class T0, class T1, class T2, class T3, class T4, class T5, class T6, class T7, class... T>
+ struct fold_impl<Functor, State, Sequence<T0, T1, T2, T3, T4, T5, T6, T7, T...>>
+ : fold_impl<
+ Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ State, T0
+ >, T1
+ >, T2
+ >, T3
+ >, T4
+ >, T5
+ >, T6
+ >, T7
+ >,
+ Sequence<T...>
+ >
+ {};
+ template<typename Functor, typename State, typename Sequence>
+ struct reverse_fold_impl
+ {
+ using type = State;
+ };
+ template <typename Functor, typename State, template <typename...> class L, typename T, typename... Ts>
+ struct reverse_fold_impl<Functor, State, L<T, Ts...>>
+ {
+ using type =
+ brigand::apply<Functor, typename reverse_fold_impl<Functor, State, L<Ts...>>::type, T>;
+ };
+ template<
+ typename Functor, typename State, template <typename...> class L,
+ typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename... Ts>
+ struct reverse_fold_impl<Functor, State, L<T0, T1, T2, T3, T4, T5, T6, T7, Ts...>>{
+ using type = brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ brigand::apply<Functor,
+ typename reverse_fold_impl<Functor, State, L<Ts...>>::type, T7
+ >, T6
+ >, T5
+ >, T4
+ >, T3
+ >, T2
+ >, T1
+ >, T0
+ >;
+ };
+} }
+namespace brigand
+{
+namespace lazy
+{
+ template <class Sequence, class State, class Functor>
+ using fold = typename detail::fold_impl<Functor, State, Sequence>;
+ template <class Sequence, class State, class Functor>
+ using reverse_fold = typename detail::reverse_fold_impl<Functor, State, Sequence>;
+}
+template <class Sequence, class State, class Functor>
+using fold = typename ::brigand::lazy::fold<Sequence, State, Functor>::type;
+template <class Sequence, class State, class Functor>
+using reverse_fold = typename ::brigand::lazy::reverse_fold<Sequence, State, Functor>::type;
+}
+namespace brigand
+{
+ template<class F, class...Ts> F for_each_args(F f, Ts&&...a)
+ {
+ return (void)std::initializer_list<int>{((void)std::ref(f)(static_cast<Ts&&>(a)),0)...}, f;
+ }
+}
+namespace brigand
+{
+ namespace detail
+ {
+ template<template<class...> class List, typename... Elements, typename Functor>
+ Functor for_each_impl( List<Elements...>&&, Functor f )
+ {
+ return for_each_args( f, type_<Elements>()... );
+ }
+ }
+ template<typename List, typename Functor> Functor for_each( Functor f )
+ {
+ return detail::for_each_impl( List{}, f );
+ }
+}
+namespace brigand
+{
+namespace detail
+{
+ template <bool Found, class Sequence, typename Predicate, typename NotFoundType>
+ struct index_if_impl
+ {
+ using type = ::brigand::size_t<size<Sequence>::value -
+ size<::brigand::find<Sequence, Predicate>>::value>;
+ };
+ template <class Sequence, typename Predicate, typename NotFoundType>
+ struct index_if_impl<false, Sequence, Predicate, NotFoundType>
+ {
+ using type = NotFoundType;
+ };
+}
+template <class Sequence, class Predicate, class NotFoundType = no_such_type_>
+using index_if = typename detail::index_if_impl<::brigand::found<Sequence, Predicate>::value,
+ Sequence, Predicate, NotFoundType>::type;
+template <class Sequence, typename T>
+using index_of = index_if<Sequence, std::is_same<T, ::brigand::_1>>;
+}
+
+namespace brigand
+{
+ namespace detail
+ {
+ template<class T, class, class, T>
+ struct range_cat;
+#ifdef BRIGAND_COMP_MSVC
+ template<class T, T Start, T Int>
+ struct int_plus
+ {
+ using type = std::integral_constant<T, Start + Int>;
+ };
+#endif
+ template<class T, class... Ts, T... Ints, T Start>
+ struct range_cat<T, list<Ts...>, list<std::integral_constant<T, Ints>...>, Start>
+ {
+#ifdef BRIGAND_COMP_MSVC
+ using type = list<Ts..., typename int_plus<T, Start, Ints>::type...>;
+#else
+ using type = list<Ts..., std::integral_constant<T, Start + Ints>...>;
+#endif
+ };
+ template<class T, T Start, std::size_t N>
+ struct range_impl
+ : range_cat<
+ T,
+ typename range_impl<T, Start, N/2>::type,
+ typename range_impl<T, Start, N - N/2>::type,
+ N/2
+ >
+ {};
+ template<class T, T Start>
+ struct range_impl<T, Start, 1>
+ {
+ using type = list<std::integral_constant<T, Start>>;
+ };
+ template<class T, T Start>
+ struct range_impl<T, Start, 0>
+ {
+ using type = list<>;
+ };
+ template<class T, class, class, T>
+ struct reverse_range_cat;
+#ifdef BRIGAND_COMP_MSVC
+ template<class T, T Start, T Int>
+ struct int_minus
+ {
+ using type = std::integral_constant<T, Int - Start>;
+ };
+#endif
+ template<class T, class... Ts, T... Ints, T Start>
+ struct reverse_range_cat<T, list<Ts...>, list<std::integral_constant<T, Ints>...>, Start>
+ {
+#ifdef BRIGAND_COMP_MSVC
+ using type = list<Ts..., typename int_minus<T, Start, Ints>::type...>;
+#else
+ using type = list<Ts..., std::integral_constant<T, Ints - Start>...>;
+#endif
+ };
+ template<class T, T Start, std::size_t N>
+ struct reverse_range_impl
+ : reverse_range_cat<
+ T,
+ typename reverse_range_impl<T, Start, N/2>::type,
+ typename reverse_range_impl<T, Start, N - N/2>::type,
+ N/2
+ >
+ {
+ };
+ template<class T, T Start>
+ struct reverse_range_impl<T, Start, 1>
+ {
+ using type = list<std::integral_constant<T, Start>>;
+ };
+ template<class T, T Start>
+ struct reverse_range_impl<T, Start, 0>
+ {
+ using type = list<>;
+ };
+ template <class T, T Start, T Stop>
+ struct reverse_range_safe
+ {
+ static_assert(Start >= Stop, "Invalid parameters. reverse_range<> syntax is reverse_range<type, from, down_to>");
+ using type = typename reverse_range_impl<T, Start, Start-Stop>::type;
+ };
+ }
+ template<class T, T Start, T Stop>
+ using range = typename detail::range_impl<T, Start, Stop-Start>::type;
+ template<class T, T Start, T Stop>
+ using reverse_range = typename detail::reverse_range_safe<T, Start, Stop>::type;
+}
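
Per the static_assert message above, reverse_range<type, from, down_to> counts downwards, while range<type, from, to> counts upwards; both exclude their second bound. An illustrative check (not part of the patch):

    static_assert(std::is_same<brigand::range<int, 0, 3>,
                               brigand::list<std::integral_constant<int, 0>,
                                             std::integral_constant<int, 1>,
                                             std::integral_constant<int, 2>>>::value, "");
    static_assert(std::is_same<brigand::reverse_range<int, 3, 0>,
                               brigand::list<std::integral_constant<int, 3>,
                                             std::integral_constant<int, 2>,
                                             std::integral_constant<int, 1>>>::value, "");
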
+namespace brigand
+{
+namespace detail
+{
+ template<class, class T> struct unique_x_t
+ { operator type_<T> (); };
+ template<class Ints, class... Ts>
+ struct is_set_impl;
+ template<>
+ struct is_set_impl<list<>>
+ {
+ using type = std::true_type;
+ };
+ inline std::true_type true_fn(...);
+ template<class... Ints, class... Ts>
+ struct is_set_impl<list<Ints...>, Ts...>
+ {
+ struct Pack : unique_x_t<Ints, Ts>... {};
+ template<class... Us>
+ static auto is_set(Us...) -> decltype(true_fn(static_cast<Us>(Pack())...));
+ static std::false_type is_set(...);
+ using type = decltype(is_set(type_<Ts>()...));
+ };
+}
+ template<class... Ts>
+ using is_set = typename detail::is_set_impl<range<int, 0, sizeof...(Ts)>, Ts...>::type;
+}
+
+namespace brigand
+{
+#if defined(BRIGAND_COMP_GCC) || defined(BRIGAND_COMP_CLANG)
+ namespace lazy
+ {
+ template <typename L, typename Pred>
+ struct remove_if;
+ template <template <class...> class L, typename... Ts, typename Pred>
+ struct remove_if<L<Ts...>, Pred>
+ : ::brigand::detail::append_impl<
+ L<>, typename std::conditional<::brigand::apply<Pred, Ts>::value, list<>, list<Ts>>::type...>
+ {
+ };
+ template <template <class...> class L, typename... Ts, template<typename...> class F>
+ struct remove_if<L<Ts...>, bind<F,_1>>
+ : ::brigand::detail::append_impl<
+ L<>, typename std::conditional<F<Ts>::value, list<>, list<Ts>>::type...>
+ {
+ };
+ template <template <class...> class L, typename... Ts, template<typename...> class F>
+ struct remove_if<L<Ts...>, F<_1>>
+ : ::brigand::detail::append_impl<
+ L<>, typename std::conditional<F<Ts>::type::value, list<>, list<Ts>>::type...>
+ {
+ };
+ }
+ namespace lazy
+ {
+ template <typename L, typename T>
+ struct remove;
+ template <template <class...> class L, typename... Ts, typename T>
+ struct remove<L<Ts...>, T>
+ : ::brigand::detail::append_impl<
+ L<>, typename std::conditional<std::is_same<Ts, T>::value, list<>, list<Ts>>::type...>
+ {
+ };
+ }
+ namespace lazy
+ {
+ template <typename L, typename Pred>
+ struct filter;
+ template <template <class...> class L, typename... Ts, typename Pred>
+ struct filter<L<Ts...>, Pred>
+ : ::brigand::detail::append_impl<
+ L<>, typename std::conditional<::brigand::apply<Pred, Ts>::value, list<Ts>, list<>>::type...>
+ {
+ };
+ template <template <class...> class L, typename... Ts, template<typename...> class F>
+ struct filter<L<Ts...>, bind<F, _1>>
+ : ::brigand::detail::append_impl<
+ L<>, typename std::conditional<F<Ts>::value, list<Ts>, list<>>::type...>
+ {
+ };
+ template <template <class...> class L, typename... Ts, template<typename...> class F>
+ struct filter<L<Ts...>, F<_1>>
+ : ::brigand::detail::append_impl<
+ L<>, typename std::conditional<F<Ts>::type::value, list<Ts>, list<>>::type...>
+ {
+ };
+ }
+#else
+namespace detail
+{
+ template <typename Pred, typename T, bool B>
+ struct empty_if_true : std::conditional<::brigand::apply<Pred, T>::value == B, list<>, list<T>>
+ {
+ };
+ template <template <typename...> class F, typename T, bool B>
+ struct empty_if_true<bind<F, _1>, T, B> : std::conditional<F<T>::value == B, list<>, list<T>>
+ {
+ };
+ template <template <typename...> class F, typename T, bool B>
+ struct empty_if_true<F<_1>, T, B> : std::conditional<F<T>::type::value == B, list<>, list<T>>
+ {
+ };
+}
+namespace lazy
+{
+ template <typename L, typename Pred>
+ struct remove_if;
+ template <template <class...> class L, typename... Ts, typename Pred>
+ struct remove_if<L<Ts...>, Pred>
+ : ::brigand::detail::append_impl<
+ L<>, typename ::brigand::detail::empty_if_true<Pred, Ts, true>::type...>
+ {
+ };
+}
+namespace lazy
+{
+ template <typename L, typename T>
+ struct remove;
+ template <template <class...> class L, typename... Ts, typename T>
+ struct remove<L<Ts...>, T>
+ : ::brigand::detail::append_impl<
+ L<>, typename std::conditional<std::is_same<Ts, T>::value, list<>, list<Ts>>::type...>
+ {
+ };
+}
+namespace lazy
+{
+ template <typename L, typename Pred>
+ struct filter;
+ template <template <class...> class L, typename... Ts, typename Pred>
+ struct filter<L<Ts...>, Pred>
+ : ::brigand::detail::append_impl<
+ L<>, typename ::brigand::detail::empty_if_true<Pred, Ts, false>::type...>
+ {
+ };
+}
+#endif
+template <typename L, typename Pred>
+using remove_if = typename lazy::remove_if<L, Pred>::type;
+template <typename L, typename T>
+using remove = typename lazy::remove<L, T>::type;
+template <typename L, typename Pred>
+using filter = typename lazy::filter<L, Pred>::type;
+}
+namespace brigand
+{
+template <class Seq, class Pred>
+using partition = pair<filter<Seq, Pred>, remove_if<Seq, Pred>>;
+}
+
+namespace brigand
+{
+ namespace detail
+ {
+ template <typename T, typename Pred, typename NewType>
+ struct replacer : std::conditional<::brigand::apply<Pred, T>::value, NewType, T>
+ {
+ };
+ template <typename T, template <typename...> class F, typename NewType>
+ struct replacer<T, bind<F, _1>, NewType> : std::conditional<F<T>::value, NewType, T>
+ {
+ };
+ template <typename T, template <typename...> class F, typename NewType>
+ struct replacer<T, F<_1>, NewType> : std::conditional<F<T>::type::value, NewType, T>
+ {
+ };
+ }
+namespace lazy
+{
+ template <typename Sequence, typename Predicate, typename NewType>
+ struct replace_if;
+ template <template <typename...> class S, typename... Ts, typename Predicate, typename NewType>
+ struct replace_if<S<Ts...>, Predicate, NewType>
+ {
+ using type = S<typename detail::replacer<Ts, Predicate, NewType>::type...>;
+ };
+ template <typename Sequence, typename OldType, typename NewType>
+ using replace = replace_if<Sequence, std::is_same<_1, pin<OldType>>, NewType>;
+}
+template <typename Sequence, typename Predicate, typename NewType>
+using replace_if = typename ::brigand::lazy::replace_if<Sequence, Predicate, NewType>::type;
+template <typename Sequence, typename OldType, typename NewType>
+using replace = typename ::brigand::lazy::replace<Sequence, OldType, NewType>::type;
+}
+
+namespace brigand
+{
+ template<typename C, typename T, typename F>
+ inline typename std::enable_if<C::value,T&&>::type select(T&& t, F&&)
+ {
+ return std::forward<T>(t);
+ }
+ template<typename C, typename T, typename F>
+ inline typename std::enable_if<!C::value,F&&>::type select(T&&, F&& f)
+ {
+ return std::forward<F>(f);
+ }
+}
+namespace brigand
+{
+namespace detail
+{
+ template<typename TOut, typename TCurrent, typename TDelim, typename... Ts>
+ struct split_impl;
+ template<template<typename...> class L, typename... Os, typename... Cs, typename TDelim, typename T, typename... Ts>
+ struct split_impl<L<Os...>, L<Cs...>, TDelim, T, Ts...> :
+ split_impl<L<Os...>, L<Cs..., T>, TDelim, Ts...> {};
+ template<template<typename...> class L, typename... Os, typename... Cs, typename TDelim, typename T>
+ struct split_impl<L<Os...>, L<Cs...>, TDelim, T> {
+ using type = L<Os..., L<Cs..., T>>;
+ };
+ template<template<typename...> class L, typename... Os, typename... Cs, typename TDelim, typename... Ts>
+ struct split_impl<L<Os...>, L<Cs...>, TDelim, TDelim, Ts...> :
+ split_impl<L<Os..., L<Cs...>>, L<>, TDelim, Ts...> {};
+ template<template<typename...> class L, typename... Os, typename... Cs, typename TDelim>
+ struct split_impl<L<Os...>, L<Cs...>, TDelim, TDelim> {
+ using type = L<Os..., L<Cs...>>;
+ };
+ template<template<typename...> class L, typename... Os, typename TDelim, typename... Ts>
+ struct split_impl<L<Os...>, L<>, TDelim, TDelim, Ts...> :
+ split_impl<L<Os...>, L<>, TDelim, Ts...> {};
+ template<template<typename...> class L, typename... Os, typename TDelim>
+ struct split_impl<L<Os...>, L<>, TDelim, TDelim> {
+ using type = L<Os...>;
+ };
+ template<template<typename...> class L, typename... Os, typename TDelim>
+ struct split_impl<L<Os...>, L<>, TDelim> {
+ using type = L<Os...>;
+ };
+ template<typename TList, typename TDelim>
+ struct split_helper;
+ template<template<typename...> class L, typename T, typename... Ts, typename TDelim>
+ struct split_helper<L<T,Ts...>, TDelim> : split_impl<L<>, L<>, TDelim, T, Ts...>{};
+ template<template<typename...> class L, typename... T, typename TDelim>
+ struct split_helper<L<T...>, TDelim> {
+ using type = L<>;
+ };
+}
+namespace lazy
+{
+ template<typename TList, typename TDelim>
+ using split = detail::split_helper<TList, TDelim>;
+}
+template<typename TList, typename TDelim>
+using split = typename lazy::split<TList, TDelim>::type;
+}
+namespace brigand
+{
+ template <typename A, typename B>
+ struct less : bool_ < (A::value < B::value) > {};
+}
+
+namespace brigand
+{
+ namespace detail
+ {
+ template<class L, class Seq1, class Seq2, class Comp>
+ struct merge_impl;
+ template<bool, class L, class Seq1, class Seq2, class Comp>
+ struct merge_insert;
+ template<class... R, class T0, class T1, class... Ts, class U, class... Us, class Comp>
+ struct merge_insert<true, list<R...>, list<T0,T1,Ts...>, list<U,Us...>, Comp>
+ : merge_insert<::brigand::apply<Comp,T1,U>::value, list<R...,T0>, list<T1,Ts...>, list<U,Us...>, Comp>
+ {};
+ template<class... R, class T, class U, class... Us, class Comp>
+ struct merge_insert<true, list<R...>, list<T>, list<U,Us...>, Comp>
+ {
+ using list = ::brigand::list<R...,T>;
+ using left = ::brigand::list<>;
+ using right = ::brigand::list<U,Us...>;
+ };
+ template<class... R, class T, class... Ts, class U0, class U1, class... Us, class Comp>
+ struct merge_insert<false, list<R...>, list<T,Ts...>, list<U0,U1,Us...>, Comp>
+ : merge_insert<::brigand::apply<Comp,T,U1>::value, list<R...,U0>, list<T,Ts...>, list<U1,Us...>, Comp>
+ {};
+ template<class... R, class T, class... Ts, class U, class Comp>
+ struct merge_insert<false, list<R...>, list<T,Ts...>, list<U>, Comp>
+ {
+ using list = ::brigand::list<R...,U>;
+ using left = ::brigand::list<T,Ts...>;
+ using right = ::brigand::list<>;
+ };
+ template<
+ class... R,
+ class T0, class T1, class T2, class T3, class T4, class T5, class T6, class T7, class T8, class T9, class... Ts,
+ class U0, class U1, class U2, class U3, class U4, class U5, class U6, class U7, class U8, class U9, class... Us, class Comp>
+ struct merge_impl<list<R...>, list<T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,Ts...>, list<U0,U1,U2,U3,U4,U5,U6,U7,U8,U9,Us...>, Comp>
+ {
+ using sub = merge_insert<::brigand::apply<Comp,T0,U0>::value, list<>, list<T0,T1,T2,T3,T4,T5,T6,T7,T8,T9>, list<U0,U1,U2,U3,U4,U5,U6,U7,U8,U9>, Comp>;
+ using type = typename merge_impl<
+ append<list<R...>, typename sub::list>,
+ append<typename sub::left, list<Ts...>>,
+ append<typename sub::right, list<Us...>>,
+ Comp
+ >::type;
+ };
+ template<class... R, class T, class... Ts, class U, class... Us, class Comp>
+ struct merge_impl<list<R...>, list<T,Ts...>, list<U,Us...>, Comp>
+ : std::conditional<
+ ::brigand::apply<Comp,T,U>::value,
+ merge_impl<list<R...,T>, list<Ts...>, list<U,Us...>, Comp>,
+ merge_impl<list<R...,U>, list<T,Ts...>, list<Us...>, Comp>
+ >::type
+ {};
+ template<class... R, class... Ts, class Comp>
+ struct merge_impl<list<R...>, list<Ts...>, list<>, Comp>
+ {
+ using type = list<R..., Ts...>;
+ };
+ template<class... R, class... Us, class Comp>
+ struct merge_impl<list<R...>, list<>, list<Us...>, Comp>
+ {
+ using type = list<R..., Us...>;
+ };
+ template<class... R, class Comp>
+ struct merge_impl<list<R...>, list<>, list<>, Comp>
+ {
+ using type = list<R...>;
+ };
+ }
+ template<class Seq1, class Seq2, class Comp = less<_1,_2>>
+ using merge = append<clear<Seq1>, typename detail::merge_impl<list<>, wrap<Seq1, list>, wrap<Seq2, list>, Comp>::type>;
+}
+namespace brigand
+{
+namespace detail
+{
+ template <class Ls, class Seq, typename Comp>
+ struct sort_impl;
+ template<class L, class Comp>
+ struct mini_sort;
+ template<class T0, class T1, class T2, class T3, class T4, class T5, class T6, class T7, class T8, class... Ts, class Comp>
+ struct mini_sort<list<T0, T1, T2, T3, T4, T5, T6, T7, T8, Ts...>, Comp>
+ : merge_impl<
+ list<>,
+ typename mini_sort<list<T0, T1, T2, T3, T4, T5, T6, T7>, Comp>::type,
+ typename mini_sort<list<T8, Ts...>, Comp>::type, Comp>
+ {};
+ template<class T0, class T1, class T2, class T3, class T4, class... Ts, class Comp>
+ struct mini_sort<list<T0, T1, T2, T3, T4, Ts...>, Comp>
+ : merge_impl<list<>, typename mini_sort<list<T0, T1, T2, T3>, Comp>::type, typename mini_sort<list<T4, Ts...>, Comp>::type, Comp>
+ {};
+ template<class T0, class T1, class T2, class T3, class Comp>
+ struct mini_sort<list<T0, T1, T2, T3>, Comp>
+ : merge_impl<list<>, typename mini_sort<list<T0, T1>, Comp>::type, typename mini_sort<list<T2, T3>, Comp>::type, Comp>
+ {};
+ template<class T0, class T1, class T2, class Comp>
+ struct mini_sort<list<T0, T1, T2>, Comp>
+ : merge_impl<list<>, typename mini_sort<list<T0, T1>, Comp>::type, list<T2>, Comp>
+ {};
+ template<class T0, class T1, class Comp>
+ struct mini_sort<list<T0, T1>, Comp>
+ {
+ using type = typename std::conditional<::brigand::apply<Comp, T0, T1>::value, list<T0, T1>, list<T1, T0>>::type;
+ };
+ template<class T0, class Comp>
+ struct mini_sort<list<T0>, Comp>
+ {
+ using type = list<T0>;
+ };
+ template <
+ class T0, class T1, class T2, class T3, class T4, class T5, class T6,
+ class T7, class T8, class T9, class T10, class T11, class T12, class T13,
+ class T14, class T15, class T16, class T17, class... Ts, typename Comp>
+ struct sort_impl<list<>, list<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, Ts...>, Comp>
+ : sort_impl<
+ list<typename mini_sort<list<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>, Comp>::type>,
+ list<Ts...>, Comp>
+ {};
+ template <
+ class L0,
+ class T0, class T1, class T2, class T3, class T4, class T5, class T6,
+ class T7, class T8, class T9, class T10, class T11, class T12, class T13,
+ class T14, class T15, class T16, class T17, class... Ts, typename Comp>
+ struct sort_impl<list<L0>, list<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, Ts...>, Comp>
+ : sort_impl<
+ list<L0, typename mini_sort<list<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>, Comp>::type>,
+ list<Ts...>, Comp>
+ {};
+ template <
+ class L0, class L1,
+ class T0, class T1, class T2, class T3, class T4, class T5, class T6,
+ class T7, class T8, class T9, class T10, class T11, class T12, class T13,
+ class T14, class T15, class T16, class T17, class... Ts, typename Comp>
+ struct sort_impl<list<L0,L1>, list<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, Ts...>, Comp>
+ : sort_impl<
+ list<L0, L1, typename mini_sort<list<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>, Comp>::type>,
+ list<Ts...>, Comp>
+ {};
+ template <
+ class L0, class L1, class L2,
+ class T0, class T1, class T2, class T3, class T4, class T5, class T6,
+ class T7, class T8, class T9, class T10, class T11, class T12, class T13,
+ class T14, class T15, class T16, class T17, class... Ts, typename Comp>
+ struct sort_impl<list<L0,L1,L2>, list<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, Ts...>, Comp>
+ : sort_impl<
+ list<
+ merge<L0, L1, Comp>,
+ merge<
+ typename mini_sort<list<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>, Comp>::type, L2, Comp>>,
+ list<Ts...>, Comp>
+ {};
+ template <class T, class... Ts, typename Comp>
+ struct sort_impl<list<>, list<T, Ts...>, Comp>
+ {
+ using type = typename mini_sort<list<T, Ts...>, Comp>::type;
+ };
+ template <class L, class T, class... Ts, typename Comp>
+ struct sort_impl<list<L>, list<T, Ts...>, Comp>
+ {
+ using type = merge<typename mini_sort<list<T, Ts...>, Comp>::type, L, Comp>;
+ };
+ template <class L0, class L1, class T, class... Ts, typename Comp>
+ struct sort_impl<list<L0, L1>, list<T, Ts...>, Comp>
+ {
+ using type = merge<L0, merge<typename mini_sort<list<T, Ts...>, Comp>::type, L1, Comp>, Comp>;
+ };
+ template <class L0, class L1, class L2, class T, class... Ts, typename Comp>
+ struct sort_impl<list<L0, L1, L2>, list<T, Ts...>, Comp>
+ {
+ using type = merge<merge<L0, L1, Comp>, merge<typename mini_sort<list<T, Ts...>, Comp>::type, L2, Comp>, Comp>;
+ };
+ template <class L, typename Comp>
+ struct sort_impl<list<L>, list<>, Comp>
+ {
+ using type = L;
+ };
+ template <class L0, class L1, typename Comp>
+ struct sort_impl<list<L0, L1>, list<>, Comp>
+ {
+ using type = merge<L0,L1,Comp>;
+ };
+ template <class L0, class L1, class L2, typename Comp>
+ struct sort_impl<list<L0,L1,L2>, list<>, Comp>
+ {
+ using type = merge<merge<L0,L1,Comp>,L2,Comp>;
+ };
+ template <typename Comp>
+ struct sort_impl<list<>, list<>, Comp>
+ {
+ using type = list<>;
+ };
+ template <
+ class T0, class T1, class T2, class T3, class T4, class T5, class T6, class T7, class T8, class T9, class T10, class T11, class T12,
+ class T13, class T14, class T15, class T16, class T17, class T18, class T19, class T20, class T21, class T22, class T23, class T24,
+ class T25, class T26, class T27, class T28, class T29, class T30, class T31, class T32, class T33, class T34, class T35, class T36,
+ class T37, class T38, class T39, class T40, class T41, class T42, class T43, class T44, class T45, class T46, class T47, class T48,
+ class T49, class T50, class T51, class T52, class T53, class T54, class T55, class T56, class T57, class T58, class T59, class T60,
+ class T61, class T62, class T63, class T64, class T65, class T66, class T67, class T68, class T69, class T70, class T71, class T72,
+ class T73, class T74, class T75, class T76, class T77, class T78, class T79, class T80, class T81, class T82, class T83, class T84,
+ class T85, class T86, class T87, class T88, class T89, class T90, class T91, class T92, class T93, class T94, class T95, class T96,
+ class T97, class T98, class T99, class T100, class T101, class T102, class T103, class T104, class T105, class T106, class T107,
+ class T108, class T109, class T110, class T111, class T112, class T113, class T114, class T115, class T116, class T117, class T118,
+ class T119, class T120, class T121, class T122, class T123, class T124, class T125, class T126, class T127, class T128, class T129,
+ class T130, class T131, class T132, class T133, class T134, class T135, class T136, class T137, class T138, class T139, class T140,
+ class T141, class T142, class T143, class T144, class T145, class T146, class T147, class T148, class T149, class T150, class T151,
+ class T152, class T153, class T154, class T155, class T156, class T157, class T158, class T159, class T160, class T161, class T162,
+ class T163, class T164, class T165, class T166, class T167, class T168, class T169, class T170, class T171, class T172, class T173,
+ class T174, class T175, class T176, class T177, class T178, class T179, class T180, class T181, class T182, class T183, class T184,
+ class T185, class T186, class T187, class T188, class T189, class T190, class T191, class T192, class T193, class T194, class T195,
+ class T196, class T197, class T198, class T199, class T200, class T201, class T202, class T203, class T204, class T205, class T206,
+ class T207, class T208, class T209, class T210, class T211, class T212, class T213, class T214, class T215, class T216, class T217,
+ class T218, class T219, class T220, class T221, class T222, class T223, class T224, class T225, class T226, class T227, class T228,
+ class T229, class T230, class T231, class T232, class T233, class T234, class T235, class T236, class T237, class T238, class T239,
+ class T240, class T241, class T242, class T243, class T244, class T245, class T246, class T247, class T248, class T249, class T250,
+ class T251, class T252, class T253, class T254, class T255, typename... Ts, typename Comp>
+ struct sort_impl<list<>, list<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21,
+ T22, T23, T24, T25, T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45, T46, T47,
+ T48, T49, T50, T51, T52, T53, T54, T55, T56, T57, T58, T59, T60, T61, T62, T63, T64, T65, T66, T67, T68, T69, T70, T71, T72, T73,
+ T74, T75, T76, T77, T78, T79, T80, T81, T82, T83, T84, T85, T86, T87, T88, T89, T90, T91, T92, T93, T94, T95, T96, T97, T98, T99,
+ T100, T101, T102, T103, T104, T105, T106, T107, T108, T109, T110, T111, T112, T113, T114, T115, T116, T117, T118, T119, T120, T121,
+ T122, T123, T124, T125, T126, T127, T128, T129, T130, T131, T132, T133, T134, T135, T136, T137, T138, T139, T140, T141, T142, T143,
+ T144, T145, T146, T147, T148, T149, T150, T151, T152, T153, T154, T155, T156, T157, T158, T159, T160, T161, T162, T163, T164, T165,
+ T166, T167, T168, T169, T170, T171, T172, T173, T174, T175, T176, T177, T178, T179, T180, T181, T182, T183, T184, T185, T186, T187,
+ T188, T189, T190, T191, T192, T193, T194, T195, T196, T197, T198, T199, T200, T201, T202, T203, T204, T205, T206, T207, T208, T209,
+ T210, T211, T212, T213, T214, T215, T216, T217, T218, T219, T220, T221, T222, T223, T224, T225, T226, T227, T228, T229, T230, T231,
+ T232, T233, T234, T235, T236, T237, T238, T239, T240, T241, T242, T243, T244, T245, T246, T247, T248, T249, T250, T251, T252, T253,
+ T254, T255, Ts...>, Comp>
+ {
+ using type = merge<
+ typename sort_impl<list<>, list<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20,
+ T21, T22, T23, T24, T25, T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, T48, T49, T50, T51, T52, T53, T54, T55, T56, T57, T58, T59, T60, T61, T62, T63, T64, T65, T66, T67, T68, T69, T70,
+ T71, T72, T73, T74, T75, T76, T77, T78, T79, T80, T81, T82, T83, T84, T85, T86, T87, T88, T89, T90, T91, T92, T93, T94, T95,
+ T96, T97, T98, T99, T100, T101, T102, T103, T104, T105, T106, T107, T108, T109, T110, T111, T112, T113, T114, T115, T116,
+ T117, T118, T119, T120, T121, T122, T123, T124, T125, T126, T127, T128, T129, T130, T131, T132, T133, T134, T135, T136, T137,
+ T138, T139, T140, T141, T142, T143, T144, T145, T146, T147, T148, T149, T150, T151, T152, T153, T154, T155, T156, T157, T158,
+ T159, T160, T161, T162, T163, T164, T165, T166, T167, T168, T169, T170, T171, T172, T173, T174, T175, T176, T177, T178, T179,
+ T180, T181, T182, T183, T184, T185, T186, T187, T188, T189, T190, T191, T192, T193, T194, T195, T196, T197, T198, T199, T200,
+ T201, T202, T203, T204, T205, T206, T207, T208, T209, T210, T211, T212, T213, T214, T215, T216, T217, T218, T219, T220, T221,
+ T222, T223, T224, T225, T226, T227, T228, T229, T230, T231, T232, T233, T234, T235, T236, T237, T238, T239, T240, T241, T242,
+ T243, T244, T245, T246, T247, T248, T249, T250, T251, T252, T253, T254>, Comp>::type,
+ typename sort_impl<list<>, list<T255, Ts...>, Comp>::type, Comp
+ >;
+ };
+}
+template <class Seq, class Comp = less<_1,_2>>
+using sort = append<clear<Seq>, typename detail::sort_impl<list<>, wrap<Seq, list>, Comp>::type>;
+}
+
+namespace brigand
+{
+ template <typename A>
+ struct complement : std::integral_constant< typename A::value_type
+ , typename A::value_type(~A::value)
+ > {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct divides : std::integral_constant < typename A::value_type, A::value / B::value > {};
+}
+namespace brigand
+{
+ template<class T>
+ struct identity
+ {
+ using type = T;
+ };
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct max : std::integral_constant < typename A::value_type
+ , (A::value < B::value) ? B::value : A::value
+ >
+ {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct min : std::integral_constant < typename A::value_type
+ , (A::value < B::value) ? A::value : B::value
+ >
+ {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct minus : std::integral_constant < typename A::value_type, A::value - B::value > {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct modulo : std::integral_constant < typename A::value_type, A::value % B::value > {};
+}
+
+namespace brigand
+{
+ template <typename A>
+ struct negate : std::integral_constant < typename A::value_type, -A::value > {};
+}
+
+namespace brigand
+{
+ template <typename A>
+ struct next : std::integral_constant < typename A::value_type, A::value + 1 > {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct plus : std::integral_constant < typename A::value_type, A::value + B::value > {};
+}
+
+namespace brigand
+{
+ template <typename A>
+ struct prev : std::integral_constant < typename A::value_type, A::value - 1 > {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct times : std::integral_constant < typename A::value_type, A::value * B::value > {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct bitand_ : std::integral_constant<typename A::value_type, A::value & B::value> {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct bitor_ : std::integral_constant<typename A::value_type, A::value | B::value> {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct bitxor_ : std::integral_constant<typename A::value_type, A::value ^ B::value> {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct shift_left : std::integral_constant<typename A::value_type, (A::value << B::value)> {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct shift_right : std::integral_constant<typename A::value_type, (A::value >> B::value)> {};
+}
+namespace brigand
+{
+ template <typename A, typename B>
+ struct equal_to : bool_ < (A::value == B::value) > {};
+}
+namespace brigand
+{
+ template <typename A, typename B>
+ struct greater : bool_<(A::value > B::value) > {};
+}
+namespace brigand
+{
+ template <typename A, typename B>
+ struct greater_equal : bool_ < (A::value >= B::value) > {};
+}
+namespace brigand
+{
+ template <typename A, typename B>
+ struct less_equal : bool_ < (A::value <= B::value) > {};
+}
+namespace brigand
+{
+ template <typename A, typename B>
+ struct not_equal_to : bool_ < (A::value != B::value) > {};
+}
+
+namespace brigand
+{
+ template <typename Condition, typename A, typename B>
+ struct eval_if
+ {
+ using type = typename std::conditional<Condition::value, A, B>::type::type;
+ };
+ template <bool Condition, typename A, typename B>
+ struct eval_if_c
+ {
+ using type = typename std::conditional<Condition, A, B>::type::type;
+ };
+}
+
+namespace brigand
+{
+ template <typename Condition, typename A, typename B>
+ struct if_ : std::conditional<Condition::value, A, B> {};
+ template <bool Condition, typename A, typename B>
+ struct if_c : std::conditional<Condition, A, B> {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct and_ : std::integral_constant <typename A::value_type, A::value && B::value > {};
+}
+
+namespace brigand
+{
+ template <typename T>
+ struct not_ : std::integral_constant<typename T::value_type, !T::value> {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct or_ : std::integral_constant < typename A::value_type, A::value || B::value > {};
+}
+
+namespace brigand
+{
+ template <typename A, typename B>
+ struct xor_ : std::integral_constant<typename A::value_type, A::value != B::value> {};
+}
+namespace brigand
+{
+ template<class T>
+ struct always
+ {
+ using type = T;
+ };
+}
+namespace brigand
+{
+namespace detail
+{
+ template<template<class> class F, unsigned N, class T>
+ struct repeat_impl
+ : repeat_impl<F, N-7, F<F<F<F<F<F<F<T>>>>>>>>
+ {};
+ template<template<class> class F, class T>
+ struct repeat_impl<F, 7, T>
+ {
+ using type = F<F<F<F<F<F<F<T>>>>>>>;
+ };
+ template<template<class> class F, class T>
+ struct repeat_impl<F, 6, T>
+ {
+ using type = F<F<F<F<F<F<T>>>>>>;
+ };
+ template<template<class> class F, class T>
+ struct repeat_impl<F, 5, T>
+ {
+ using type = F<F<F<F<F<T>>>>>;
+ };
+ template<template<class> class F, class T>
+ struct repeat_impl<F, 4, T>
+ {
+ using type = F<F<F<F<T>>>>;
+ };
+ template<template<class> class F, class T>
+ struct repeat_impl<F, 3, T>
+ {
+ using type = F<F<F<T>>>;
+ };
+ template<template<class> class F, class T>
+ struct repeat_impl<F, 2, T>
+ {
+ using type = F<F<T>>;
+ };
+ template<template<class> class F, class T>
+ struct repeat_impl<F, 1, T>
+ {
+ using type = F<T>;
+ };
+ template<template<class> class F, class T>
+ struct repeat_impl<F, 0, T>
+ {
+ using type = T;
+ };
+}
+namespace lazy
+{
+ template<template<class> class F, class N, class T>
+ using repeat = typename detail::repeat_impl<F, N::value, T>;
+}
+ template<template<class> class F, class N, class T>
+ using repeat = typename ::brigand::lazy::repeat<F, N, T>::type;
+}
+
+namespace brigand
+{
+ template<typename T>
+ struct sizeof_ : std::integral_constant <std::size_t, sizeof(T)> {};
+}
+namespace brigand
+{
+namespace detail
+{
+ template<class C, class K>
+ struct has_key_impl
+ {
+ using type = decltype(C::has_key(type_<K>{}));
+ };
+}
+ template<class L, class K>
+ using has_key = typename detail::has_key_impl<L, K>::type;
+}
+namespace brigand
+{
+namespace detail
+{
+ template<class Start, unsigned N, class Next, class... E>
+ struct mksq8
+ : mksq8<brigand::apply<Next, Start>, N-1, Next, E..., Start>
+ {};
+ template<class Start, class Next, class... E>
+ struct mksq8<Start, 0, Next, E...>
+ {
+ using type = list<E...>;
+ };
+ template<class Start, class Next, class... E>
+ struct mksq8<Start, 1, Next, E...>
+ {
+ using type = list<E..., Start>;
+ };
+ template<class Start, class Next>
+ struct mksq8<Start, 8, Next>
+ {
+ using t1 = brigand::apply<Next, Start>;
+ using t2 = brigand::apply<Next, t1>;
+ using t3 = brigand::apply<Next, t2>;
+ using t4 = brigand::apply<Next, t3>;
+ using t5 = brigand::apply<Next, t4>;
+ using t6 = brigand::apply<Next, t5>;
+ using t7 = brigand::apply<Next, t6>;
+ using type = list<Start, t1, t2, t3, t4, t5, t6, t7>;
+ };
+ template<template<class...> class List, class Start, unsigned N, class Next, bool, class... L>
+ struct make_sequence_impl
+ : make_sequence_impl<
+ List,
+ brigand::apply<Next, typename mksq8<Start, 8, Next>::t7>,
+ N-8,
+ Next,
+ (N-8<=8),
+ L...,
+ typename mksq8<Start, 8, Next>::type
+ >
+ {};
+ template<template<class...> class List, class Start, unsigned N, class Next, class... L>
+ struct make_sequence_impl<List, Start, N, Next, true, L...>
+ {
+ using type = append<List<>, L..., typename mksq8<Start, N, Next>::type>;
+ };
+}
+ template<class Start, unsigned N, class Next = next<_1>, template<class...> class List = list>
+ using make_sequence = typename detail::make_sequence_impl<List, Start, N, Next, (N<=8)>::type;
+}
+
+namespace brigand
+{
+ template<class L, std::size_t Index>
+ using erase_c = append<
+ front<split_at<L, size_t<Index>>>,
+ pop_front<back<split_at<L, size_t<Index>>>>
+ >;
+namespace detail
+{
+ template <typename T>
+ struct has_erase_method
+ {
+ struct dummy {};
+ template <typename C, typename P>
+ static auto test(P * p) -> decltype(C::erase(type_<P>{}), std::true_type());
+ template <typename, typename>
+ static std::false_type test(...);
+ static const bool value = std::is_same<std::true_type, decltype(test<T, dummy>(nullptr))>::value;
+ };
+ template<class L, class I, bool>
+ struct erase_dispatch
+ {
+ using type = erase_c<L, I::value>;
+ };
+ template<class C, class K>
+ struct erase_dispatch<C, K, true>
+ {
+ using type = decltype(C::erase(type_<K>{}));
+ };
+}
+ template<class L, class K>
+ using erase = typename detail::erase_dispatch<L, K, detail::has_erase_method<L>::value>::type;
+}
+namespace brigand
+{
+namespace detail
+{
+ template <class C, class T>
+ struct insert_impl
+ {
+ using type = decltype(C::insert(type_<T>{}));
+ };
+}
+ template<class L, class T>
+ using insert = typename detail::insert_impl<L, T>::type;
+}
+namespace brigand
+{
+namespace detail
+{
+ template <class L, class K>
+ struct contains_impl
+ {
+ using type = decltype(L::contains(type_<K>{}));
+ };
+}
+ template <class L, class K>
+ using contains = typename detail::contains_impl<L, K>::type;
+}
+namespace brigand
+{
+namespace detail
+{
+ template<class... Ts>
+ struct make_set;
+ template<class U, class K>
+ struct set_erase_pred_impl
+ {
+ using type = list<U>;
+ };
+ template<class K>
+ struct set_erase_pred_impl<K,K>
+ {
+ using type = list<>;
+ };
+ template <class... T>
+ struct set_impl
+ {
+ template <typename K, typename = decltype(static_cast<type_<K>*>(static_cast<make_set<T...>*>(nullptr)))>
+ static std::true_type contains(type_<K>);
+ template <typename K>
+ static std::false_type contains(K);
+ template <typename K, typename = decltype(static_cast<type_<K>*>(static_cast<make_set<T...>*>(nullptr)))>
+ static std::true_type has_key(type_<K>);
+ template <typename K>
+ static std::false_type has_key(K);
+ template <class K>
+ static append<set_impl<>, typename set_erase_pred_impl<T, K>::type...> erase(type_<K>);
+ template<class K, class = decltype(static_cast<type_<K>*>(static_cast<make_set<T...>*>(nullptr)))>
+ static set_impl insert(type_<K>);
+ template<class K>
+ static set_impl<T..., typename K::type> insert(K);
+ };
+ template<class... Ts>
+ struct make_set : type_<Ts>...
+ {
+ using type = set_impl<Ts...>;
+ };
+}
+ template<class... Ts>
+ using set = typename detail::make_set<Ts...>::type;
+}
+namespace brigand
+{
+namespace detail
+{
+ template <typename Pair>
+ struct get_second {
+ using type = typename Pair::second_type;
+ };
+}
+template <typename Map, template <class...> class Sequence = brigand::list>
+using values_as_sequence = transform<as_sequence<Map, Sequence>, detail::get_second<_1>>;
+}
+namespace brigand
+{
+namespace detail
+{
+ template <typename Pair>
+ struct get_first {
+ using type = typename Pair::first_type;
+ };
+}
+template <typename Map, template <class...> class Sequence = brigand::set>
+using keys_as_sequence = transform<as_sequence<Map, Sequence>, detail::get_first<_1>>;
+}
+namespace brigand
+{
+ struct empty_base {};
+}
+namespace brigand
+{
+ template<typename T, typename R = void > struct has_type
+ {
+ using type = R;
+ };
+}
+namespace brigand
+{
+ template<typename... Ts> struct inherit;
+ template<typename T> struct inherit<T>
+ {
+ struct type : public T {};
+ };
+ template<> struct inherit<>
+ {
+ using type = empty_base;
+ };
+ template<> struct inherit<empty_base>
+ {
+ using type = empty_base;
+ };
+ template<typename T1, typename T2> struct inherit<T1,T2>
+ {
+ struct type : public T1, T2 {};
+ };
+ template<typename T1> struct inherit<T1,empty_base>
+ {
+ using type = T1;
+ };
+ template<typename T2> struct inherit<empty_base,T2>
+ {
+ using type = T2;
+ };
+ template<> struct inherit<empty_base,empty_base>
+ {
+ using type = empty_base;
+ };
+ template<typename T1, typename T2, typename T3, typename... Ts>
+ struct inherit<T1, T2, T3, Ts...>
+ : inherit<T1, typename inherit<T2,typename inherit<T3, Ts...>::type>::type>
+ {};
+}
+namespace brigand
+{
+ namespace lazy
+ {
+ template< typename Types
+ , typename Node
+ , typename Root = brigand::empty_base
+ >
+ struct inherit_linearly;
+ template< typename Types
+ , template<typename...> class Node, typename...Ts
+ , typename Root
+ >
+ struct inherit_linearly<Types,Node<Ts...>,Root>
+ {
+ using type = brigand::fold<Types,Root,bind<Node,Ts...>>;
+ };
+ }
+ template< typename Types
+ , typename Node
+ , typename Root = brigand::empty_base
+ >
+ using inherit_linearly = typename lazy::inherit_linearly<Types,Node,Root>::type;
+}
+
+namespace brigand
+{
+ template<typename RealType, typename Type, Type Value>
+ struct real_ : std::integral_constant<Type,Value>
+ {
+ using value_type = RealType;
+ using parent = std::integral_constant<Type,Value>;
+ inline operator value_type() const
+ {
+ value_type that;
+ std::memcpy(&that, &parent::value, sizeof(value_type));
+ return that;
+ }
+ };
+ template<std::uint32_t Value>
+ struct single_ : real_<float, std::uint32_t, Value> {};
+ template<std::uint64_t Value>
+ struct double_ : real_<double, std::uint64_t,Value> {};
+}
diff --git a/Source/WTF/wtf/BubbleSort.h b/Source/WTF/wtf/BubbleSort.h
new file mode 100644
index 000000000..4d522866b
--- /dev/null
+++ b/Source/WTF/wtf/BubbleSort.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BubbleSort_h
+#define BubbleSort_h
+
+namespace WTF {
+
+// Why would you want to use bubble sort? When you know that your input is already mostly
+// sorted! This sort is guaranteed stable (it won't reorder elements that were equal), it
+// doesn't require any scratch memory, and is the fastest available sorting algorithm if your
+// input already happens to be sorted. This sort is also likely to have competitive performance
+// for small inputs, even if they are very unsorted.
+
+// We use this sorting algorithm for compiler insertion sets. An insertion set is usually very
+// nearly sorted. It shouldn't take more than a few bubbles to make it fully sorted. We made
+// this decision deliberately. Here's the performance of the testb3 Complex(64, 384) benchmark
+// with the Air::InsertionSet doing no sorting, std::stable_sorting, and bubbleSorting:
+//
+// no sort: 8.8222 +- 0.1911 ms.
+// std::stable_sort: 9.0135 +- 0.1418 ms.
+// bubbleSort: 8.8457 +- 0.1511 ms.
+//
+// Clearly, bubble sort is superior.
+//
+// Note that the critical piece here is that insertion sets tend to be small, they must be
+// sorted, the sort must be stable, they are usually already sorted to begin with, and when they
+// are unsorted it's usually because of a few out-of-place elements.
+
+template<typename IteratorType, typename LessThan>
+void bubbleSort(IteratorType begin, IteratorType end, const LessThan& lessThan)
+{
+ for (;;) {
+ bool changed = false;
+ ASSERT(end >= begin);
+ size_t limit = end - begin;
+ for (size_t i = limit; i-- > 1;) {
+ if (lessThan(begin[i], begin[i - 1])) {
+ std::swap(begin[i], begin[i - 1]);
+ changed = true;
+ }
+ }
+ if (!changed)
+ return;
+ // After one run, the first element in the list is guaranteed to be the smallest.
+ begin++;
+
+ // Now go in the other direction. This eliminates most sorting pathologies.
+ changed = false;
+ ASSERT(end >= begin);
+ limit = end - begin;
+ for (size_t i = 1; i < limit; ++i) {
+ if (lessThan(begin[i], begin[i - 1])) {
+ std::swap(begin[i], begin[i - 1]);
+ changed = true;
+ }
+ }
+ if (!changed)
+ return;
+ // Now the last element is guaranteed to be the largest.
+ end--;
+ }
+}
+
+template<typename IteratorType>
+void bubbleSort(IteratorType begin, IteratorType end)
+{
+ bubbleSort(
+ begin, end,
+ [](auto& left, auto& right) {
+ return left < right;
+ });
+}
+
+} // namespace WTF
+
+using WTF::bubbleSort;
+
+#endif // BubbleSort_h
+
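A minimal usage sketch of the WTF::bubbleSort template added above, assuming only <wtf/BubbleSort.h> and the C++ standard library; the Insertion struct and the sortInsertions() helper are hypothetical illustrations, not part of this patch:

    #include <vector>
    #include <wtf/BubbleSort.h>

    struct Insertion {
        unsigned index; // position to insert at (hypothetical field)
        int payload;
    };

    static void sortInsertions(std::vector<Insertion>& insertions)
    {
        // Stable sort by index: entries with equal indices keep their relative
        // order, and a nearly-sorted input settles after one or two passes.
        WTF::bubbleSort(
            insertions.begin(), insertions.end(),
            [](const Insertion& a, const Insertion& b) { return a.index < b.index; });
    }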
diff --git a/Source/WTF/wtf/ByteOrder.h b/Source/WTF/wtf/ByteOrder.h
index 9d96ea6ed..d55350a3b 100644
--- a/Source/WTF/wtf/ByteOrder.h
+++ b/Source/WTF/wtf/ByteOrder.h
@@ -31,8 +31,6 @@
#ifndef WTF_ByteOrder_h
#define WTF_ByteOrder_h
-#include <wtf/Platform.h>
-
#if OS(UNIX)
#include <arpa/inet.h>
#endif
diff --git a/Source/WTF/wtf/CMakeLists.txt b/Source/WTF/wtf/CMakeLists.txt
new file mode 100644
index 000000000..356c927aa
--- /dev/null
+++ b/Source/WTF/wtf/CMakeLists.txt
@@ -0,0 +1,356 @@
+set(WTF_HEADERS
+ ASCIICType.h
+ Assertions.h
+ Atomics.h
+ AutomaticThread.h
+ BackwardsGraph.h
+ Bag.h
+ BagToHashMap.h
+ BitVector.h
+ Bitmap.h
+ Brigand.h
+ BubbleSort.h
+ BumpPointerAllocator.h
+ ByteOrder.h
+ ClockType.h
+ CompilationThread.h
+ Compiler.h
+ Condition.h
+ CrossThreadCopier.h
+ CrossThreadTask.h
+ CryptographicUtilities.h
+ CryptographicallyRandomNumber.h
+ CurrentTime.h
+ DataLog.h
+ DateMath.h
+ Dominators.h
+ DecimalNumber.h
+ DeferrableRefCounted.h
+ Deque.h
+ DisallowCType.h
+ DoublyLinkedList.h
+ FastMalloc.h
+ FeatureDefines.h
+ FilePrintStream.h
+ FlipBytes.h
+ Forward.h
+ FunctionDispatcher.h
+ GetPtr.h
+ GraphNodeWorklist.h
+ GregorianDateTime.h
+ HashCountedSet.h
+ Hasher.h
+ HashFunctions.h
+ HashIterators.h
+ HashMap.h
+ HashMethod.h
+ HashSet.h
+ HashTable.h
+ HashTraits.h
+ HexNumber.h
+ IndexMap.h
+ IndexSet.h
+ IndexSparseSet.h
+ IndexedContainerIterator.h
+ IteratorAdaptors.h
+ IteratorRange.h
+ ListHashSet.h
+ Lock.h
+ LockAlgorithm.h
+ LockedPrintStream.h
+ Locker.h
+ MD5.h
+ MainThread.h
+ MallocPtr.h
+ MathExtras.h
+ MediaTime.h
+ MemoryFootprint.h
+ MessageQueue.h
+ MetaAllocator.h
+ MetaAllocatorHandle.h
+ MonotonicTime.h
+ Noncopyable.h
+ NumberOfCores.h
+ OSAllocator.h
+ OSRandomSource.h
+ OptionSet.h
+ OrderMaker.h
+ PageAllocation.h
+ PageBlock.h
+ PageReservation.h
+ ParallelHelperPool.h
+ ParallelJobs.h
+ ParallelJobsGeneric.h
+ ParallelJobsLibdispatch.h
+ ParallelJobsOpenMP.h
+ ParallelVectorIterator.h
+ ParkingLot.h
+ PassRefPtr.h
+ Platform.h
+ PrintStream.h
+ ProcessID.h
+ RAMSize.h
+ RandomNumber.h
+ RandomNumberSeed.h
+ RangeSet.h
+ RawPointer.h
+ RecursiveLockAdapter.h
+ RedBlackTree.h
+ Ref.h
+ RefCounted.h
+ RefCountedLeakCounter.h
+ RefCounter.h
+ RefPtr.h
+ RetainPtr.h
+ RunLoop.h
+ SHA1.h
+ SharedTask.h
+ SaturatedArithmetic.h
+ ScopedLambda.h
+ Seconds.h
+ SegmentedVector.h
+ SmallPtrSet.h
+ StackBounds.h
+ StackStats.h
+ StaticConstructors.h
+ StdLibExtras.h
+ Stopwatch.h
+ StringExtras.h
+ StringPrintStream.h
+ SystemTracing.h
+ ThreadIdentifierDataPthreads.h
+ ThreadSafeRefCounted.h
+ ThreadSpecific.h
+ Threading.h
+ ThreadingPrimitives.h
+ TimeWithDynamicClockType.h
+ TinyPtrSet.h
+ UniqueRef.h
+ VMTags.h
+ ValueCheck.h
+ Variant.h
+ Vector.h
+ VectorTraits.h
+ WTFThreadData.h
+ WallTime.h
+ WeakPtr.h
+ WordLock.h
+ WorkQueue.h
+ dtoa.h
+
+ dtoa/bignum-dtoa.h
+ dtoa/bignum.h
+ dtoa/cached-powers.h
+ dtoa/diy-fp.h
+ dtoa/double-conversion.h
+ dtoa/double.h
+ dtoa/fast-dtoa.h
+ dtoa/fixed-dtoa.h
+ dtoa/strtod.h
+ dtoa/utils.h
+
+ text/AtomicString.h
+ text/AtomicStringImpl.h
+ text/AtomicStringTable.h
+ text/Base64.h
+ text/CString.h
+ text/IntegerToStringConversion.h
+ text/LChar.h
+ text/LineBreakIteratorPoolICU.h
+ text/StringBuffer.h
+ text/StringCommon.h
+ text/StringHash.h
+ text/StringImpl.h
+ text/StringView.h
+ text/SymbolImpl.h
+ text/SymbolRegistry.h
+ text/TextBreakIterator.h
+ text/TextBreakIteratorInternalICU.h
+ text/UniquedStringImpl.h
+ text/WTFString.h
+
+ text/icu/UTextProvider.h
+ text/icu/UTextProviderLatin1.h
+ text/icu/UTextProviderUTF16.h
+
+ threads/BinarySemaphore.h
+
+ unicode/CharacterNames.h
+ unicode/Collator.h
+ unicode/UTF8.h
+)
+
+set(WTF_SOURCES
+ Assertions.cpp
+ Atomics.cpp
+ AutomaticThread.cpp
+ BitVector.cpp
+ ClockType.cpp
+ CompilationThread.cpp
+ CrossThreadCopier.cpp
+ CryptographicUtilities.cpp
+ CryptographicallyRandomNumber.cpp
+ CurrentTime.cpp
+ DataLog.cpp
+ DateMath.cpp
+ DecimalNumber.cpp
+ FastBitVector.cpp
+ FastMalloc.cpp
+ FilePrintStream.cpp
+ FunctionDispatcher.cpp
+ GregorianDateTime.cpp
+ HashTable.cpp
+ Lock.cpp
+ LockedPrintStream.cpp
+ MD5.cpp
+ MainThread.cpp
+ MediaTime.cpp
+ MemoryFootprint.cpp
+ MetaAllocator.cpp
+ MonotonicTime.cpp
+ NumberOfCores.cpp
+ OSRandomSource.cpp
+ PageBlock.cpp
+ ParallelHelperPool.cpp
+ ParallelJobsGeneric.cpp
+ ParkingLot.cpp
+ PrintStream.cpp
+ RAMSize.cpp
+ RandomNumber.cpp
+ RefCountedLeakCounter.cpp
+ RunLoop.cpp
+ SHA1.cpp
+ Seconds.cpp
+ SixCharacterHash.cpp
+ StackBounds.cpp
+ StackStats.cpp
+ StringPrintStream.cpp
+ Threading.cpp
+ TimeWithDynamicClockType.cpp
+ WTFThreadData.cpp
+ WallTime.cpp
+ WordLock.cpp
+ WorkQueue.cpp
+ dtoa.cpp
+
+ dtoa/bignum-dtoa.cc
+ dtoa/bignum.cc
+ dtoa/cached-powers.cc
+ dtoa/diy-fp.cc
+ dtoa/double-conversion.cc
+ dtoa/fast-dtoa.cc
+ dtoa/fixed-dtoa.cc
+ dtoa/strtod.cc
+
+ persistence/Coders.cpp
+ persistence/Decoder.cpp
+ persistence/Encoder.cpp
+
+ text/AtomicString.cpp
+ text/AtomicStringImpl.cpp
+ text/AtomicStringTable.cpp
+ text/Base64.cpp
+ text/CString.cpp
+ text/StringBuilder.cpp
+ text/StringImpl.cpp
+ text/StringStatics.cpp
+ text/StringView.cpp
+ text/SymbolImpl.cpp
+ text/SymbolRegistry.cpp
+ text/TextBreakIterator.cpp
+ text/WTFString.cpp
+
+ text/icu/UTextProvider.cpp
+ text/icu/UTextProviderLatin1.cpp
+ text/icu/UTextProviderUTF16.cpp
+
+ threads/BinarySemaphore.cpp
+
+ unicode/UTF8.cpp
+)
+
+set(WTF_INCLUDE_DIRECTORIES
+ "${BMALLOC_DIR}"
+ "${WTF_DIR}"
+ "${CMAKE_BINARY_DIR}"
+ "${DERIVED_SOURCES_DIR}"
+ "${THIRDPARTY_DIR}"
+)
+
+set(WTF_PRIVATE_INCLUDE_DIRECTORIES
+ "${WTF_DIR}/wtf"
+ "${WTF_DIR}/wtf/dtoa"
+ "${WTF_DIR}/wtf/persistence"
+ "${WTF_DIR}/wtf/text"
+ "${WTF_DIR}/wtf/text/icu"
+ "${WTF_DIR}/wtf/threads"
+ "${WTF_DIR}/wtf/unicode"
+)
+
+set(WTF_LIBRARIES
+ ${CMAKE_DL_LIBS}
+)
+
+if (CMAKE_SYSTEM_NAME MATCHES "Darwin")
+ list(APPEND WTF_HEADERS
+ spi/darwin/CommonCryptoSPI.h
+ )
+ list(APPEND WTF_INCLUDE_DIRECTORIES
+ "${WTF_DIR}/wtf/spi/darwin"
+ )
+endif ()
+
+if (NOT USE_SYSTEM_MALLOC)
+ list(APPEND WTF_LIBRARIES bmalloc)
+endif ()
+
+list(APPEND WTF_SOURCES
+ unicode/icu/CollatorICU.cpp
+)
+set(WTF_SYSTEM_INCLUDE_DIRECTORIES
+ ${ICU_INCLUDE_DIRS}
+)
+list(APPEND WTF_LIBRARIES
+ ${ICU_DATA_LIBRARIES}
+ ${ICU_I18N_LIBRARIES}
+ ${ICU_LIBRARIES}
+)
+
+if (WIN32)
+ list(APPEND WTF_SOURCES
+ OSAllocatorWin.cpp
+ ThreadSpecificWin.cpp
+ ThreadingWin.cpp
+ )
+else ()
+ list(APPEND WTF_HEADERS
+ ThreadIdentifierDataPthreads.h
+ )
+ list(APPEND WTF_SOURCES
+ OSAllocatorPosix.cpp
+ ThreadIdentifierDataPthreads.cpp
+ ThreadingPthreads.cpp
+ )
+endif ()
+
+WEBKIT_INCLUDE_CONFIG_FILES_IF_EXISTS()
+
+WEBKIT_WRAP_SOURCELIST(${WTF_SOURCES})
+WEBKIT_FRAMEWORK(WTF)
+
+if (MSVC)
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SUBSYSTEM:WINDOWS")
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /SUBSYSTEM:WINDOWS")
+
+ add_custom_command(
+ TARGET WTF
+ PRE_BUILD
+ COMMAND ${PERL_EXECUTABLE} ${WEBKIT_LIBRARIES_DIR}/tools/scripts/auto-version.pl ${DERIVED_SOURCES_DIR}
+ VERBATIM)
+
+ add_custom_command(
+ TARGET WTF
+ POST_BUILD
+ COMMAND ${PERL_EXECUTABLE} ${WEBKIT_LIBRARIES_DIR}/tools/scripts/version-stamp.pl ${DERIVED_SOURCES_DIR} $<TARGET_FILE:WTF>
+ VERBATIM)
+endif ()
diff --git a/Source/WTF/wtf/CONTRIBUTORS.pthreads-win32 b/Source/WTF/wtf/CONTRIBUTORS.pthreads-win32
new file mode 100644
index 000000000..7de0f2606
--- /dev/null
+++ b/Source/WTF/wtf/CONTRIBUTORS.pthreads-win32
@@ -0,0 +1,137 @@
+This is a copy of the CONTRIBUTORS file for the Pthreads-win32 library, downloaded
+from http://sourceware.org/cgi-bin/cvsweb.cgi/~checkout~/pthreads/CONTRIBUTORS?rev=1.32&cvsroot=pthreads-win32
+
+Included here to complement the Pthreads-win32 license header in the wtf/ThreadingWin.cpp file.
+WebKit is using derived sources of ThreadCondition code from Pthreads-win32.
+
+-------------------------------------------------------------------------------
+
+Contributors (in approximate order of appearance)
+
+[See also the ChangeLog file where individuals are
+attributed in log entries. Likewise in the FAQ file.]
+
+Ben Elliston bje at cygnus dot com
+ Initiated the project;
+ setup the project infrastructure (CVS, web page, etc.);
+ early prototype routines.
+Ross Johnson rpj at callisto dot canberra dot edu dot au
+ early prototype routines;
+ ongoing project coordination/maintenance;
+ implementation of spin locks and barriers;
+ various enhancements;
+ bug fixes;
+ documentation;
+ testsuite.
+Robert Colquhoun rjc at trump dot net dot au
+ Early bug fixes.
+John E. Bossom John dot Bossom at cognos dot com
+ Contributed substantial original working implementation;
+ bug fixes;
+ ongoing guidance and standards interpretation.
+Anders Norlander anorland at hem2 dot passagen dot se
+ Early enhancements and runtime checking for supported
+ Win32 routines.
+Tor Lillqvist tml at iki dot fi
+ General enhancements;
+ early bug fixes to condition variables.
+Scott Lightner scott at curriculum dot com
+ Bug fix.
+Kevin Ruland Kevin dot Ruland at anheuser-busch dot com
+ Various bug fixes.
+Mike Russo miker at eai dot com
+ Bug fix.
+Mark E. Armstrong avail at pacbell dot net
+ Bug fixes.
+Lorin Hochstein lmh at xiphos dot ca
+ general bug fixes; bug fixes to condition variables.
+Peter Slacik Peter dot Slacik at tatramed dot sk
+ Bug fixes.
+Mumit Khan khan at xraylith dot wisc dot edu
+ Fixes to work with Mingw32.
+Milan Gardian mg at tatramed dot sk
+ Bug fixes and reports/analyses of obscure problems.
+Aurelio Medina aureliom at crt dot com
+ First implementation of read-write locks.
+Graham Dumpleton Graham dot Dumpleton at ra dot pad dot otc dot telstra dot com dot au
+ Bug fix in condition variables.
+Tristan Savatier tristan at mpegtv dot com
+ WinCE port.
+Erik Hensema erik at hensema dot xs4all dot nl
+ Bug fixes.
+Rich Peters rpeters at micro-magic dot com
+Todd Owen towen at lucidcalm dot dropbear dot id dot au
+ Bug fixes to dll loading.
+Jason Nye jnye at nbnet dot nb dot ca
+ Implementation of async cancelation.
+Fred Forester fforest at eticomm dot net
+Kevin D. Clark kclark at cabletron dot com
+David Baggett dmb at itasoftware dot com
+ Bug fixes.
+Paul Redondo paul at matchvision dot com
+Scott McCaskill scott at 3dfx dot com
+ Bug fixes.
+Jef Gearhart jgearhart at tpssys dot com
+ Bug fix.
+Arthur Kantor akantor at bexusa dot com
+ Mutex enhancements.
+Steven Reddie smr at essemer dot com dot au
+ Bug fix.
+Alexander Terekhov TEREKHOV at de dot ibm dot com
+ Re-implemented and improved read-write locks;
+ (with Louis Thomas) re-implemented and improved
+ condition variables;
+ enhancements to semaphores;
+ enhancements to mutexes;
+ new mutex implementation in 'futex' style;
+ suggested a robust implementation of pthread_once
+ similar to that implemented by V. Kliatchko;
+ system clock change handling re CV timeouts;
+ bug fixes.
+Thomas Pfaff tpfaff at gmx dot net
+ Changes to make C version usable with C++ applications;
+ re-implemented mutex routines to avoid Win32 mutexes
+ and TryEnterCriticalSection;
+ procedure to fix Mingw32 thread-safety issues.
+Franco Bez franco dot bez at gmx dot de
+ procedure to fix Mingw32 thread-safety issues.
+Louis Thomas lthomas at arbitrade dot com
+ (with Alexander Terekhov) re-implemented and improved
+ condition variables.
+David Korn dgk at research dot att dot com
+ Ported to UWIN.
+Phil Frisbie, Jr. phil at hawksoft dot com
+ Bug fix.
+Ralf Brese Ralf dot Brese at pdb4 dot siemens dot de
+ Bug fix.
+prionx at juno dot com prionx at juno dot com
+ Bug fixes.
+Max Woodbury mtew at cds dot duke dot edu
+ POSIX versioning conditionals;
+ reduced namespace pollution;
+ idea to separate routines to reduce statically
+ linked image sizes.
+Rob Fanner rfanner at stonethree dot com
+ Bug fix.
+Michael Johnson michaelj at maine dot rr dot com
+ Bug fix.
+Nicolas Barry boozai at yahoo dot com
+ Bug fixes.
+Piet van Bruggen pietvb at newbridges dot nl
+ Bug fix.
+Makoto Kato raven at oldskool dot jp
+ AMD64 port.
+Panagiotis E. Hadjidoukas peh at hpclab dot ceid dot upatras dot gr
+ Contributed the QueueUserAPCEx package which
+ makes preemptive async cancelation possible.
+Will Bryant will dot bryant at ecosm dot com
+ Borland compiler patch and makefile.
+Anuj Goyal anuj dot goyal at gmail dot com
+ Port to Digital Mars compiler.
+Gottlob Frege gottlobfrege at gmail dot com
+ re-implemented pthread_once (version 2)
+ (pthread_once cancellation added by rpj).
+Vladimir Kliatchko vladimir at kliatchko dot com
+ reimplemented pthread_once with the same form
+ as described by A.Terekhov (later version 2);
+ implementation of MCS (Mellor-Crummey/Scott) locks.
\ No newline at end of file
diff --git a/Source/WTF/wtf/CheckedArithmetic.h b/Source/WTF/wtf/CheckedArithmetic.h
index b58da2027..798d6f07c 100644
--- a/Source/WTF/wtf/CheckedArithmetic.h
+++ b/Source/WTF/wtf/CheckedArithmetic.h
@@ -75,11 +75,16 @@ class CrashOnOverflow {
public:
static NO_RETURN_DUE_TO_CRASH void overflowed()
{
- CRASH();
+ crash();
}
void clearOverflow() { }
+ static NO_RETURN_DUE_TO_CRASH void crash()
+ {
+ CRASH();
+ }
+
public:
bool hasOverflowed() const { return false; }
};
@@ -101,6 +106,11 @@ protected:
m_overflowed = false;
}
+ static NO_RETURN_DUE_TO_CRASH void crash()
+ {
+ CRASH();
+ }
+
public:
bool hasOverflowed() const { return m_overflowed; }
@@ -112,63 +122,84 @@ template <typename T, class OverflowHandler = CrashOnOverflow> class Checked;
template <typename T> struct RemoveChecked;
template <typename T> struct RemoveChecked<Checked<T>>;
-template <typename Target, typename Source, bool targetSigned = std::numeric_limits<Target>::is_signed, bool sourceSigned = std::numeric_limits<Source>::is_signed> struct BoundsChecker;
-template <typename Target, typename Source> struct BoundsChecker<Target, Source, false, false> {
+template <typename Target, typename Source, bool isTargetBigger = sizeof(Target) >= sizeof(Source), bool targetSigned = std::numeric_limits<Target>::is_signed, bool sourceSigned = std::numeric_limits<Source>::is_signed> struct BoundsChecker;
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, false, false, false> {
static bool inBounds(Source value)
{
- // Same signedness so implicit type conversion will always increase precision
- // to widest type
+ // Same signedness so implicit type conversion will always increase precision to widest type.
return value <= std::numeric_limits<Target>::max();
}
};
-
-template <typename Target, typename Source> struct BoundsChecker<Target, Source, true, true> {
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, false, true, true> {
static bool inBounds(Source value)
{
- // Same signedness so implicit type conversion will always increase precision
- // to widest type
+ // Same signedness so implicit type conversion will always increase precision to widest type.
return std::numeric_limits<Target>::min() <= value && value <= std::numeric_limits<Target>::max();
}
};
-template <typename Target, typename Source> struct BoundsChecker<Target, Source, false, true> {
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, false, false, true> {
static bool inBounds(Source value)
{
- // Target is unsigned so any value less than zero is clearly unsafe
- if (value < 0)
- return false;
- // If our (unsigned) Target is the same or greater width we can
- // convert value to type Target without losing precision
- if (sizeof(Target) >= sizeof(Source))
- return static_cast<Target>(value) <= std::numeric_limits<Target>::max();
- // The signed Source type has greater precision than the target so
- // max(Target) -> Source will widen.
- return value <= static_cast<Source>(std::numeric_limits<Target>::max());
+ // Converting a negative value to the unsigned Source type yields a very large value.
+ // That cast value always exceeds Target::max, because Source is wider than Target.
+ return static_cast<typename std::make_unsigned<Source>::type>(value) <= std::numeric_limits<Target>::max();
}
};
-template <typename Target, typename Source> struct BoundsChecker<Target, Source, true, false> {
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, false, true, false> {
static bool inBounds(Source value)
{
- // Signed target with an unsigned source
- if (sizeof(Target) <= sizeof(Source))
- return value <= static_cast<Source>(std::numeric_limits<Target>::max());
- // Target is Wider than Source so we're guaranteed to fit any value in
- // unsigned Source
+ // The unsigned Source type has greater precision than the target so max(Target) -> Source will widen.
+ return value <= static_cast<Source>(std::numeric_limits<Target>::max());
+ }
+};
+
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, true, false, false> {
+ static bool inBounds(Source)
+ {
+ // Same sign, greater or same precision.
return true;
}
};
-template <typename Target, typename Source, bool CanElide = std::is_same<Target, Source>::value || (sizeof(Target) > sizeof(Source)) > struct BoundsCheckElider;
-template <typename Target, typename Source> struct BoundsCheckElider<Target, Source, true> {
- static bool inBounds(Source) { return true; }
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, true, true, true> {
+ static bool inBounds(Source)
+ {
+ // Same sign, greater or same precision.
+ return true;
+ }
};
-template <typename Target, typename Source> struct BoundsCheckElider<Target, Source, false> : public BoundsChecker<Target, Source> {
+
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, true, true, false> {
+ static bool inBounds(Source value)
+ {
+ // Target is signed with greater or same precision. If strictly greater, it is always safe.
+ if (sizeof(Target) > sizeof(Source))
+ return true;
+ return value <= static_cast<Source>(std::numeric_limits<Target>::max());
+ }
+};
+
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, true, false, true> {
+ static bool inBounds(Source value)
+ {
+ // Target is unsigned with greater precision.
+ return value >= 0;
+ }
};
template <typename Target, typename Source> static inline bool isInBounds(Source value)
{
- return BoundsCheckElider<Target, Source>::inBounds(value);
+ return BoundsChecker<Target, Source>::inBounds(value);
+}
+
+template <typename Target, typename Source> static inline bool convertSafely(Source input, Target& output)
+{
+ if (!isInBounds<Target>(input))
+ return false;
+ output = static_cast<Target>(input);
+ return true;
}
template <typename T> struct RemoveChecked {
@@ -517,23 +548,22 @@ public:
bool operator!() const
{
if (this->hasOverflowed())
- CRASH();
+ this->crash();
return !m_value;
}
- typedef void* (Checked::*UnspecifiedBoolType);
- operator UnspecifiedBoolType*() const
+ explicit operator bool() const
{
if (this->hasOverflowed())
- CRASH();
- return (m_value) ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0;
+ this->crash();
+ return m_value;
}
// Value accessors. unsafeGet() will crash if there's been an overflow.
T unsafeGet() const
{
if (this->hasOverflowed())
- CRASH();
+ this->crash();
return m_value;
}
@@ -612,7 +642,7 @@ public:
template <typename U> bool operator==(U rhs)
{
if (this->hasOverflowed())
- this->overflowed();
+ this->crash();
return safeEquals(m_value, rhs);
}
@@ -626,6 +656,47 @@ public:
return !(*this == rhs);
}
+ // Other comparisons
+ template <typename V> bool operator<(Checked<T, V> rhs) const
+ {
+ return unsafeGet() < rhs.unsafeGet();
+ }
+
+ bool operator<(T rhs) const
+ {
+ return unsafeGet() < rhs;
+ }
+
+ template <typename V> bool operator<=(Checked<T, V> rhs) const
+ {
+ return unsafeGet() <= rhs.unsafeGet();
+ }
+
+ bool operator<=(T rhs) const
+ {
+ return unsafeGet() <= rhs;
+ }
+
+ template <typename V> bool operator>(Checked<T, V> rhs) const
+ {
+ return unsafeGet() > rhs.unsafeGet();
+ }
+
+ bool operator>(T rhs) const
+ {
+ return unsafeGet() > rhs;
+ }
+
+ template <typename V> bool operator>=(Checked<T, V> rhs) const
+ {
+ return unsafeGet() >= rhs.unsafeGet();
+ }
+
+ bool operator>=(T rhs) const
+ {
+ return unsafeGet() >= rhs;
+ }
+
private:
// Disallow implicit conversion of floating point to integer types
Checked(float);
@@ -735,6 +806,30 @@ template<typename T, typename... Args> bool sumOverflows(Args... args)
return checkedSum<T>(args...).hasOverflowed();
}
+template<typename T, typename U> bool differenceOverflows(U left, U right)
+{
+ return (Checked<T, RecordOverflow>(left) - Checked<T, RecordOverflow>(right)).hasOverflowed();
+}
+
+template<typename T, typename U>
+Checked<T, RecordOverflow> checkedProduct(U value)
+{
+ return Checked<T, RecordOverflow>(value);
+}
+template<typename T, typename U, typename... Args>
+Checked<T, RecordOverflow> checkedProduct(U value, Args... args)
+{
+ return Checked<T, RecordOverflow>(value) * checkedProduct<T>(args...);
+}
+
+// Sometimes, you just want to check if some math would overflow - the code to do the math is
+// already in place, and you want to guard it.
+
+template<typename T, typename... Args> bool productOverflows(Args... args)
+{
+ return checkedProduct<T>(args...).hasOverflowed();
+}
+
}
using WTF::Checked;
@@ -750,6 +845,8 @@ using WTF::CheckedInt64;
using WTF::CheckedUint64;
using WTF::CheckedSize;
using WTF::checkedSum;
+using WTF::differenceOverflows;
+using WTF::productOverflows;
using WTF::sumOverflows;
#endif
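A brief sketch of how the helpers added above (checkedProduct(), productOverflows(), convertSafely()) might be used; computeBackingStoreBytes() and toBufferLength() are hypothetical callers, and only the WTF names come from CheckedArithmetic.h:

    #include <wtf/CheckedArithmetic.h>

    // Computes width * height * 4 without silent wraparound; returns false
    // instead of crashing when the product does not fit in size_t.
    static bool computeBackingStoreBytes(unsigned width, unsigned height, size_t& bytes)
    {
        auto product = WTF::checkedProduct<size_t>(width, height, 4u);
        if (product.hasOverflowed()) // equivalently: WTF::productOverflows<size_t>(width, height, 4u)
            return false;
        bytes = product.unsafeGet();
        return true;
    }

    // convertSafely() rejects negative sizes as well as values too large for size_t.
    static bool toBufferLength(int64_t fileSize, size_t& length)
    {
        return WTF::convertSafely(fileSize, length);
    }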
diff --git a/Source/WTF/wtf/ByteSpinLock.h b/Source/WTF/wtf/ClockType.cpp
index 9ee003334..df8ee182f 100644
--- a/Source/WTF/wtf/ByteSpinLock.h
+++ b/Source/WTF/wtf/ClockType.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,49 +23,25 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ByteSpinLock_h
-#define ByteSpinLock_h
+#include "config.h"
+#include "ClockType.h"
-#include <thread>
-#include <wtf/Assertions.h>
-#include <wtf/Atomics.h>
-#include <wtf/Locker.h>
-#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
namespace WTF {
-class ByteSpinLock {
- WTF_MAKE_NONCOPYABLE(ByteSpinLock);
-public:
- ByteSpinLock()
- : m_lock(0)
- {
+void printInternal(PrintStream& out, ClockType type)
+{
+ switch (type) {
+ case ClockType::Wall:
+ out.print("Wall");
+ return;
+ case ClockType::Monotonic:
+ out.print("Monotonic");
+ return;
}
-
- void lock()
- {
- while (!weakCompareAndSwap(&m_lock, 0, 1))
- std::this_thread::yield();
- memoryBarrierAfterLock();
- }
-
- void unlock()
- {
- memoryBarrierBeforeUnlock();
- m_lock = 0;
- }
-
- bool isHeld() const { return !!m_lock; }
-
-private:
- uint8_t m_lock;
-};
-
-typedef Locker<ByteSpinLock> ByteSpinLocker;
+ RELEASE_ASSERT_NOT_REACHED();
+}
} // namespace WTF
-using WTF::ByteSpinLock;
-using WTF::ByteSpinLocker;
-
-#endif // ByteSpinLock_h
diff --git a/Source/WTF/wtf/Decoder.h b/Source/WTF/wtf/ClockType.h
index 341d58d73..15491062c 100644
--- a/Source/WTF/wtf/Decoder.h
+++ b/Source/WTF/wtf/ClockType.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,35 +23,22 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef Decoder_h
-#define Decoder_h
-
-#include <wtf/Vector.h>
+#ifndef WTF_ClockType_h
+#define WTF_ClockType_h
namespace WTF {
-class String;
-
-class Decoder {
-protected:
- Decoder() { }
- virtual ~Decoder() { }
-
-public:
- virtual bool decodeBytes(Vector<uint8_t>&) = 0;
-
- virtual bool decodeBool(bool&) = 0;
- virtual bool decodeUInt32(uint32_t&) = 0;
- virtual bool decodeUInt64(uint64_t&) = 0;
- virtual bool decodeInt32(int32_t&) = 0;
- virtual bool decodeInt64(int64_t&) = 0;
- virtual bool decodeFloat(float&) = 0;
- virtual bool decodeDouble(double&) = 0;
- virtual bool decodeString(String&) = 0;
+class PrintStream;
+
+enum class ClockType {
+ Wall,
+ Monotonic
};
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, ClockType);
+
} // namespace WTF
-using WTF::Decoder;
+using WTF::ClockType;
-#endif // Decoder_h
+#endif // WTF_ClockType_h
diff --git a/Source/WTF/wtf/CommaPrinter.h b/Source/WTF/wtf/CommaPrinter.h
index a8f3d3917..e6ab9e810 100644
--- a/Source/WTF/wtf/CommaPrinter.h
+++ b/Source/WTF/wtf/CommaPrinter.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,25 +32,30 @@ namespace WTF {
class CommaPrinter {
public:
- CommaPrinter(const char* comma = ", ")
+ CommaPrinter(const char* comma = ", ", const char* start = "")
: m_comma(comma)
- , m_isFirst(true)
+ , m_start(start)
+ , m_didPrint(false)
{
}
void dump(PrintStream& out) const
{
- if (m_isFirst) {
- m_isFirst = false;
+ if (!m_didPrint) {
+ out.print(m_start);
+ m_didPrint = true;
return;
}
out.print(m_comma);
}
+ bool didPrint() const { return m_didPrint; }
+
private:
const char* m_comma;
- mutable bool m_isFirst;
+ const char* m_start;
+ mutable bool m_didPrint;
};
} // namespace WTF
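A small sketch of how the new start prefix and didPrint() accessor can be used together; dumpValues() and the bracket delimiters are illustrative only:

#include <wtf/CommaPrinter.h>
#include <wtf/DataLog.h>
#include <wtf/Vector.h>

static void dumpValues(const Vector<int>& values)
{
    CommaPrinter comma(", ", "[");  // "[" is printed before the first element only
    for (int value : values)
        dataLog(comma, value);
    if (comma.didPrint())
        dataLog("]");               // close the list only if something was printed
    dataLog("\n");
}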
diff --git a/Source/WTF/wtf/CompilationThread.cpp b/Source/WTF/wtf/CompilationThread.cpp
index 4b026ac54..9761a0fc6 100644
--- a/Source/WTF/wtf/CompilationThread.cpp
+++ b/Source/WTF/wtf/CompilationThread.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,13 +33,13 @@
namespace WTF {
-static ThreadSpecific<bool>* s_isCompilationThread;
+static ThreadSpecific<bool, CanBeGCThread::True>* s_isCompilationThread;
static void initializeCompilationThreads()
{
static std::once_flag initializeCompilationThreadsOnceFlag;
std::call_once(initializeCompilationThreadsOnceFlag, []{
- s_isCompilationThread = new ThreadSpecific<bool>();
+ s_isCompilationThread = new ThreadSpecific<bool, CanBeGCThread::True>();
});
}
diff --git a/Source/WTF/wtf/CompilationThread.h b/Source/WTF/wtf/CompilationThread.h
index 6e3fcdf20..f914744f6 100644
--- a/Source/WTF/wtf/CompilationThread.h
+++ b/Source/WTF/wtf/CompilationThread.h
@@ -28,7 +28,6 @@
namespace WTF {
-WTF_EXPORT_PRIVATE bool isCompilationThread();
WTF_EXPORT_PRIVATE bool exchangeIsCompilationThread(bool newValue);
class CompilationScope {
@@ -55,7 +54,6 @@ private:
using WTF::CompilationScope;
using WTF::exchangeIsCompilationThread;
-using WTF::isCompilationThread;
#endif // CompilationThread_h
diff --git a/Source/WTF/wtf/Compiler.h b/Source/WTF/wtf/Compiler.h
index 2e2cfba4b..aa41805b2 100644
--- a/Source/WTF/wtf/Compiler.h
+++ b/Source/WTF/wtf/Compiler.h
@@ -35,56 +35,71 @@
/* COMPILER_QUIRK() - whether the compiler being used to build the project requires a given quirk. */
#define COMPILER_QUIRK(WTF_COMPILER_QUIRK) (defined WTF_COMPILER_QUIRK_##WTF_COMPILER_QUIRK && WTF_COMPILER_QUIRK_##WTF_COMPILER_QUIRK)
+/* COMPILER_HAS_CLANG_BUILTIN() - whether the compiler supports a particular clang builtin. */
+#ifdef __has_builtin
+#define COMPILER_HAS_CLANG_BUILTIN(x) __has_builtin(x)
+#else
+#define COMPILER_HAS_CLANG_BUILTIN(x) 0
+#endif
+
+/* COMPILER_HAS_CLANG_FEATURE() - whether the compiler supports a particular clang language or library feature. */
+/* http://clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension */
+#ifdef __has_feature
+#define COMPILER_HAS_CLANG_FEATURE(x) __has_feature(x)
+#else
+#define COMPILER_HAS_CLANG_FEATURE(x) 0
+#endif
+
/* ==== COMPILER() - primary detection of the compiler being used to build the project, in alphabetical order ==== */
/* COMPILER(CLANG) - Clang */
#if defined(__clang__)
#define WTF_COMPILER_CLANG 1
-#define WTF_COMPILER_SUPPORTS_BLOCKS __has_feature(blocks)
-#define WTF_COMPILER_SUPPORTS_C_STATIC_ASSERT __has_feature(c_static_assert)
-#define WTF_COMPILER_SUPPORTS_CXX_CONSTEXPR __has_feature(cxx_constexpr)
-#define WTF_COMPILER_SUPPORTS_CXX_REFERENCE_QUALIFIED_FUNCTIONS __has_feature(cxx_reference_qualified_functions)
-#define WTF_COMPILER_SUPPORTS_CXX_USER_LITERALS __has_feature(cxx_user_literals)
-#define WTF_COMPILER_SUPPORTS_FALLTHROUGH_WARNINGS __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
-#endif
+#define WTF_COMPILER_SUPPORTS_BLOCKS COMPILER_HAS_CLANG_FEATURE(blocks)
+#define WTF_COMPILER_SUPPORTS_C_STATIC_ASSERT COMPILER_HAS_CLANG_FEATURE(c_static_assert)
+#define WTF_COMPILER_SUPPORTS_CXX_REFERENCE_QUALIFIED_FUNCTIONS COMPILER_HAS_CLANG_FEATURE(cxx_reference_qualified_functions)
+#define WTF_COMPILER_SUPPORTS_CXX_USER_LITERALS COMPILER_HAS_CLANG_FEATURE(cxx_user_literals)
+#define WTF_COMPILER_SUPPORTS_FALLTHROUGH_WARNINGS COMPILER_HAS_CLANG_FEATURE(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
+#define WTF_COMPILER_SUPPORTS_CXX_EXCEPTIONS COMPILER_HAS_CLANG_FEATURE(cxx_exceptions)
+#define WTF_COMPILER_SUPPORTS_BUILTIN_IS_TRIVIALLY_COPYABLE COMPILER_HAS_CLANG_FEATURE(is_trivially_copyable)
-/* COMPILER(GCC) - GNU Compiler Collection */
+#ifdef __cplusplus
+#if __cplusplus <= 201103L
+#define WTF_CPP_STD_VER 11
+#elif __cplusplus <= 201402L
+#define WTF_CPP_STD_VER 14
+#endif
+#endif
-/* Note: This section must come after the Clang section since we check !COMPILER(CLANG) here. */
+#endif // defined(__clang__)
+/* COMPILER(GCC_OR_CLANG) - GNU Compiler Collection or Clang */
#if defined(__GNUC__)
-#define WTF_COMPILER_GCC 1
-#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-#define GCC_VERSION_AT_LEAST(major, minor, patch) (GCC_VERSION >= (major * 10000 + minor * 100 + patch))
+#define WTF_COMPILER_GCC_OR_CLANG 1
#endif
-/* Define GCC_VERSION_AT_LEAST for all compilers, so we can write things like GCC_VERSION_AT_LEAST(4, 1, 0). */
-/* FIXME: Doesn't seem all that valuable. Can we remove this? */
-#if !defined(GCC_VERSION_AT_LEAST)
-#define GCC_VERSION_AT_LEAST(major, minor, patch) 0
-#endif
+/* COMPILER(GCC) - GNU Compiler Collection */
+/* Note: This section must come after the Clang section since we check !COMPILER(CLANG) here. */
+#if COMPILER(GCC_OR_CLANG) && !COMPILER(CLANG)
+#define WTF_COMPILER_GCC 1
+#define WTF_COMPILER_SUPPORTS_CXX_USER_LITERALS 1
+#define WTF_COMPILER_SUPPORTS_CXX_REFERENCE_QUALIFIED_FUNCTIONS 1
-#if COMPILER(GCC) && !COMPILER(CLANG) && !GCC_VERSION_AT_LEAST(4, 7, 0)
-#error "Please use a newer version of GCC. WebKit requires GCC 4.7.0 or newer to compile."
-#endif
+#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#define GCC_VERSION_AT_LEAST(major, minor, patch) (GCC_VERSION >= (major * 10000 + minor * 100 + patch))
-#if COMPILER(GCC) && !COMPILER(CLANG)
-#define WTF_COMPILER_SUPPORTS_CXX_CONSTEXPR 1
-#define WTF_COMPILER_SUPPORTS_CXX_USER_LITERALS 1
+#if !GCC_VERSION_AT_LEAST(4, 9, 0)
+#error "Please use a newer version of GCC. WebKit requires GCC 4.9.0 or newer to compile."
#endif
-#if COMPILER(GCC) && !COMPILER(CLANG) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
#define WTF_COMPILER_SUPPORTS_C_STATIC_ASSERT 1
#endif
-#if COMPILER(GCC) && !COMPILER(CLANG) && GCC_VERSION_AT_LEAST(4, 8, 0)
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
-#if COMPILER(GCC) && !COMPILER(CLANG) && (defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(__cplusplus) && __cplusplus >= 201103L))
-#pragma GCC diagnostic ignored "-Wunused-local-typedefs"
-#endif
+#endif /* COMPILER(GCC) */
/* COMPILER(MINGW) - MinGW GCC */
@@ -104,11 +119,14 @@
/* COMPILER(MSVC) - Microsoft Visual C++ */
#if defined(_MSC_VER)
+
#define WTF_COMPILER_MSVC 1
+#define WTF_COMPILER_SUPPORTS_CXX_REFERENCE_QUALIFIED_FUNCTIONS 1
+
+#if _MSC_VER < 1900
+#error "Please use a newer version of Visual Studio. WebKit requires VS2015 or newer to compile."
#endif
-#if defined(_MSC_VER) && _MSC_VER < 1800
-#error "Please use a newer version of Visual Studio. WebKit requires VS2013 or newer to compile."
#endif
/* COMPILER(SUNCC) */
@@ -117,6 +135,10 @@
#define WTF_COMPILER_SUNCC 1
#endif
+#if !COMPILER(CLANG) && !COMPILER(MSVC)
+#define WTF_COMPILER_QUIRK_CONSIDERS_UNREACHABLE_CODE 1
+#endif
+
/* ==== COMPILER_SUPPORTS - additional compiler feature detection, in alphabetical order ==== */
/* COMPILER_SUPPORTS(EABI) */
@@ -125,11 +147,33 @@
#define WTF_COMPILER_SUPPORTS_EABI 1
#endif
+/* RELAXED_CONSTEXPR */
+
+#if defined(__cpp_constexpr) && __cpp_constexpr >= 201304
+#define WTF_COMPILER_SUPPORTS_RELAXED_CONSTEXPR 1
+#endif
+
+#if !defined(RELAXED_CONSTEXPR)
+#if COMPILER_SUPPORTS(RELAXED_CONSTEXPR)
+#define RELAXED_CONSTEXPR constexpr
+#else
+#define RELAXED_CONSTEXPR
+#endif
+#endif
+
+#define ASAN_ENABLED COMPILER_HAS_CLANG_FEATURE(address_sanitizer)
+
+#if ASAN_ENABLED
+#define SUPPRESS_ASAN __attribute__((no_sanitize_address))
+#else
+#define SUPPRESS_ASAN
+#endif
+
/* ==== Compiler-independent macros for various compiler features, in alphabetical order ==== */
/* ALWAYS_INLINE */
-#if !defined(ALWAYS_INLINE) && COMPILER(GCC) && defined(NDEBUG) && !COMPILER(MINGW)
+#if !defined(ALWAYS_INLINE) && COMPILER(GCC_OR_CLANG) && defined(NDEBUG) && !COMPILER(MINGW)
#define ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif
@@ -141,14 +185,14 @@
#define ALWAYS_INLINE inline
#endif
-/* CONSTEXPR */
+/* WTF_EXTERN_C_{BEGIN, END} */
-#if !defined(CONSTEXPR) && COMPILER_SUPPORTS(CXX_CONSTEXPR)
-#define CONSTEXPR constexpr
-#endif
-
-#if !defined(CONSTEXPR)
-#define CONSTEXPR
+#ifdef __cplusplus
+#define WTF_EXTERN_C_BEGIN extern "C" {
+#define WTF_EXTERN_C_END }
+#else
+#define WTF_EXTERN_C_BEGIN
+#define WTF_EXTERN_C_END
#endif
/* FALLTHROUGH */
@@ -163,7 +207,7 @@
/* LIKELY */
-#if !defined(LIKELY) && COMPILER(GCC)
+#if !defined(LIKELY) && COMPILER(GCC_OR_CLANG)
#define LIKELY(x) __builtin_expect(!!(x), 1)
#endif
@@ -173,7 +217,7 @@
/* NEVER_INLINE */
-#if !defined(NEVER_INLINE) && COMPILER(GCC)
+#if !defined(NEVER_INLINE) && COMPILER(GCC_OR_CLANG)
#define NEVER_INLINE __attribute__((__noinline__))
#endif
@@ -187,7 +231,7 @@
/* NO_RETURN */
-#if !defined(NO_RETURN) && COMPILER(GCC)
+#if !defined(NO_RETURN) && COMPILER(GCC_OR_CLANG)
#define NO_RETURN __attribute((__noreturn__))
#endif
@@ -199,6 +243,15 @@
#define NO_RETURN
#endif
+/* RETURNS_NONNULL */
+#if !defined(RETURNS_NONNULL) && COMPILER(GCC_OR_CLANG)
+#define RETURNS_NONNULL __attribute__((returns_nonnull))
+#endif
+
+#if !defined(RETURNS_NONNULL)
+#define RETURNS_NONNULL
+#endif
+
/* NO_RETURN_WITH_VALUE */
#if !defined(NO_RETURN_WITH_VALUE) && !COMPILER(MSVC)
@@ -221,7 +274,7 @@
/* PURE_FUNCTION */
-#if !defined(PURE_FUNCTION) && COMPILER(GCC)
+#if !defined(PURE_FUNCTION) && COMPILER(GCC_OR_CLANG)
#define PURE_FUNCTION __attribute__((__pure__))
#endif
@@ -231,7 +284,7 @@
/* REFERENCED_FROM_ASM */
-#if !defined(REFERENCED_FROM_ASM) && COMPILER(GCC)
+#if !defined(REFERENCED_FROM_ASM) && COMPILER(GCC_OR_CLANG)
#define REFERENCED_FROM_ASM __attribute__((__used__))
#endif
@@ -241,7 +294,7 @@
/* UNLIKELY */
-#if !defined(UNLIKELY) && COMPILER(GCC)
+#if !defined(UNLIKELY) && COMPILER(GCC_OR_CLANG)
#define UNLIKELY(x) __builtin_expect(!!(x), 0)
#endif
@@ -274,7 +327,7 @@
/* WARN_UNUSED_RETURN */
-#if !defined(WARN_UNUSED_RETURN) && COMPILER(GCC)
+#if !defined(WARN_UNUSED_RETURN) && COMPILER(GCC_OR_CLANG)
#define WARN_UNUSED_RETURN __attribute__((__warn_unused_result__))
#endif
@@ -282,4 +335,8 @@
#define WARN_UNUSED_RETURN
#endif
+#if !defined(__has_include) && COMPILER(MSVC)
+#define __has_include(path) 0
+#endif
+
#endif /* WTF_Compiler_h */
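Two hedged examples of the newly added macros; both functions are hypothetical and exist only to show the intended expansion:

#include <wtf/Compiler.h>

// RELAXED_CONSTEXPR expands to constexpr only when C++14 relaxed constexpr is available,
// so a function with loops and mutation degrades gracefully to a plain function elsewhere.
RELAXED_CONSTEXPR unsigned populationCount(unsigned value)
{
    unsigned count = 0;
    for (; value; value &= value - 1)
        ++count;
    return count;
}

// SUPPRESS_ASAN opts a single function out of AddressSanitizer instrumentation when
// ASAN_ENABLED is set; it expands to nothing in regular builds.
SUPPRESS_ASAN static void* readWithoutAsanChecks(void** slot)
{
    return *slot;
}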
diff --git a/Source/WTF/wtf/Compression.cpp b/Source/WTF/wtf/Compression.cpp
deleted file mode 100644
index f30924b9c..000000000
--- a/Source/WTF/wtf/Compression.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "Compression.h"
-
-#include "CheckedArithmetic.h"
-
-#if USE(ZLIB) && !COMPILER(MSVC)
-
-#include <string.h>
-#include <zlib.h>
-
-namespace WTF {
-
-static void* zAlloc(void*, uint32_t count, uint32_t size)
-{
- CheckedSize allocSize = count;
- allocSize *= size;
- if (allocSize.hasOverflowed())
- return Z_NULL;
- void* result = 0;
- if (tryFastMalloc(allocSize.unsafeGet()).getValue(result))
- return result;
- return Z_NULL;
-}
-
-static void zFree(void*, void* data)
-{
- fastFree(data);
-}
-
-PassOwnPtr<GenericCompressedData> GenericCompressedData::create(const uint8_t* data, size_t dataLength)
-{
- enum { MinimumSize = sizeof(GenericCompressedData) * 8 };
-
- if (!data || dataLength < MinimumSize)
- return nullptr;
-
- z_stream stream;
- memset(&stream, 0, sizeof(stream));
- stream.zalloc = zAlloc;
- stream.zfree = zFree;
- stream.data_type = Z_BINARY;
- stream.opaque = Z_NULL;
- stream.avail_in = dataLength;
- stream.next_in = const_cast<uint8_t*>(data);
-
- size_t currentOffset = OBJECT_OFFSETOF(GenericCompressedData, m_data);
- size_t currentCapacity = fastMallocGoodSize(MinimumSize);
- Bytef* compressedData = static_cast<Bytef*>(fastMalloc(currentCapacity));
- memset(compressedData, 0, sizeof(GenericCompressedData));
- stream.next_out = compressedData + currentOffset;
- stream.avail_out = currentCapacity - currentOffset;
-
- deflateInit(&stream, Z_BEST_COMPRESSION);
-
- while (true) {
- int deflateResult = deflate(&stream, Z_FINISH);
- if (deflateResult == Z_OK || !stream.avail_out) {
- size_t newCapacity = 0;
- currentCapacity -= stream.avail_out;
- if (!stream.avail_in)
- newCapacity = currentCapacity + 8;
- else {
- // Determine average capacity
- size_t compressedContent = stream.next_in - data;
- double expectedSize = static_cast<double>(dataLength) * compressedContent / currentCapacity;
-
- // Expand capacity by at least 8 bytes so we're always growing, and to
- // compensate for any exaggerated ideas of how effectively we'll compress
- // data in the future.
- newCapacity = std::max(static_cast<size_t>(expectedSize + 8), currentCapacity + 8);
- }
- newCapacity = fastMallocGoodSize(newCapacity);
- if (newCapacity >= dataLength)
- goto fail;
- compressedData = static_cast<Bytef*>(fastRealloc(compressedData, newCapacity));
- currentOffset = currentCapacity - stream.avail_out;
- stream.next_out = compressedData + currentOffset;
- stream.avail_out = newCapacity - currentCapacity;
- currentCapacity = newCapacity;
- continue;
- }
-
- if (deflateResult == Z_STREAM_END) {
- ASSERT(!stream.avail_in);
- break;
- }
-
- ASSERT_NOT_REACHED();
- fail:
- deflateEnd(&stream);
- fastFree(compressedData);
- return nullptr;
- }
- deflateEnd(&stream);
- static int64_t totalCompressed = 0;
- static int64_t totalInput = 0;
-
- totalCompressed += currentCapacity;
- totalInput += dataLength;
- GenericCompressedData* result = new (compressedData) GenericCompressedData(dataLength, stream.total_out);
- return adoptPtr(result);
-}
-
-bool GenericCompressedData::decompress(uint8_t* destination, size_t bufferSize, size_t* decompressedByteCount)
-{
- if (decompressedByteCount)
- *decompressedByteCount = 0;
- z_stream stream;
- memset(&stream, 0, sizeof(stream));
- stream.zalloc = zAlloc;
- stream.zfree = zFree;
- stream.data_type = Z_BINARY;
- stream.opaque = Z_NULL;
- stream.next_out = destination;
- stream.avail_out = bufferSize;
- stream.next_in = m_data;
- stream.avail_in = compressedSize();
- if (inflateInit(&stream) != Z_OK) {
- ASSERT_NOT_REACHED();
- return false;
- }
-
- int inflateResult = inflate(&stream, Z_FINISH);
- inflateEnd(&stream);
-
- ASSERT(stream.total_out <= bufferSize);
- if (decompressedByteCount)
- *decompressedByteCount = stream.total_out;
-
- if (inflateResult != Z_STREAM_END) {
- ASSERT_NOT_REACHED();
- return false;
- }
-
- return true;
-}
-
-}
-
-#else
-
-namespace WTF {
-PassOwnPtr<GenericCompressedData> GenericCompressedData::create(const uint8_t*, size_t)
-{
- return nullptr;
-}
-
-bool GenericCompressedData::decompress(uint8_t*, size_t, size_t*)
-{
- return false;
-}
-}
-
-#endif
diff --git a/Source/WTF/wtf/Compression.h b/Source/WTF/wtf/Compression.h
deleted file mode 100644
index 4c8fd06ed..000000000
--- a/Source/WTF/wtf/Compression.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef Compression_h
-#define Compression_h
-
-#include <wtf/PassOwnPtr.h>
-#include <wtf/Vector.h>
-
-namespace WTF {
-
-class GenericCompressedData {
- WTF_MAKE_NONCOPYABLE(GenericCompressedData)
- WTF_MAKE_FAST_ALLOCATED;
-public:
- WTF_EXPORT_PRIVATE static PassOwnPtr<GenericCompressedData> create(const uint8_t*, size_t);
- uint32_t compressedSize() const { return m_compressedSize; }
- uint32_t originalSize() const { return m_originalSize; }
-
- WTF_EXPORT_PRIVATE bool decompress(uint8_t* destination, size_t bufferSize, size_t* decompressedByteCount = 0);
-
-private:
- GenericCompressedData(size_t originalSize, size_t compressedSize)
- {
- UNUSED_PARAM(m_data);
- ASSERT(!m_originalSize);
- ASSERT(!m_compressedSize);
- m_originalSize = originalSize;
- m_compressedSize = compressedSize;
- }
- uint32_t m_originalSize;
- uint32_t m_compressedSize;
- uint8_t m_data[1];
-};
-
-template <typename T> class CompressedVector : public GenericCompressedData {
-public:
- static PassOwnPtr<CompressedVector> create(const Vector<T>& source)
- {
- OwnPtr<GenericCompressedData> result = GenericCompressedData::create(reinterpret_cast<const uint8_t*>(source.data()), sizeof(T) * source.size());
- return adoptPtr(static_cast<CompressedVector<T>*>(result.leakPtr()));
- }
-
- void decompress(Vector<T>& destination)
- {
- Vector<T> output(originalSize() / sizeof(T));
- ASSERT(output.size() * sizeof(T) == originalSize());
- size_t decompressedByteCount = 0;
- GenericCompressedData::decompress(reinterpret_cast<uint8_t*>(output.data()), originalSize(), &decompressedByteCount);
- ASSERT(decompressedByteCount == originalSize());
- ASSERT(output.size() * sizeof(T) == decompressedByteCount);
-
- destination.swap(output);
- }
-
- size_t size() const { return originalSize() / sizeof(T); }
-};
-
-template <typename T> class CompressibleVector {
- WTF_MAKE_NONCOPYABLE(CompressibleVector)
-public:
- CompressibleVector(size_t size = 0)
- : m_decompressedData(size)
- {
- }
-
- typedef typename Vector<T>::iterator iterator;
- typedef typename Vector<T>::const_iterator const_iterator;
-
- void shrinkToFit()
- {
- ASSERT(!m_compressedData);
- m_compressedData = CompressedVector<T>::create(m_decompressedData);
- if (m_compressedData)
- m_decompressedData.clear();
- else
- m_decompressedData.shrinkToFit();
- }
-
- size_t size()
- {
- if (m_compressedData)
- return m_compressedData->size();
- return m_decompressedData.size();
- }
-
- template <typename U> T& operator[](Checked<U> index) { return data().at(index); }
- template <typename U> const T& operator[](Checked<U> index) const { return data().at(index); }
- template <typename U> T& at(Checked<U> index) { return data().at(index); }
- template <typename U> const T& at(Checked<U> index) const { return data().at(index); }
-
- iterator begin() { return data().begin(); }
- iterator end() { return data().end(); }
- const_iterator begin() const { return data().begin(); }
- const_iterator end() const { return data().end(); }
-
- const Vector<T>& data() const
- {
- decompressIfNecessary();
- return m_decompressedData;
- }
-
- Vector<T>& data()
- {
- decompressIfNecessary();
- return m_decompressedData;
- }
-
-private:
- void decompressIfNecessary() const
- {
- if (!m_compressedData)
- return;
- m_compressedData->decompress(m_decompressedData);
- m_compressedData.clear();
- }
- mutable Vector<T> m_decompressedData;
- mutable OwnPtr<CompressedVector<T>> m_compressedData;
-};
-
-}
-
-using WTF::GenericCompressedData;
-using WTF::CompressedVector;
-using WTF::CompressibleVector;
-
-#endif
diff --git a/Source/WTF/wtf/Condition.h b/Source/WTF/wtf/Condition.h
new file mode 100644
index 000000000..bb0c9a541
--- /dev/null
+++ b/Source/WTF/wtf/Condition.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Condition_h
+#define WTF_Condition_h
+
+#include <functional>
+#include <wtf/CurrentTime.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/ParkingLot.h>
+#include <wtf/TimeWithDynamicClockType.h>
+
+namespace WTF {
+
+// This is a condition variable that is suitable for use with any lock-like object, including
+// our own WTF::Lock. It features standard wait()/notifyOne()/notifyAll() methods in addition to
+// a variety of wait-with-timeout methods. This includes methods that use WTF's own notion of
+// time, like wall-clock time (i.e. currentTime()) and monotonic time (i.e.
+// monotonicallyIncreasingTime()). This is a very efficient condition variable. It only requires
+// one byte of memory. notifyOne() and notifyAll() require just a load and branch for the fast
+// case where no thread is waiting. This condition variable, when used with WTF::Lock, can
+// outperform a system condition variable and lock by up to 58x.
+
+// This is a struct without a constructor or destructor so that it can be statically initialized.
+// Use Condition in instance variables.
+struct ConditionBase {
+ // Condition will accept any kind of time and convert it internally, but this typedef tells
+ // you what kind of time Condition would be able to use without conversions. However, if you
+ // are unlikely to be affected by the cost of conversions, it is better to use MonotonicTime.
+ typedef ParkingLot::Time Time;
+
+ // Wait on a parking queue while releasing the given lock. It will unlock the lock just before
+ // parking, and relock it upon wakeup. Returns true if we woke up due to some call to
+ // notifyOne() or notifyAll(). Returns false if we woke up due to a timeout. Note that this form
+ // of waitUntil() has some quirks:
+ //
+ // No spurious wake-up: in order for this to return before the timeout, some notifyOne() or
+ // notifyAll() call must have happened. No scenario other than timeout or notify can lead to this
+ // method returning. This means, for example, that you can't use pthread cancelation or signals to
+ // cause early return.
+ //
+ // Past timeout: it's possible for waitUntil() to be called with a timeout in the past. In that
+ // case, waitUntil() will still release the lock and reacquire it. waitUntil() will always return
+ // false in that case. This is subtly different from some pthread_cond_timedwait() implementations,
+ // which may not release the lock for past timeout. But, this behavior is consistent with OpenGroup
+ // documentation for timedwait().
+ template<typename LockType>
+ bool waitUntil(LockType& lock, const TimeWithDynamicClockType& timeout)
+ {
+ bool result;
+ if (timeout < timeout.nowWithSameClock()) {
+ lock.unlock();
+ result = false;
+ } else {
+ result = ParkingLot::parkConditionally(
+ &m_hasWaiters,
+ [this] () -> bool {
+ // Let everyone know that we will be waiting. Do this while we hold the queue lock,
+ // to prevent races with notifyOne().
+ m_hasWaiters.store(true);
+ return true;
+ },
+ [&lock] () { lock.unlock(); },
+ timeout).wasUnparked;
+ }
+ lock.lock();
+ return result;
+ }
+
+ // Wait until the given predicate is satisfied. Returns true if it is satisfied in the end.
+ // May return early due to timeout.
+ template<typename LockType, typename Functor>
+ bool waitUntil(
+ LockType& lock, const TimeWithDynamicClockType& timeout, const Functor& predicate)
+ {
+ while (!predicate()) {
+ if (!waitUntil(lock, timeout))
+ return predicate();
+ }
+ return true;
+ }
+
+ // Wait until the given predicate is satisfied. Returns true if it is satisfied in the end.
+ // May return early due to timeout.
+ template<typename LockType, typename Functor>
+ bool waitFor(
+ LockType& lock, Seconds relativeTimeout, const Functor& predicate)
+ {
+ return waitUntil(lock, MonotonicTime::now() + relativeTimeout, predicate);
+ }
+
+ template<typename LockType>
+ bool waitFor(LockType& lock, Seconds relativeTimeout)
+ {
+ return waitUntil(lock, MonotonicTime::now() + relativeTimeout);
+ }
+
+ template<typename LockType>
+ void wait(LockType& lock)
+ {
+ waitUntil(lock, Time::infinity());
+ }
+
+ template<typename LockType, typename Functor>
+ void wait(LockType& lock, const Functor& predicate)
+ {
+ while (!predicate())
+ wait(lock);
+ }
+
+ // Note that this method is extremely fast when nobody is waiting. It is not necessary to try to
+ // avoid calling this method. This returns true if someone was actually woken up.
+ bool notifyOne()
+ {
+ if (!m_hasWaiters.load()) {
+ // At this exact instant, there is nobody waiting on this condition. The way to visualize
+ // this is that if unparkOne() ran to completion without obstructions at this moment, it
+ // wouldn't wake anyone up. Hence, we have nothing to do!
+ return false;
+ }
+
+ bool didNotifyThread = false;
+ ParkingLot::unparkOne(
+ &m_hasWaiters,
+ [&] (ParkingLot::UnparkResult result) -> intptr_t {
+ if (!result.mayHaveMoreThreads)
+ m_hasWaiters.store(false);
+ didNotifyThread = result.didUnparkThread;
+ return 0;
+ });
+ return didNotifyThread;
+ }
+
+ void notifyAll()
+ {
+ if (!m_hasWaiters.load()) {
+ // See above.
+ return;
+ }
+
+ // It's totally safe for us to set this to false without any locking, because this thread is
+ // guaranteed to then unparkAll() anyway. So, if there is a race with some thread calling
+ // wait() just before this store happens, that thread is guaranteed to be awoken by the call to
+ // unparkAll(), below.
+ m_hasWaiters.store(false);
+
+ ParkingLot::unparkAll(&m_hasWaiters);
+ }
+
+protected:
+ Atomic<bool> m_hasWaiters;
+};
+
+class Condition : public ConditionBase {
+ WTF_MAKE_NONCOPYABLE(Condition);
+public:
+ Condition()
+ {
+ m_hasWaiters.store(false);
+ }
+};
+
+typedef ConditionBase StaticCondition;
+
+} // namespace WTF
+
+using WTF::Condition;
+using WTF::StaticCondition;
+
+#endif // WTF_Condition_h
+
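A minimal producer/consumer sketch showing Condition paired with WTF::Lock; the class and its names are illustrative, not code from this patch:

#include <wtf/Condition.h>
#include <wtf/Deque.h>
#include <wtf/Lock.h>

class Mailbox {
public:
    void post(int message)
    {
        LockHolder locker(m_lock);
        m_messages.append(message);
        m_condition.notifyOne();             // cheap no-op when nobody is waiting
    }

    int waitForMessage()
    {
        LockHolder locker(m_lock);
        // wait() releases m_lock while parked and reacquires it before re-checking the predicate.
        m_condition.wait(m_lock, [this] { return !m_messages.isEmpty(); });
        return m_messages.takeFirst();
    }

private:
    Lock m_lock;
    Condition m_condition;
    Deque<int> m_messages;
};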
diff --git a/Source/WTF/wtf/CrossThreadCopier.cpp b/Source/WTF/wtf/CrossThreadCopier.cpp
new file mode 100644
index 000000000..e59249f98
--- /dev/null
+++ b/Source/WTF/wtf/CrossThreadCopier.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include <wtf/CrossThreadCopier.h>
+
+#include <wtf/CrossThreadTask.h>
+
+namespace WTF {
+
+// Test CrossThreadCopier using COMPILE_ASSERT.
+
+// Verify that ThreadSafeRefCounted objects get handled correctly.
+class CopierThreadSafeRefCountedTest : public ThreadSafeRefCounted<CopierThreadSafeRefCountedTest> {
+};
+
+COMPILE_ASSERT((std::is_same<
+ PassRefPtr<CopierThreadSafeRefCountedTest>,
+ CrossThreadCopier<PassRefPtr<CopierThreadSafeRefCountedTest>>::Type
+ >::value),
+ PassRefPtrTest);
+COMPILE_ASSERT((std::is_same<
+ PassRefPtr<CopierThreadSafeRefCountedTest>,
+ CrossThreadCopier<RefPtr<CopierThreadSafeRefCountedTest>>::Type
+ >::value),
+ RefPtrTest);
+COMPILE_ASSERT((std::is_same<
+ PassRefPtr<CopierThreadSafeRefCountedTest>,
+ CrossThreadCopier<CopierThreadSafeRefCountedTest*>::Type
+ >::value),
+ RawPointerTest);
+
+// Add specializations for RefCounted types which will let us verify that no other template matches.
+template<typename T> struct CrossThreadCopierBase<false, false, RefPtr<T>> {
+ typedef int Type;
+};
+
+template<typename T> struct CrossThreadCopierBase<false, false, PassRefPtr<T>> {
+ typedef int Type;
+};
+
+template<typename T> struct CrossThreadCopierBase<false, false, T*> {
+ typedef int Type;
+};
+
+// Verify that RefCounted objects only match the above templates which expose Type as int.
+class CopierRefCountedTest : public RefCounted<CopierRefCountedTest> {
+};
+
+static_assert((std::is_same<int, CrossThreadCopier<PassRefPtr<CopierRefCountedTest>>::Type>::value), "CrossThreadCopier specialization improperly applied to PassRefPtr<> of a RefCounted (but not ThreadSafeRefCounted) type");
+static_assert((std::is_same<int, CrossThreadCopier<RefPtr<CopierRefCountedTest>>::Type>::value), "CrossThreadCopier specialization improperly applied to RefPtr<> of a RefCounted (but not ThreadSafeRefCounted) type");
+static_assert((std::is_same<int, CrossThreadCopier<CopierRefCountedTest*>::Type>::value), "CrossThreadCopier specialization improperly applied to raw pointer of a RefCounted (but not ThreadSafeRefCounted) type");
+
+} // namespace WTF
+
diff --git a/Source/WTF/wtf/CrossThreadCopier.h b/Source/WTF/wtf/CrossThreadCopier.h
new file mode 100644
index 000000000..f0c86e550
--- /dev/null
+++ b/Source/WTF/wtf/CrossThreadCopier.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2009, 2010 Google Inc. All rights reserved.
+ * Copyright (C) 2014, 2015, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/Assertions.h>
+#include <wtf/Forward.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefPtr.h>
+#include <wtf/Threading.h>
+#include <wtf/text/WTFString.h>
+
+namespace WTF {
+
+struct CrossThreadCopierBaseHelper {
+ template<typename T> struct RemovePointer {
+ typedef T Type;
+ };
+ template<typename T> struct RemovePointer<T*> {
+ typedef T Type;
+ };
+
+ template<typename T> struct RemovePointer<RefPtr<T>> {
+ typedef T Type;
+ };
+
+ template<typename T> struct RemovePointer<PassRefPtr<T>> {
+ typedef T Type;
+ };
+
+ template<typename T> struct IsEnumOrConvertibleToInteger {
+ static const bool value = std::is_integral<T>::value || std::is_enum<T>::value || std::is_convertible<T, long double>::value;
+ };
+
+ template<typename T> struct IsThreadSafeRefCountedPointer {
+ static const bool value = std::is_convertible<typename RemovePointer<T>::Type*, ThreadSafeRefCounted<typename RemovePointer<T>::Type>*>::value;
+ };
+};
+
+template<typename T> struct CrossThreadCopierPassThrough {
+ typedef T Type;
+ static Type copy(const T& parameter)
+ {
+ return parameter;
+ }
+};
+
+template<bool isEnumOrConvertibleToInteger, bool isThreadSafeRefCounted, typename T> struct CrossThreadCopierBase;
+
+// Integers get passed through without any changes.
+template<typename T> struct CrossThreadCopierBase<true, false, T> : public CrossThreadCopierPassThrough<T> {
+};
+
+// Classes that have an isolatedCopy() method get a default specialization.
+template<class T> struct CrossThreadCopierBase<false, false, T> {
+ static T copy(const T& value)
+ {
+ return value.isolatedCopy();
+ }
+};
+
+// Custom copy methods.
+template<typename T> struct CrossThreadCopierBase<false, true, T> {
+ typedef typename CrossThreadCopierBaseHelper::RemovePointer<T>::Type RefCountedType;
+ static_assert(std::is_convertible<RefCountedType*, ThreadSafeRefCounted<RefCountedType>*>::value, "T is not convertible to ThreadSafeRefCounted!");
+
+ typedef PassRefPtr<RefCountedType> Type;
+ static Type copy(const T& refPtr)
+ {
+ return refPtr;
+ }
+};
+
+template<> struct CrossThreadCopierBase<false, false, std::chrono::system_clock::time_point> {
+ typedef std::chrono::system_clock::time_point Type;
+ static Type copy(const Type& source)
+ {
+ return source;
+ }
+};
+
+template<> struct CrossThreadCopierBase<false, false, WTF::ASCIILiteral> {
+ typedef WTF::ASCIILiteral Type;
+ static Type copy(const Type& source)
+ {
+ return source;
+ }
+};
+
+template<typename T>
+struct CrossThreadCopier : public CrossThreadCopierBase<CrossThreadCopierBaseHelper::IsEnumOrConvertibleToInteger<T>::value, CrossThreadCopierBaseHelper::IsThreadSafeRefCountedPointer<T>::value, T> {
+};
+
+// Default specialization for Vectors of CrossThreadCopyable classes.
+template<typename T> struct CrossThreadCopierBase<false, false, Vector<T>> {
+ typedef Vector<T> Type;
+ static Type copy(const Type& source)
+ {
+ Type destination;
+ destination.reserveInitialCapacity(source.size());
+ for (auto& object : source)
+ destination.uncheckedAppend(CrossThreadCopier<T>::copy(object));
+ return destination;
+ }
+};
+
+} // namespace WTF
+
+using WTF::CrossThreadCopierBaseHelper;
+using WTF::CrossThreadCopierBase;
+using WTF::CrossThreadCopier;
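For reference, a hedged sketch of what the generic machinery above does for a container of isolatedCopy()-able values; copyForOtherThread() is hypothetical:

#include <wtf/CrossThreadCopier.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

static Vector<String> copyForOtherThread(const Vector<String>& names)
{
    // Vector<String> is neither integral nor ThreadSafeRefCounted, so the Vector specialization
    // applies and each element is deep-copied via String::isolatedCopy().
    return CrossThreadCopier<Vector<String>>::copy(names);
}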
diff --git a/Source/WTF/wtf/CrossThreadQueue.h b/Source/WTF/wtf/CrossThreadQueue.h
new file mode 100644
index 000000000..e0b4781df
--- /dev/null
+++ b/Source/WTF/wtf/CrossThreadQueue.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <limits>
+#include <wtf/Assertions.h>
+#include <wtf/Condition.h>
+#include <wtf/Deque.h>
+#include <wtf/Lock.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/Optional.h>
+#include <wtf/Threading.h>
+
+namespace WTF {
+
+template<typename DataType>
+class CrossThreadQueue {
+ WTF_MAKE_NONCOPYABLE(CrossThreadQueue);
+public:
+ CrossThreadQueue() = default;
+
+ void append(DataType&&);
+
+ DataType waitForMessage();
+ std::optional<DataType> tryGetMessage();
+
+ bool isKilled() const { return false; }
+
+private:
+ mutable Lock m_lock;
+ Condition m_condition;
+ Deque<DataType> m_queue;
+};
+
+template<typename DataType>
+void CrossThreadQueue<DataType>::append(DataType&& message)
+{
+ LockHolder lock(m_lock);
+ m_queue.append(WTFMove(message));
+ m_condition.notifyOne();
+}
+
+template<typename DataType>
+DataType CrossThreadQueue<DataType>::waitForMessage()
+{
+ LockHolder lock(m_lock);
+
+ auto found = m_queue.end();
+ while (found == m_queue.end()) {
+ found = m_queue.begin();
+ if (found != m_queue.end())
+ break;
+
+ m_condition.wait(m_lock);
+ }
+
+ return m_queue.takeFirst();
+}
+
+template<typename DataType>
+std::optional<DataType> CrossThreadQueue<DataType>::tryGetMessage()
+{
+ LockHolder lock(m_lock);
+
+ if (m_queue.isEmpty())
+ return { };
+
+ return m_queue.takeFirst();
+}
+
+} // namespace WTF
+
+using WTF::CrossThreadQueue;
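A short sketch of the intended producer/consumer usage; both free functions are illustrative:

#include <wtf/CrossThreadQueue.h>
#include <wtf/text/WTFString.h>

static void produce(CrossThreadQueue<String>& queue, const String& message)
{
    // Values must already be safe to consume on another thread, hence isolatedCopy().
    queue.append(message.isolatedCopy());
}

static void consumeForever(CrossThreadQueue<String>& queue)
{
    while (true) {
        String message = queue.waitForMessage();   // blocks on the internal Condition until data arrives
        if (message == "stop")
            return;
        // ... handle message ...
    }
}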
diff --git a/Source/WTF/wtf/CrossThreadTask.h b/Source/WTF/wtf/CrossThreadTask.h
new file mode 100644
index 000000000..0a66ec17f
--- /dev/null
+++ b/Source/WTF/wtf/CrossThreadTask.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2013, 2015, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/CrossThreadCopier.h>
+#include <wtf/Function.h>
+#include <wtf/StdLibExtras.h>
+
+namespace WTF {
+
+class CrossThreadTask {
+public:
+ CrossThreadTask() = default;
+
+ CrossThreadTask(Function<void ()>&& taskFunction)
+ : m_taskFunction(WTFMove(taskFunction))
+ {
+ ASSERT(m_taskFunction);
+ }
+
+ void performTask()
+ {
+ m_taskFunction();
+ }
+
+protected:
+ Function<void ()> m_taskFunction;
+};
+
+template <typename T>
+T crossThreadCopy(const T& t)
+{
+ return CrossThreadCopier<T>::copy(t);
+}
+
+template <typename F, typename ArgsTuple, size_t... ArgsIndex>
+void callFunctionForCrossThreadTaskImpl(F function, ArgsTuple&& args, std::index_sequence<ArgsIndex...>)
+{
+ function(std::get<ArgsIndex>(std::forward<ArgsTuple>(args))...);
+}
+
+template <typename F, typename ArgsTuple, typename ArgsIndices = std::make_index_sequence<std::tuple_size<ArgsTuple>::value>>
+void callFunctionForCrossThreadTask(F function, ArgsTuple&& args)
+{
+ callFunctionForCrossThreadTaskImpl(function, std::forward<ArgsTuple>(args), ArgsIndices());
+}
+
+template<typename... Parameters, typename... Arguments>
+CrossThreadTask createCrossThreadTask(void (*method)(Parameters...), const Arguments&... arguments)
+{
+ return CrossThreadTask([method, arguments = std::make_tuple(crossThreadCopy<Arguments>(arguments)...)]() mutable {
+ callFunctionForCrossThreadTask(method, WTFMove(arguments));
+ });
+}
+
+template <typename C, typename MF, typename ArgsTuple, size_t... ArgsIndex>
+void callMemberFunctionForCrossThreadTaskImpl(C* object, MF function, ArgsTuple&& args, std::index_sequence<ArgsIndex...>)
+{
+ (object->*function)(std::get<ArgsIndex>(std::forward<ArgsTuple>(args))...);
+}
+
+template <typename C, typename MF, typename ArgsTuple, typename ArgsIndices = std::make_index_sequence<std::tuple_size<ArgsTuple>::value>>
+void callMemberFunctionForCrossThreadTask(C* object, MF function, ArgsTuple&& args)
+{
+ callMemberFunctionForCrossThreadTaskImpl(object, function, std::forward<ArgsTuple>(args), ArgsIndices());
+}
+
+template<typename T, typename... Parameters, typename... Arguments>
+CrossThreadTask createCrossThreadTask(T& callee, void (T::*method)(Parameters...), const Arguments&... arguments)
+{
+ return CrossThreadTask([callee = &callee, method, arguments = std::make_tuple(crossThreadCopy<Arguments>(arguments)...)]() mutable {
+ callMemberFunctionForCrossThreadTask(callee, method, WTFMove(arguments));
+ });
+}
+
+} // namespace WTF
+
+using WTF::CrossThreadTask;
+using WTF::createCrossThreadTask;
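A hedged sketch of createCrossThreadTask(); the worker function and its arguments are hypothetical:

#include <wtf/CrossThreadTask.h>
#include <wtf/DataLog.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

static void logOnBackgroundThread(String message, Vector<String> tags)
{
    dataLogF("%s (%zu tags)\n", message.utf8().data(), tags.size());
}

static CrossThreadTask makeLoggingTask(const String& message, const Vector<String>& tags)
{
    // Each argument is passed through CrossThreadCopier (String and Vector<String> use isolatedCopy()),
    // so the returned task can be queued to and run on another thread.
    return createCrossThreadTask(logOnBackgroundThread, message, tags);
}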
diff --git a/Source/WTF/wtf/PossiblyNull.h b/Source/WTF/wtf/CryptographicUtilities.cpp
index 46a7d713b..600c8cec7 100644
--- a/Source/WTF/wtf/PossiblyNull.h
+++ b/Source/WTF/wtf/CryptographicUtilities.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2008 Torch Mobile Inc. All rights reserved. (http://www.torchmobile.com/)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -20,40 +20,25 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef PossiblyNull_h
-#define PossiblyNull_h
-
-#include <wtf/Assertions.h>
+#include "config.h"
+#include "CryptographicUtilities.h"
namespace WTF {
-template <typename T> struct PossiblyNull {
- PossiblyNull(T data)
- : m_data(data)
- {
- }
- PossiblyNull(const PossiblyNull<T>& source)
- : m_data(source.m_data)
- {
- source.m_data = 0;
- }
- ~PossiblyNull() { ASSERT(!m_data); }
- bool getValue(T& out) WARN_UNUSED_RETURN;
-private:
- mutable T m_data;
-};
-
-template <typename T> bool PossiblyNull<T>::getValue(T& out)
+// FIXME: Use platform APIs where available. See <rdar://problem/12685603> for Mac/iOS.
+NEVER_INLINE int constantTimeMemcmp(const void* voidA, const void* voidB, size_t length)
{
- out = m_data;
- bool result = !!m_data;
- m_data = 0;
+ const uint8_t* a = static_cast<const uint8_t*>(voidA);
+ const uint8_t* b = static_cast<const uint8_t*>(voidB);
+
+ uint8_t result = 0;
+ for (size_t i = 0; i < length; ++i)
+ result |= a[i] ^ b[i];
+
return result;
}
}
-
-#endif
diff --git a/Source/WTF/wtf/CryptographicUtilities.h b/Source/WTF/wtf/CryptographicUtilities.h
new file mode 100644
index 000000000..452a4a83c
--- /dev/null
+++ b/Source/WTF/wtf/CryptographicUtilities.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_CryptographicUtilities_h
+#define WTF_CryptographicUtilities_h
+
+namespace WTF {
+
+// Returns zero if arrays are equal, and non-zero otherwise. Execution time does not depend on array contents.
+WTF_EXPORT_PRIVATE int constantTimeMemcmp(const void*, const void*, size_t length);
+
+}
+
+using WTF::constantTimeMemcmp;
+
+#endif
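A sketch of the intended use, comparing secrets without a data-dependent early exit; verifyDigest() is hypothetical:

#include <cstdint>
#include <wtf/CryptographicUtilities.h>
#include <wtf/Vector.h>

static bool verifyDigest(const Vector<uint8_t>& expected, const Vector<uint8_t>& received)
{
    if (expected.size() != received.size())
        return false;
    // Unlike memcmp(), every byte is visited, so timing does not reveal the length of the matching prefix.
    return !constantTimeMemcmp(expected.data(), received.data(), expected.size());
}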
diff --git a/Source/WTF/wtf/CryptographicallyRandomNumber.cpp b/Source/WTF/wtf/CryptographicallyRandomNumber.cpp
index 123608616..8bcb91607 100644
--- a/Source/WTF/wtf/CryptographicallyRandomNumber.cpp
+++ b/Source/WTF/wtf/CryptographicallyRandomNumber.cpp
@@ -33,6 +33,7 @@
#include "NeverDestroyed.h"
#include "OSRandomSource.h"
#include <mutex>
+#include <wtf/Lock.h>
namespace WTF {
@@ -64,7 +65,7 @@ private:
ARC4Stream m_stream;
int m_count;
- std::mutex m_mutex;
+ Lock m_mutex;
};
ARC4Stream::ARC4Stream()
@@ -136,7 +137,7 @@ uint32_t ARC4RandomNumberGenerator::getWord()
uint32_t ARC4RandomNumberGenerator::randomNumber()
{
- std::lock_guard<std::mutex> lock(m_mutex);
+ std::lock_guard<Lock> lock(m_mutex);
m_count -= 4;
stirIfNeeded();
@@ -145,7 +146,7 @@ uint32_t ARC4RandomNumberGenerator::randomNumber()
void ARC4RandomNumberGenerator::randomValues(void* buffer, size_t length)
{
- std::lock_guard<std::mutex> lock(m_mutex);
+ std::lock_guard<Lock> lock(m_mutex);
unsigned char* result = reinterpret_cast<unsigned char*>(buffer);
stirIfNeeded();
diff --git a/Source/WTF/wtf/CryptographicallyRandomNumber.h b/Source/WTF/wtf/CryptographicallyRandomNumber.h
index 6e8f8756e..8e9f271f1 100644
--- a/Source/WTF/wtf/CryptographicallyRandomNumber.h
+++ b/Source/WTF/wtf/CryptographicallyRandomNumber.h
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
diff --git a/Source/WTF/wtf/CurrentTime.cpp b/Source/WTF/wtf/CurrentTime.cpp
index a50da4151..71afebe44 100644
--- a/Source/WTF/wtf/CurrentTime.cpp
+++ b/Source/WTF/wtf/CurrentTime.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006, 2008, 2009, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2006-2016 Apple Inc. All rights reserved.
* Copyright (C) 2008 Google Inc. All rights reserved.
* Copyright (C) 2007-2009 Torch Mobile, Inc.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
@@ -34,9 +34,13 @@
#include "config.h"
#include "CurrentTime.h"
+#include "Condition.h"
+#include "Lock.h"
+
#if OS(DARWIN)
#include <mach/mach.h>
#include <mach/mach_time.h>
+#include <mutex>
#include <sys/time.h>
#elif OS(WINDOWS)
@@ -48,14 +52,11 @@
#include <math.h>
#include <stdint.h>
#include <time.h>
-
-#elif PLATFORM(EFL)
-#include <Ecore.h>
#else
#include <sys/time.h>
#endif
-#if USE(GLIB) && !PLATFORM(EFL)
+#if USE(GLIB)
#include <glib.h>
#endif
@@ -71,11 +72,7 @@ static double lowResUTCTime()
{
FILETIME fileTime;
-#if OS(WINCE)
- GetCurrentFT(&fileTime);
-#else
GetSystemTimeAsFileTime(&fileTime);
-#endif
// As per Windows documentation for FILETIME, copy the resulting FILETIME structure to a
// ULARGE_INTEGER structure using memcpy (using memcpy instead of direct assignment can
@@ -106,7 +103,11 @@ static double highResUpTime()
LARGE_INTEGER qpc;
QueryPerformanceCounter(&qpc);
+#if defined(_M_IX86) || defined(__i386__)
DWORD tickCount = GetTickCount();
+#else
+ ULONGLONG tickCount = GetTickCount64();
+#endif
if (inited) {
__int64 qpcElapsed = ((qpc.QuadPart - qpcLast.QuadPart) * 1000) / qpcFrequency.QuadPart;
@@ -218,7 +219,7 @@ double currentTime()
#endif // USE(QUERY_PERFORMANCE_COUNTER)
-#elif USE(GLIB) && !PLATFORM(EFL)
+#elif USE(GLIB)
// Note: GTK on Windows will pick up the PLATFORM(WIN) implementation above which provides
// better accuracy compared with Windows implementation of g_get_current_time:
@@ -231,13 +232,6 @@ double currentTime()
return static_cast<double>(now.tv_sec) + static_cast<double>(now.tv_usec / 1000000.0);
}
-#elif PLATFORM(EFL)
-
-double currentTime()
-{
- return ecore_time_unix_get();
-}
-
#else
double currentTime()
@@ -249,31 +243,27 @@ double currentTime()
#endif
-#if PLATFORM(MAC)
+#if USE(GLIB)
double monotonicallyIncreasingTime()
{
- // Based on listing #2 from Apple QA 1398.
- static mach_timebase_info_data_t timebaseInfo;
- if (!timebaseInfo.denom) {
- kern_return_t kr = mach_timebase_info(&timebaseInfo);
- ASSERT_UNUSED(kr, kr == KERN_SUCCESS);
- }
- return (mach_absolute_time() * timebaseInfo.numer) / (1.0e9 * timebaseInfo.denom);
+ return static_cast<double>(g_get_monotonic_time() / 1000000.0);
}
-#elif PLATFORM(EFL)
+#elif OS(DARWIN)
double monotonicallyIncreasingTime()
{
- return ecore_time_get();
-}
-
-#elif USE(GLIB) && !PLATFORM(EFL)
+ // Based on listing #2 from Apple QA 1398, but modified to be thread-safe.
+ static mach_timebase_info_data_t timebaseInfo;
+ static std::once_flag initializeTimerOnceFlag;
+ std::call_once(initializeTimerOnceFlag, [] {
+ kern_return_t kr = mach_timebase_info(&timebaseInfo);
+ ASSERT_UNUSED(kr, kr == KERN_SUCCESS);
+ ASSERT(timebaseInfo.denom);
+ });
-double monotonicallyIncreasingTime()
-{
- return static_cast<double>(g_get_monotonic_time() / 1000000.0);
+ return (mach_absolute_time() * timebaseInfo.numer) / (1.0e9 * timebaseInfo.denom);
}
#else
@@ -290,7 +280,7 @@ double monotonicallyIncreasingTime()
#endif
-double currentCPUTime()
+std::chrono::microseconds currentCPUTime()
{
#if OS(DARWIN)
mach_msg_type_number_t infoCount = THREAD_BASIC_INFO_COUNT;
@@ -300,11 +290,8 @@ double currentCPUTime()
mach_port_t threadPort = mach_thread_self();
thread_info(threadPort, THREAD_BASIC_INFO, reinterpret_cast<thread_info_t>(&info), &infoCount);
mach_port_deallocate(mach_task_self(), threadPort);
-
- double time = info.user_time.seconds + info.user_time.microseconds / 1000000.;
- time += info.system_time.seconds + info.system_time.microseconds / 1000000.;
-
- return time;
+
+ return std::chrono::seconds(info.user_time.seconds + info.system_time.seconds) + std::chrono::microseconds(info.user_time.microseconds + info.system_time.microseconds);
#elif OS(WINDOWS)
union {
FILETIME fileTime;
@@ -316,20 +303,27 @@ double currentCPUTime()
FILETIME creationTime, exitTime;
GetThreadTimes(GetCurrentThread(), &creationTime, &exitTime, &kernelTime.fileTime, &userTime.fileTime);
-
- return userTime.fileTimeAsLong / 10000000. + kernelTime.fileTimeAsLong / 10000000.;
+
+ return std::chrono::microseconds((userTime.fileTimeAsLong + kernelTime.fileTimeAsLong) / 10);
#else
// FIXME: We should return the time the current thread has spent executing.
- // use a relative time from first call in order to avoid an overflow
- static double firstTime = currentTime();
- return currentTime() - firstTime;
+ static auto firstTime = std::chrono::steady_clock::now();
+ return std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - firstTime);
#endif
}
-double currentCPUTimeMS()
+void sleep(double value)
{
- return currentCPUTime() * 1000;
+ // It's very challenging to find portable ways of sleeping for less than a second. On UNIX, you want to
+ // use usleep() but it's hard to #include it in a portable way (you'd think it's in unistd.h, but then
+ // you'd be wrong on some OSX SDKs). Also, usleep() won't save you on Windows. Hence, bottoming out in
+ // lock code, which already solves the sleeping problem, is probably for the best.
+
+ Lock fakeLock;
+ Condition fakeCondition;
+ LockHolder fakeLocker(fakeLock);
+ fakeCondition.waitFor(fakeLock, Seconds(value));
}
} // namespace WTF
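
Editor's note: per the comment in the hunk above, the new sleep() deliberately bottoms out in lock code by waiting on a condition that nothing ever notifies. A minimal standalone sketch of the same idea, using std::condition_variable rather than WTF's Lock/Condition (names and behavior here are illustrative only):

```cpp
// Sketch of "sleep by waiting on a never-signalled condition".
// Not the WTF implementation; spurious wakeups may shorten the sleep in this sketch.
#include <chrono>
#include <condition_variable>
#include <mutex>

void sleepSeconds(double value)
{
    std::mutex fakeMutex;
    std::condition_variable fakeCondition;
    std::unique_lock<std::mutex> lock(fakeMutex);
    // wait_for returns on timeout because the condition is never notified.
    fakeCondition.wait_for(lock, std::chrono::duration<double>(value));
}
```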
diff --git a/Source/WTF/wtf/CurrentTime.h b/Source/WTF/wtf/CurrentTime.h
index 78e58017f..74b236f79 100644
--- a/Source/WTF/wtf/CurrentTime.h
+++ b/Source/WTF/wtf/CurrentTime.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (C) 2006 Apple Inc. All rights reserved.
* Copyright (C) 2008 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -32,6 +32,7 @@
#ifndef CurrentTime_h
#define CurrentTime_h
+#include <chrono>
#include <time.h>
namespace WTF {
@@ -55,19 +56,31 @@ inline double currentTimeMS()
// NTP or manual adjustments, so it is better suited for elapsed time measurement.
WTF_EXPORT_PRIVATE double monotonicallyIncreasingTime();
-// Returns the current CPU time of the current thread in seconds.
+inline double monotonicallyIncreasingTimeMS()
+{
+ return monotonicallyIncreasingTime() * 1000.0;
+}
+
+// Returns the current CPU time of the current thread.
// Precision varies depending on platform but is usually as good or better
// than a millisecond.
-WTF_EXPORT_PRIVATE double currentCPUTime();
+WTF_EXPORT_PRIVATE std::chrono::microseconds currentCPUTime();
+
+WTF_EXPORT_PRIVATE void sleep(double);
-// Returns the current CPU time of the current thread in milliseconds.
-WTF_EXPORT_PRIVATE double currentCPUTimeMS();
+inline void sleepMS(double value)
+{
+ sleep(value / 1000.0);
+}
} // namespace WTF
+using WTF::currentCPUTime;
using WTF::currentTime;
using WTF::currentTimeMS;
using WTF::monotonicallyIncreasingTime;
-using WTF::currentCPUTime;
+using WTF::monotonicallyIncreasingTimeMS;
+using WTF::sleep;
+using WTF::sleepMS;
#endif // CurrentTime_h
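
Editor's note: with currentCPUTime() now returning std::chrono::microseconds, the removed currentCPUTimeMS() helper has no replacement in the header; callers are expected to convert explicitly. A hypothetical call site (not taken from the patch) might read:

```cpp
// Hypothetical caller updated for the std::chrono-based API.
#include <chrono>
#include <wtf/CurrentTime.h>

double cpuTimeInMilliseconds()
{
    auto cpuTime = WTF::currentCPUTime(); // std::chrono::microseconds
    return std::chrono::duration<double, std::milli>(cpuTime).count();
}
```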
diff --git a/Source/WTF/wtf/DataLog.cpp b/Source/WTF/wtf/DataLog.cpp
index 9b3525cfe..cab141493 100644
--- a/Source/WTF/wtf/DataLog.cpp
+++ b/Source/WTF/wtf/DataLog.cpp
@@ -26,83 +26,125 @@
#include "config.h"
#include "DataLog.h"
#include <stdarg.h>
+#include <string.h>
#include <wtf/FilePrintStream.h>
-#include <wtf/WTFThreadData.h>
+#include <wtf/LockedPrintStream.h>
#include <wtf/Threading.h>
+#include <mutex>
+#include <thread>
-#if OS(UNIX)
+#if OS(UNIX) || OS(DARWIN)
#include <unistd.h>
#endif
-#if OS(WINCE)
-#ifndef _IONBF
-#define _IONBF 0x0004
-#endif
-#endif
-
#define DATA_LOG_TO_FILE 0
-// Uncomment to force logging to the given file regardless of what the environment variable says. Note that
-// we will append ".<pid>.txt" where <pid> is the PID.
+// Set to 1 to use the temp directory from confstr instead of hardcoded directory.
+// The last component of DATA_LOG_FILENAME will still be used.
+#define DATA_LOG_TO_DARWIN_TEMP_DIR 0
+// Uncomment to force logging to the given file regardless of what the environment variable says.
+// Note that we will append ".<pid>.txt" where <pid> is the PID.
// This path won't work on Windows, make sure to change to something like C:\\Users\\<more path>\\log.txt.
#define DATA_LOG_FILENAME "/tmp/WTFLog"
namespace WTF {
-#if USE(PTHREADS)
-static pthread_once_t initializeLogFileOnceKey = PTHREAD_ONCE_INIT;
-#endif
-
-static FilePrintStream* file;
+static PrintStream* s_file;
-static uint64_t fileData[(sizeof(FilePrintStream) + 7) / 8];
+static uint64_t s_fileData[(sizeof(FilePrintStream) + 7) / 8];
+static uint64_t s_lockedFileData[(sizeof(LockedPrintStream) + 7) / 8];
static void initializeLogFileOnce()
{
+ FilePrintStream* file = nullptr;
+
#if DATA_LOG_TO_FILE
-#ifdef DATA_LOG_FILENAME
+ const long maxPathLength = 1024;
+
+ char filenameSuffix[maxPathLength + 1];
+
+#if PLATFORM(WIN)
+ _snprintf(filenameSuffix, sizeof(filenameSuffix), ".%d.txt", GetCurrentProcessId());
+#else
+ snprintf(filenameSuffix, sizeof(filenameSuffix), ".%d.txt", getpid());
+#endif
+
+#if DATA_LOG_TO_DARWIN_TEMP_DIR
+ char filenameBuffer[maxPathLength + 1];
+ unsigned suffixLength = strlen(filenameSuffix);
+
+#if defined(DATA_LOG_FILENAME)
+ char* logBasename = strrchr(DATA_LOG_FILENAME, '/');
+ if (!logBasename)
+ logBasename = (char*)DATA_LOG_FILENAME;
+#else
+ const char* logBasename = "WTFLog";
+#endif
+
+ const char* filename = nullptr;
+
+ bool success = confstr(_CS_DARWIN_USER_TEMP_DIR, filenameBuffer, sizeof(filenameBuffer));
+ if (success) {
+ // FIXME: Assert that the path ends with a slash instead of adding a slash if it does not exist
+ // once <rdar://problem/23579077> is fixed in all iOS Simulator versions that we use.
+ size_t lastComponentLength = strlen(logBasename) + suffixLength;
+ size_t dirnameLength = strlen(filenameBuffer);
+ bool shouldAddPathSeparator = filenameBuffer[dirnameLength - 1] != '/' && logBasename[0] != '/';
+ if (lastComponentLength + shouldAddPathSeparator <= sizeof(filenameBuffer) - dirnameLength - 1) {
+ if (shouldAddPathSeparator)
+ strncat(filenameBuffer, "/", 1);
+ strncat(filenameBuffer, logBasename, sizeof(filenameBuffer) - strlen(filenameBuffer) - 1);
+ filename = filenameBuffer;
+ }
+ }
+#elif defined(DATA_LOG_FILENAME)
const char* filename = DATA_LOG_FILENAME;
#else
const char* filename = getenv("WTF_DATA_LOG_FILENAME");
#endif
- char actualFilename[1024];
+ char actualFilename[maxPathLength + 1];
+ if (filename) {
#if PLATFORM(WIN)
- _snprintf(actualFilename, sizeof(actualFilename), "%s.%d.txt", filename, GetCurrentProcessId());
+ _snprintf(actualFilename, sizeof(actualFilename), "%s%s", filename, filenameSuffix);
#else
- snprintf(actualFilename, sizeof(actualFilename), "%s.%d.txt", filename, getpid());
+ snprintf(actualFilename, sizeof(actualFilename), "%s%s", filename, filenameSuffix);
#endif
-
- if (filename) {
+
file = FilePrintStream::open(actualFilename, "w").release();
- if (!file)
- fprintf(stderr, "Warning: Could not open log file %s for writing.\n", actualFilename);
+ if (file)
+ WTFLogAlways("*** DataLog output to \"%s\" ***\n", actualFilename);
+ else
+ WTFLogAlways("Warning: Could not open DataLog file %s for writing.\n", actualFilename);
}
#endif // DATA_LOG_TO_FILE
+
if (!file) {
// Use placement new; this makes it easier to use dataLog() to debug
// fastMalloc.
- file = new (fileData) FilePrintStream(stderr, FilePrintStream::Borrow);
+ file = new (s_fileData) FilePrintStream(stderr, FilePrintStream::Borrow);
}
setvbuf(file->file(), 0, _IONBF, 0); // Prefer unbuffered output, so that we get a full log upon crash or deadlock.
+
+ s_file = new (s_lockedFileData) LockedPrintStream(std::unique_ptr<FilePrintStream>(file));
}
static void initializeLogFile()
{
-#if USE(PTHREADS)
- pthread_once(&initializeLogFileOnceKey, initializeLogFileOnce);
-#else
- if (!file)
- initializeLogFileOnce();
-#endif
+ static std::once_flag once;
+ std::call_once(
+ once,
+ [] {
+ initializeLogFileOnce();
+ });
}
-FilePrintStream& dataFile()
+PrintStream& dataFile()
{
initializeLogFile();
- return *file;
+ return *s_file;
}
void dataLogFV(const char* format, va_list argList)
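
Editor's note: the rewritten initialization combines std::call_once with placement new into static storage, so opening the log never goes through fastMalloc (which dataLog() may itself be used to debug). Reduced to its essentials, with a stand-in Logger type instead of FilePrintStream/LockedPrintStream, the pattern looks roughly like this:

```cpp
// Minimal sketch of the call_once + placement-new-into-static-storage pattern.
// Logger is a stand-in type; this is not the WTF code.
#include <cstdio>
#include <mutex>
#include <new>

struct Logger {
    explicit Logger(FILE* file) : m_file(file) { }
    void print(const char* message) { std::fputs(message, m_file); }
    FILE* m_file;
};

// Static storage sized and aligned for Logger; never heap-allocated, never freed.
static alignas(Logger) unsigned char s_loggerStorage[sizeof(Logger)];
static Logger* s_logger;

Logger& logger()
{
    static std::once_flag once;
    std::call_once(once, [] {
        s_logger = new (s_loggerStorage) Logger(stderr);
    });
    return *s_logger;
}
```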
diff --git a/Source/WTF/wtf/DataLog.h b/Source/WTF/wtf/DataLog.h
index 33cd9f5b0..13f913bd5 100644
--- a/Source/WTF/wtf/DataLog.h
+++ b/Source/WTF/wtf/DataLog.h
@@ -28,111 +28,33 @@
#include <stdarg.h>
#include <stdio.h>
-#include <wtf/FilePrintStream.h>
-#include <wtf/Platform.h>
+#include <wtf/PrintStream.h>
#include <wtf/StdLibExtras.h>
namespace WTF {
-WTF_EXPORT_PRIVATE FilePrintStream& dataFile();
+WTF_EXPORT_PRIVATE PrintStream& dataFile();
WTF_EXPORT_PRIVATE void dataLogFV(const char* format, va_list) WTF_ATTRIBUTE_PRINTF(1, 0);
WTF_EXPORT_PRIVATE void dataLogF(const char* format, ...) WTF_ATTRIBUTE_PRINTF(1, 2);
WTF_EXPORT_PRIVATE void dataLogFString(const char*);
-template<typename T>
-void dataLog(const T& value)
+template<typename... Types>
+void dataLog(const Types&... values)
{
- dataFile().print(value);
+ dataFile().print(values...);
}
-template<typename T1, typename T2>
-void dataLog(const T1& value1, const T2& value2)
+template<typename... Types>
+void dataLogLn(const Types&... values)
{
- dataFile().print(value1, value2);
-}
-
-template<typename T1, typename T2, typename T3>
-void dataLog(const T1& value1, const T2& value2, const T3& value3)
-{
- dataFile().print(value1, value2, value3);
-}
-
-template<typename T1, typename T2, typename T3, typename T4>
-void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4)
-{
- dataFile().print(value1, value2, value3, value4);
-}
-
-template<typename T1, typename T2, typename T3, typename T4, typename T5>
-void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5)
-{
- dataFile().print(value1, value2, value3, value4, value5);
-}
-
-template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
-void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6)
-{
- dataFile().print(value1, value2, value3, value4, value5, value6);
-}
-
-template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
-void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7)
-{
- dataFile().print(value1, value2, value3, value4, value5, value6, value7);
-}
-
-template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
-void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8)
-{
- dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8);
-}
-
-template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
-void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9)
-{
- dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9);
-}
-
-template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10>
-void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10)
-{
- dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10);
-}
-
-template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11>
-void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11)
-{
- dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11);
-}
-
-template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12>
-void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12)
-{
- dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11, value12);
-}
-
-template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13>
-void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12, const T13& value13)
-{
- dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11, value12, value13);
-}
-
-template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13, typename T14>
-void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12, const T13& value13, const T14& value14)
-{
- dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11, value12, value13, value14);
-}
-
-template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13, typename T14, typename T15>
-void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12, const T13& value13, const T14& value14, const T15& value15)
-{
- dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11, value12, value13, value14, value15);
+ dataFile().print(values..., "\n");
}
} // namespace WTF
using WTF::dataLog;
+using WTF::dataLogLn;
using WTF::dataLogF;
using WTF::dataLogFString;
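
Editor's note: the fifteen fixed-arity dataLog() overloads collapse into a single variadic template, and dataLogLn() is added as a newline-appending variant; the parameter pack is forwarded straight to PrintStream::print(). Call sites are unchanged for any argument count, for example (variable names made up):

```cpp
// Illustrative use of the variadic dataLog()/dataLogLn().
#include <wtf/DataLog.h>

void reportProgress(unsigned completed, unsigned total)
{
    WTF::dataLogLn("processed ", completed, " of ", total, " items");
}
```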
diff --git a/Source/WTF/wtf/DateMath.cpp b/Source/WTF/wtf/DateMath.cpp
index 654bd8d2d..1350e3040 100644
--- a/Source/WTF/wtf/DateMath.cpp
+++ b/Source/WTF/wtf/DateMath.cpp
@@ -121,18 +121,16 @@ static const int firstDayOfMonth[2][12] = {
{0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335}
};
-#if !OS(WINCE)
static inline void getLocalTime(const time_t* localTime, struct tm* localTM)
{
-#if COMPILER(MINGW)
- *localTM = *localtime(localTime);
-#elif COMPILER(MSVC)
+#if COMPILER(MSVC)
localtime_s(localTM, localTime);
-#else
+#elif HAVE(LOCALTIME_R)
localtime_r(localTime, localTM);
+#else
+ localtime_s(localTime, localTM);
#endif
}
-#endif
bool isLeapYear(int year)
{
@@ -166,7 +164,7 @@ static inline double daysFrom1970ToYear(int year)
const double yearsToExcludeBy100Rule = floor(yearMinusOne / 100.0) - excludedLeapDaysBefore1971By100Rule;
const double yearsToAddBy400Rule = floor(yearMinusOne / 400.0) - leapDaysBefore1971By400Rule;
- return 365.0 * (year - 1970) + yearsToAddBy4Rule - yearsToExcludeBy100Rule + yearsToAddBy400Rule;
+ return 365.0 * (year - 1970.0) + yearsToAddBy4Rule - yearsToExcludeBy100Rule + yearsToAddBy400Rule;
}
double msToDays(double ms)
@@ -359,16 +357,56 @@ int equivalentYearForDST(int year)
int product = (quotient) * 28;
year += product;
- ASSERT((year >= minYear && year <= maxYear) || (product - year == static_cast<int>(std::numeric_limits<double>::quiet_NaN())));
return year;
}
+#if OS(WINDOWS)
+typedef BOOL(WINAPI* callGetTimeZoneInformationForYear_t)(USHORT, PDYNAMIC_TIME_ZONE_INFORMATION, LPTIME_ZONE_INFORMATION);
+
+static callGetTimeZoneInformationForYear_t timeZoneInformationForYearFunction()
+{
+ static callGetTimeZoneInformationForYear_t getTimeZoneInformationForYear = nullptr;
+
+ if (getTimeZoneInformationForYear)
+ return getTimeZoneInformationForYear;
+
+ HMODULE module = ::GetModuleHandleW(L"kernel32.dll");
+ if (!module)
+ return nullptr;
+
+ getTimeZoneInformationForYear = reinterpret_cast<callGetTimeZoneInformationForYear_t>(::GetProcAddress(module, "GetTimeZoneInformationForYear"));
+
+ return getTimeZoneInformationForYear;
+}
+#endif
+
static int32_t calculateUTCOffset()
{
#if OS(WINDOWS)
TIME_ZONE_INFORMATION timeZoneInformation;
- GetTimeZoneInformation(&timeZoneInformation);
- int32_t bias = timeZoneInformation.Bias + timeZoneInformation.StandardBias;
+ DWORD rc = 0;
+
+ if (callGetTimeZoneInformationForYear_t timeZoneFunction = timeZoneInformationForYearFunction()) {
+ // If available, use the Windows API call that takes into account the varying DST from
+ // year to year.
+ SYSTEMTIME systemTime;
+ ::GetSystemTime(&systemTime);
+ rc = timeZoneFunction(systemTime.wYear, nullptr, &timeZoneInformation);
+ if (rc == TIME_ZONE_ID_INVALID)
+ return 0;
+ } else {
+ rc = ::GetTimeZoneInformation(&timeZoneInformation);
+ if (rc == TIME_ZONE_ID_INVALID)
+ return 0;
+ }
+
+ int32_t bias = timeZoneInformation.Bias;
+
+ if (rc == TIME_ZONE_ID_DAYLIGHT)
+ bias += timeZoneInformation.DaylightBias;
+ else if (rc == TIME_ZONE_ID_STANDARD || rc == TIME_ZONE_ID_UNKNOWN)
+ bias += timeZoneInformation.StandardBias;
+
return -bias * 60 * 1000;
#else
time_t localTime = time(0);
@@ -424,16 +462,14 @@ static void UnixTimeToFileTime(time_t t, LPFILETIME pft)
*/
static double calculateDSTOffset(time_t localTime, double utcOffset)
{
-#if OS(WINCE)
- UNUSED_PARAM(localTime);
- UNUSED_PARAM(utcOffset);
- return 0;
-#elif OS(WINDOWS)
+#if OS(WINDOWS)
FILETIME utcFileTime;
UnixTimeToFileTime(localTime, &utcFileTime);
SYSTEMTIME utcSystemTime, localSystemTime;
- FileTimeToSystemTime(&utcFileTime, &utcSystemTime);
- SystemTimeToTzSpecificLocalTime(0, &utcSystemTime, &localSystemTime);
+ if (!::FileTimeToSystemTime(&utcFileTime, &utcSystemTime))
+ return 0;
+ if (!::SystemTimeToTzSpecificLocalTime(nullptr, &utcSystemTime, &localSystemTime))
+ return 0;
double offsetTime = (localTime * msPerSecond) + utcOffset;
@@ -524,12 +560,10 @@ void initializeDates()
static inline double ymdhmsToSeconds(int year, long mon, long day, long hour, long minute, double second)
{
- double days = (day - 32075)
- + floor(1461 * (year + 4800.0 + (mon - 14) / 12) / 4)
- + 367 * (mon - 2 - (mon - 14) / 12 * 12) / 12
- - floor(3 * ((year + 4900.0 + (mon - 14) / 12) / 100) / 4)
- - 2440588;
- return ((days * hoursPerDay + hour) * minutesPerHour + minute) * secondsPerMinute + second;
+ int mday = firstDayOfMonth[isLeapYear(year)][mon - 1];
+ double ydays = daysFrom1970ToYear(year);
+
+ return (second + minute * secondsPerMinute + hour * secondsPerHour + (mday + day - 1 + ydays) * secondsPerDay);
}
// We follow the recommendation of RFC 2822 to consider all
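
Editor's note: the rewritten ymdhmsToSeconds() drops the Julian-day formula in favour of the existing firstDayOfMonth table plus daysFrom1970ToYear(). As a sanity check of that arithmetic, here is a standalone sketch using simplified integer leap-year bookkeeping (valid for years >= 1970 only; the table and helpers are re-declared locally for illustration):

```cpp
// Standalone check of the day arithmetic behind the new ymdhmsToSeconds().
// Simplified illustration, not the DateMath.cpp code.
#include <cstdio>

static const int firstDayOfMonth[2][12] = {
    { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 },
    { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335 }
};

static bool isLeapYear(int year)
{
    return !(year % 4) && (year % 100 || !(year % 400));
}

static long daysFrom1970ToYear(int year)
{
    int yearMinusOne = year - 1;
    long leapDaysBy4Rule = yearMinusOne / 4 - 1969 / 4;
    long excludedBy100Rule = yearMinusOne / 100 - 1969 / 100;
    long addedBackBy400Rule = yearMinusOne / 400 - 1969 / 400;
    return 365L * (year - 1970) + leapDaysBy4Rule - excludedBy100Rule + addedBackBy400Rule;
}

int main()
{
    // 2017-06-27 00:00:00 UTC: 17167 days to 2017-01-01, +151 to June 1, +26 within June.
    int year = 2017, month = 6, day = 27;
    long days = firstDayOfMonth[isLeapYear(year)][month - 1] + day - 1 + daysFrom1970ToYear(year);
    std::printf("%ld\n", days * 86400L); // prints 1498521600
}
```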
diff --git a/Source/WTF/wtf/DeferrableRefCounted.h b/Source/WTF/wtf/DeferrableRefCounted.h
index 6b9229769..796481a33 100644
--- a/Source/WTF/wtf/DeferrableRefCounted.h
+++ b/Source/WTF/wtf/DeferrableRefCounted.h
@@ -20,11 +20,10 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DeferrableRefCounted_h
-#define DeferrableRefCounted_h
+#pragma once
#include <wtf/Assertions.h>
#include <wtf/FastMalloc.h>
@@ -41,39 +40,39 @@ namespace WTF {
class DeferrableRefCountedBase {
static const unsigned deferredFlag = 1;
static const unsigned normalIncrement = 2;
-
+
public:
- void ref()
+ void ref() const
{
m_refCount += normalIncrement;
}
-
+
bool hasOneRef() const
{
return refCount() == 1;
}
-
+
unsigned refCount() const
{
return m_refCount / normalIncrement;
}
-
+
bool isDeferred() const
{
return !!(m_refCount & deferredFlag);
}
-
+
protected:
DeferrableRefCountedBase()
: m_refCount(normalIncrement)
{
}
-
+
~DeferrableRefCountedBase()
{
}
-
- bool derefBase()
+
+ bool derefBase() const
{
m_refCount -= normalIncrement;
return !m_refCount;
@@ -88,21 +87,21 @@ protected:
m_refCount &= ~deferredFlag;
return !m_refCount;
}
-
+
private:
- unsigned m_refCount;
+ mutable unsigned m_refCount;
};
template<typename T>
class DeferrableRefCounted : public DeferrableRefCountedBase {
WTF_MAKE_NONCOPYABLE(DeferrableRefCounted); WTF_MAKE_FAST_ALLOCATED;
public:
- void deref()
+ void deref() const
{
if (derefBase())
- delete static_cast<T*>(this);
+ delete static_cast<const T*>(this);
}
-
+
bool setIsDeferred(bool value)
{
if (!setIsDeferredBase(value))
@@ -110,7 +109,7 @@ public:
delete static_cast<T*>(this);
return true;
}
-
+
protected:
DeferrableRefCounted() { }
~DeferrableRefCounted() { }
@@ -119,6 +118,3 @@ protected:
} // namespace WTF
using WTF::DeferrableRefCounted;
-
-#endif // DeferrableRefCounted_h
-
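
Editor's note: the reason m_refCount becomes mutable in this hunk is that ref() and deref() are now const-qualified, so they can be called through const references, as is usual for WTF's ref-counted types. A tiny illustration of that convention with a stand-in type (not the WTF class):

```cpp
// Minimal sketch: const ref()/deref() backed by a mutable count.
// Assumes the object is heap-allocated; illustration only.
struct RefCountedThing {
    void ref() const { ++m_refCount; }
    void deref() const
    {
        if (!--m_refCount)
            delete this; // deleting through a pointer-to-const is allowed
    }
    mutable unsigned m_refCount { 1 };
};
```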
diff --git a/Source/WTF/wtf/PageAllocationAligned.h b/Source/WTF/wtf/DeprecatedOptional.h
index 5513d135d..f5966e3d7 100644
--- a/Source/WTF/wtf/PageAllocationAligned.h
+++ b/Source/WTF/wtf/DeprecatedOptional.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,48 +23,29 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef PageAllocationAligned_h
-#define PageAllocationAligned_h
+// This file contains a deprecated version of WTF::Optional which a released
+// version of Safari uses. Once Safari stops using this, we can remove this.
+// New code should use std::optional.
-#include <wtf/OSAllocator.h>
-#include <wtf/PageReservation.h>
+#pragma once
+
+#include <type_traits>
namespace WTF {
-class PageAllocationAligned : private PageBlock {
+template<typename T>
+class Optional {
public:
- PageAllocationAligned()
- {
- }
-
- using PageBlock::operator bool;
- using PageBlock::size;
- using PageBlock::base;
-
- WTF_EXPORT_PRIVATE static PageAllocationAligned allocate(size_t size, size_t alignment, OSAllocator::Usage usage = OSAllocator::UnknownUsage, bool writable = true);
-
- WTF_EXPORT_PRIVATE void deallocate();
+ explicit operator bool() const { return m_isEngaged; }
+ T& value() { return *asPtr(); }
private:
-#if OS(DARWIN)
- PageAllocationAligned(void* base, size_t size)
- : PageBlock(base, size, false)
- {
- }
-#else
- PageAllocationAligned(void* base, size_t size, void* reservationBase, size_t reservationSize)
- : PageBlock(base, size, false)
- , m_reservation(reservationBase, reservationSize, false)
- {
- }
+ T* asPtr() { return reinterpret_cast<T*>(&m_value); }
- PageBlock m_reservation;
-#endif
+ bool m_isEngaged;
+ typename std::aligned_storage<sizeof(T), std::alignment_of<T>::value>::type m_value;
};
+template<typename T> using DeprecatedOptional = WTF::Optional<T>;
} // namespace WTF
-
-using WTF::PageAllocationAligned;
-
-#endif // PageAllocationAligned_h
diff --git a/Source/WTF/wtf/Deque.h b/Source/WTF/wtf/Deque.h
index 6203090d2..2205a07c1 100644
--- a/Source/WTF/wtf/Deque.h
+++ b/Source/WTF/wtf/Deque.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2007, 2008, 2014 Apple Inc. All rights reserved.
* Copyright (C) 2009 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -38,619 +38,648 @@
namespace WTF {
- template<typename T, size_t inlineCapacity> class DequeIteratorBase;
- template<typename T, size_t inlineCapacity> class DequeIterator;
- template<typename T, size_t inlineCapacity> class DequeConstIterator;
-
- template<typename T, size_t inlineCapacity = 0>
- class Deque {
- WTF_MAKE_FAST_ALLOCATED;
- public:
- typedef DequeIterator<T, inlineCapacity> iterator;
- typedef DequeConstIterator<T, inlineCapacity> const_iterator;
- typedef std::reverse_iterator<iterator> reverse_iterator;
- typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
-
- Deque();
- Deque(const Deque<T, inlineCapacity>&);
- Deque& operator=(const Deque<T, inlineCapacity>&);
- ~Deque();
-
- void swap(Deque<T, inlineCapacity>&);
-
- size_t size() const { return m_start <= m_end ? m_end - m_start : m_end + m_buffer.capacity() - m_start; }
- bool isEmpty() const { return m_start == m_end; }
-
- iterator begin() { return iterator(this, m_start); }
- iterator end() { return iterator(this, m_end); }
- const_iterator begin() const { return const_iterator(this, m_start); }
- const_iterator end() const { return const_iterator(this, m_end); }
- reverse_iterator rbegin() { return reverse_iterator(end()); }
- reverse_iterator rend() { return reverse_iterator(begin()); }
- const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
- const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
-
- T& first() { ASSERT(m_start != m_end); return m_buffer.buffer()[m_start]; }
- const T& first() const { ASSERT(m_start != m_end); return m_buffer.buffer()[m_start]; }
- T takeFirst();
-
- T& last() { ASSERT(m_start != m_end); return *(--end()); }
- const T& last() const { ASSERT(m_start != m_end); return *(--end()); }
- T takeLast();
-
- template<typename U> void append(U&&);
- template<typename U> void prepend(U&&);
- void removeFirst();
- void removeLast();
- void remove(iterator&);
- void remove(const_iterator&);
-
- void clear();
-
- template<typename Predicate>
- iterator findIf(Predicate&&);
-
- private:
- friend class DequeIteratorBase<T, inlineCapacity>;
-
- typedef VectorBuffer<T, inlineCapacity> Buffer;
- typedef VectorTypeOperations<T> TypeOperations;
- typedef DequeIteratorBase<T, inlineCapacity> IteratorBase;
-
- void remove(size_t position);
- void invalidateIterators();
- void destroyAll();
- void checkValidity() const;
- void checkIndexValidity(size_t) const;
- void expandCapacityIfNeeded();
- void expandCapacity();
-
- size_t m_start;
- size_t m_end;
- Buffer m_buffer;
+template<typename T, size_t inlineCapacity> class DequeIteratorBase;
+template<typename T, size_t inlineCapacity> class DequeIterator;
+template<typename T, size_t inlineCapacity> class DequeConstIterator;
+
+template<typename T, size_t inlineCapacity = 0>
+class Deque {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ typedef DequeIterator<T, inlineCapacity> iterator;
+ typedef DequeConstIterator<T, inlineCapacity> const_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ Deque();
+ Deque(std::initializer_list<T>);
+ Deque(const Deque&);
+ Deque(Deque&&);
+ ~Deque();
+
+ Deque& operator=(const Deque&);
+ Deque& operator=(Deque&&);
+
+ void swap(Deque&);
+
+ size_t size() const { return m_start <= m_end ? m_end - m_start : m_end + m_buffer.capacity() - m_start; }
+ bool isEmpty() const { return m_start == m_end; }
+
+ iterator begin() { return iterator(this, m_start); }
+ iterator end() { return iterator(this, m_end); }
+ const_iterator begin() const { return const_iterator(this, m_start); }
+ const_iterator end() const { return const_iterator(this, m_end); }
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
+ const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
+
+ T& first() { ASSERT(m_start != m_end); return m_buffer.buffer()[m_start]; }
+ const T& first() const { ASSERT(m_start != m_end); return m_buffer.buffer()[m_start]; }
+ T takeFirst();
+
+ T& last() { ASSERT(m_start != m_end); return *(--end()); }
+ const T& last() const { ASSERT(m_start != m_end); return *(--end()); }
+ T takeLast();
+
+ void append(T&& value) { append<T>(std::forward<T>(value)); }
+ template<typename U> void append(U&&);
+ template<typename U> void prepend(U&&);
+ void removeFirst();
+ void removeLast();
+ void remove(iterator&);
+ void remove(const_iterator&);
+
+ void clear();
+
+ template<typename Predicate>
+ iterator findIf(Predicate&&);
+
+private:
+ friend class DequeIteratorBase<T, inlineCapacity>;
+
+ typedef VectorBuffer<T, inlineCapacity> Buffer;
+ typedef VectorTypeOperations<T> TypeOperations;
+ typedef DequeIteratorBase<T, inlineCapacity> IteratorBase;
+
+ void remove(size_t position);
+ void invalidateIterators();
+ void destroyAll();
+ void checkValidity() const;
+ void checkIndexValidity(size_t) const;
+ void expandCapacityIfNeeded();
+ void expandCapacity();
+
+ size_t m_start;
+ size_t m_end;
+ Buffer m_buffer;
#ifndef NDEBUG
- mutable IteratorBase* m_iterators;
+ mutable IteratorBase* m_iterators;
#endif
- };
+};
- template<typename T, size_t inlineCapacity = 0>
- class DequeIteratorBase {
- protected:
- DequeIteratorBase();
- DequeIteratorBase(const Deque<T, inlineCapacity>*, size_t);
- DequeIteratorBase(const DequeIteratorBase&);
- DequeIteratorBase& operator=(const DequeIteratorBase&);
- ~DequeIteratorBase();
+template<typename T, size_t inlineCapacity = 0>
+class DequeIteratorBase {
+protected:
+ DequeIteratorBase();
+ DequeIteratorBase(const Deque<T, inlineCapacity>*, size_t);
+ DequeIteratorBase(const DequeIteratorBase&);
+ DequeIteratorBase& operator=(const DequeIteratorBase&);
+ ~DequeIteratorBase();
- void assign(const DequeIteratorBase& other) { *this = other; }
+ void assign(const DequeIteratorBase& other) { *this = other; }
- void increment();
- void decrement();
+ void increment();
+ void decrement();
- T* before() const;
- T* after() const;
+ T* before() const;
+ T* after() const;
- bool isEqual(const DequeIteratorBase&) const;
+ bool isEqual(const DequeIteratorBase&) const;
- private:
- void addToIteratorsList();
- void removeFromIteratorsList();
- void checkValidity() const;
- void checkValidity(const DequeIteratorBase&) const;
+private:
+ void addToIteratorsList();
+ void removeFromIteratorsList();
+ void checkValidity() const;
+ void checkValidity(const DequeIteratorBase&) const;
- Deque<T, inlineCapacity>* m_deque;
- size_t m_index;
+ Deque<T, inlineCapacity>* m_deque;
+ size_t m_index;
- friend class Deque<T, inlineCapacity>;
+ friend class Deque<T, inlineCapacity>;
#ifndef NDEBUG
- mutable DequeIteratorBase* m_next;
- mutable DequeIteratorBase* m_previous;
+ mutable DequeIteratorBase* m_next;
+ mutable DequeIteratorBase* m_previous;
#endif
- };
-
- template<typename T, size_t inlineCapacity = 0>
- class DequeIterator : public DequeIteratorBase<T, inlineCapacity> {
- private:
- typedef DequeIteratorBase<T, inlineCapacity> Base;
- typedef DequeIterator<T, inlineCapacity> Iterator;
-
- public:
- typedef ptrdiff_t difference_type;
- typedef T value_type;
- typedef T* pointer;
- typedef T& reference;
- typedef std::bidirectional_iterator_tag iterator_category;
-
- DequeIterator(Deque<T, inlineCapacity>* deque, size_t index) : Base(deque, index) { }
-
- DequeIterator(const Iterator& other) : Base(other) { }
- DequeIterator& operator=(const Iterator& other) { Base::assign(other); return *this; }
-
- T& operator*() const { return *Base::after(); }
- T* operator->() const { return Base::after(); }
-
- bool operator==(const Iterator& other) const { return Base::isEqual(other); }
- bool operator!=(const Iterator& other) const { return !Base::isEqual(other); }
-
- Iterator& operator++() { Base::increment(); return *this; }
- // postfix ++ intentionally omitted
- Iterator& operator--() { Base::decrement(); return *this; }
- // postfix -- intentionally omitted
- };
-
- template<typename T, size_t inlineCapacity = 0>
- class DequeConstIterator : public DequeIteratorBase<T, inlineCapacity> {
- private:
- typedef DequeIteratorBase<T, inlineCapacity> Base;
- typedef DequeConstIterator<T, inlineCapacity> Iterator;
- typedef DequeIterator<T, inlineCapacity> NonConstIterator;
-
- public:
- typedef ptrdiff_t difference_type;
- typedef T value_type;
- typedef const T* pointer;
- typedef const T& reference;
- typedef std::bidirectional_iterator_tag iterator_category;
-
- DequeConstIterator(const Deque<T, inlineCapacity>* deque, size_t index) : Base(deque, index) { }
-
- DequeConstIterator(const Iterator& other) : Base(other) { }
- DequeConstIterator(const NonConstIterator& other) : Base(other) { }
- DequeConstIterator& operator=(const Iterator& other) { Base::assign(other); return *this; }
- DequeConstIterator& operator=(const NonConstIterator& other) { Base::assign(other); return *this; }
-
- const T& operator*() const { return *Base::after(); }
- const T* operator->() const { return Base::after(); }
-
- bool operator==(const Iterator& other) const { return Base::isEqual(other); }
- bool operator!=(const Iterator& other) const { return !Base::isEqual(other); }
-
- Iterator& operator++() { Base::increment(); return *this; }
- // postfix ++ intentionally omitted
- Iterator& operator--() { Base::decrement(); return *this; }
- // postfix -- intentionally omitted
- };
+};
+
+template<typename T, size_t inlineCapacity = 0>
+class DequeIterator : public DequeIteratorBase<T, inlineCapacity> {
+private:
+ typedef DequeIteratorBase<T, inlineCapacity> Base;
+ typedef DequeIterator<T, inlineCapacity> Iterator;
+
+public:
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef T* pointer;
+ typedef T& reference;
+ typedef std::bidirectional_iterator_tag iterator_category;
+
+ DequeIterator(Deque<T, inlineCapacity>* deque, size_t index)
+ : Base(deque, index) { }
+
+ DequeIterator(const Iterator& other) : Base(other) { }
+ DequeIterator& operator=(const Iterator& other) { Base::assign(other); return *this; }
+
+ T& operator*() const { return *Base::after(); }
+ T* operator->() const { return Base::after(); }
+
+ bool operator==(const Iterator& other) const { return Base::isEqual(other); }
+ bool operator!=(const Iterator& other) const { return !Base::isEqual(other); }
+
+ Iterator& operator++() { Base::increment(); return *this; }
+ // postfix ++ intentionally omitted
+ Iterator& operator--() { Base::decrement(); return *this; }
+ // postfix -- intentionally omitted
+};
+
+template<typename T, size_t inlineCapacity = 0>
+class DequeConstIterator : public DequeIteratorBase<T, inlineCapacity> {
+private:
+ typedef DequeIteratorBase<T, inlineCapacity> Base;
+ typedef DequeConstIterator<T, inlineCapacity> Iterator;
+ typedef DequeIterator<T, inlineCapacity> NonConstIterator;
+
+public:
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef const T* pointer;
+ typedef const T& reference;
+ typedef std::bidirectional_iterator_tag iterator_category;
+
+ DequeConstIterator(const Deque<T, inlineCapacity>* deque, size_t index)
+ : Base(deque, index) { }
+
+ DequeConstIterator(const Iterator& other) : Base(other) { }
+ DequeConstIterator(const NonConstIterator& other) : Base(other) { }
+ DequeConstIterator& operator=(const Iterator& other) { Base::assign(other); return *this; }
+ DequeConstIterator& operator=(const NonConstIterator& other) { Base::assign(other); return *this; }
+
+ const T& operator*() const { return *Base::after(); }
+ const T* operator->() const { return Base::after(); }
+
+ bool operator==(const Iterator& other) const { return Base::isEqual(other); }
+ bool operator!=(const Iterator& other) const { return !Base::isEqual(other); }
+
+ Iterator& operator++() { Base::increment(); return *this; }
+ // postfix ++ intentionally omitted
+ Iterator& operator--() { Base::decrement(); return *this; }
+ // postfix -- intentionally omitted
+};
#ifdef NDEBUG
- template<typename T, size_t inlineCapacity> inline void Deque<T, inlineCapacity>::checkValidity() const { }
- template<typename T, size_t inlineCapacity> inline void Deque<T, inlineCapacity>::checkIndexValidity(size_t) const { }
- template<typename T, size_t inlineCapacity> inline void Deque<T, inlineCapacity>::invalidateIterators() { }
+template<typename T, size_t inlineCapacity> inline void Deque<T, inlineCapacity>::checkValidity() const { }
+template<typename T, size_t inlineCapacity> inline void Deque<T, inlineCapacity>::checkIndexValidity(size_t) const { }
+template<typename T, size_t inlineCapacity> inline void Deque<T, inlineCapacity>::invalidateIterators() { }
#else
- template<typename T, size_t inlineCapacity>
- void Deque<T, inlineCapacity>::checkValidity() const
- {
- // In this implementation a capacity of 1 would confuse append() and
- // other places that assume the index after capacity - 1 is 0.
- ASSERT(m_buffer.capacity() != 1);
-
- if (!m_buffer.capacity()) {
- ASSERT(!m_start);
- ASSERT(!m_end);
- } else {
- ASSERT(m_start < m_buffer.capacity());
- ASSERT(m_end < m_buffer.capacity());
- }
- }
-
- template<typename T, size_t inlineCapacity>
- void Deque<T, inlineCapacity>::checkIndexValidity(size_t index) const
- {
- ASSERT_UNUSED(index, index <= m_buffer.capacity());
- if (m_start <= m_end) {
- ASSERT(index >= m_start);
- ASSERT(index <= m_end);
- } else {
- ASSERT(index >= m_start || index <= m_end);
- }
- }
-
- template<typename T, size_t inlineCapacity>
- void Deque<T, inlineCapacity>::invalidateIterators()
- {
- IteratorBase* next;
- for (IteratorBase* p = m_iterators; p; p = next) {
- next = p->m_next;
- p->m_deque = 0;
- p->m_next = 0;
- p->m_previous = 0;
- }
- m_iterators = 0;
- }
+template<typename T, size_t inlineCapacity>
+void Deque<T, inlineCapacity>::checkValidity() const
+{
+ // In this implementation a capacity of 1 would confuse append() and
+ // other places that assume the index after capacity - 1 is 0.
+ ASSERT(m_buffer.capacity() != 1);
+
+ if (!m_buffer.capacity()) {
+ ASSERT(!m_start);
+ ASSERT(!m_end);
+ } else {
+ ASSERT(m_start < m_buffer.capacity());
+ ASSERT(m_end < m_buffer.capacity());
+ }
+}
+
+template<typename T, size_t inlineCapacity>
+void Deque<T, inlineCapacity>::checkIndexValidity(size_t index) const
+{
+ ASSERT_UNUSED(index, index <= m_buffer.capacity());
+ if (m_start <= m_end) {
+ ASSERT(index >= m_start);
+ ASSERT(index <= m_end);
+ } else {
+ ASSERT(index >= m_start || index <= m_end);
+ }
+}
+
+template<typename T, size_t inlineCapacity>
+void Deque<T, inlineCapacity>::invalidateIterators()
+{
+ IteratorBase* next;
+ for (IteratorBase* p = m_iterators; p; p = next) {
+ next = p->m_next;
+ p->m_deque = 0;
+ p->m_next = 0;
+ p->m_previous = 0;
+ }
+ m_iterators = 0;
+}
#endif
- template<typename T, size_t inlineCapacity>
- inline Deque<T, inlineCapacity>::Deque()
- : m_start(0)
- , m_end(0)
+template<typename T, size_t inlineCapacity>
+inline Deque<T, inlineCapacity>::Deque()
+ : m_start(0)
+ , m_end(0)
#ifndef NDEBUG
- , m_iterators(0)
+ , m_iterators(0)
#endif
- {
- checkValidity();
- }
-
- template<typename T, size_t inlineCapacity>
- inline Deque<T, inlineCapacity>::Deque(const Deque<T, inlineCapacity>& other)
- : m_start(other.m_start)
- , m_end(other.m_end)
- , m_buffer(other.m_buffer.capacity())
+{
+ checkValidity();
+}
+
+template<typename T, size_t inlineCapacity>
+inline Deque<T, inlineCapacity>::Deque(std::initializer_list<T> initializerList)
+ : Deque()
+{
+ for (auto& element : initializerList)
+ append(element);
+}
+
+template<typename T, size_t inlineCapacity>
+inline Deque<T, inlineCapacity>::Deque(const Deque& other)
+ : m_start(other.m_start)
+ , m_end(other.m_end)
+ , m_buffer(other.m_buffer.capacity())
#ifndef NDEBUG
- , m_iterators(0)
+ , m_iterators(0)
#endif
- {
- const T* otherBuffer = other.m_buffer.buffer();
- if (m_start <= m_end)
- TypeOperations::uninitializedCopy(otherBuffer + m_start, otherBuffer + m_end, m_buffer.buffer() + m_start);
- else {
- TypeOperations::uninitializedCopy(otherBuffer, otherBuffer + m_end, m_buffer.buffer());
- TypeOperations::uninitializedCopy(otherBuffer + m_start, otherBuffer + m_buffer.capacity(), m_buffer.buffer() + m_start);
- }
- }
-
- template<typename T, size_t inlineCapacity>
- inline Deque<T, inlineCapacity>& Deque<T, inlineCapacity>::operator=(const Deque<T, inlineCapacity>& other)
- {
- // FIXME: This is inefficient if we're using an inline buffer and T is
- // expensive to copy since it will copy the buffer twice instead of once.
- Deque<T> copy(other);
- swap(copy);
- return *this;
- }
-
- template<typename T, size_t inlineCapacity>
- inline void Deque<T, inlineCapacity>::destroyAll()
- {
- if (m_start <= m_end)
- TypeOperations::destruct(m_buffer.buffer() + m_start, m_buffer.buffer() + m_end);
- else {
- TypeOperations::destruct(m_buffer.buffer(), m_buffer.buffer() + m_end);
- TypeOperations::destruct(m_buffer.buffer() + m_start, m_buffer.buffer() + m_buffer.capacity());
- }
- }
-
- template<typename T, size_t inlineCapacity>
- inline Deque<T, inlineCapacity>::~Deque()
- {
- checkValidity();
- invalidateIterators();
- destroyAll();
- }
-
- template<typename T, size_t inlineCapacity>
- inline void Deque<T, inlineCapacity>::swap(Deque<T, inlineCapacity>& other)
- {
- checkValidity();
- other.checkValidity();
- invalidateIterators();
- std::swap(m_start, other.m_start);
- std::swap(m_end, other.m_end);
- m_buffer.swap(other.m_buffer);
- checkValidity();
- other.checkValidity();
- }
-
- template<typename T, size_t inlineCapacity>
- inline void Deque<T, inlineCapacity>::clear()
- {
- checkValidity();
- invalidateIterators();
- destroyAll();
- m_start = 0;
- m_end = 0;
- m_buffer.deallocateBuffer(m_buffer.buffer());
- checkValidity();
- }
-
- template<typename T, size_t inlineCapacity>
- template<typename Predicate>
- inline auto Deque<T, inlineCapacity>::findIf(Predicate&& predicate) -> iterator
- {
- iterator end_iterator = end();
- for (iterator it = begin(); it != end_iterator; ++it) {
- if (predicate(*it))
- return it;
- }
- return end_iterator;
- }
-
- template<typename T, size_t inlineCapacity>
- inline void Deque<T, inlineCapacity>::expandCapacityIfNeeded()
- {
- if (m_start) {
- if (m_end + 1 != m_start)
- return;
- } else if (m_end) {
- if (m_end != m_buffer.capacity() - 1)
- return;
- } else if (m_buffer.capacity())
+{
+ const T* otherBuffer = other.m_buffer.buffer();
+ if (m_start <= m_end)
+ TypeOperations::uninitializedCopy(otherBuffer + m_start, otherBuffer + m_end, m_buffer.buffer() + m_start);
+ else {
+ TypeOperations::uninitializedCopy(otherBuffer, otherBuffer + m_end, m_buffer.buffer());
+ TypeOperations::uninitializedCopy(otherBuffer + m_start, otherBuffer + m_buffer.capacity(), m_buffer.buffer() + m_start);
+ }
+}
+
+template<typename T, size_t inlineCapacity>
+inline Deque<T, inlineCapacity>::Deque(Deque&& other)
+ : Deque()
+{
+ swap(other);
+}
+
+template<typename T, size_t inlineCapacity>
+inline auto Deque<T, inlineCapacity>::operator=(const Deque& other) -> Deque&
+{
+ // FIXME: This is inefficient if we're using an inline buffer and T is
+ // expensive to copy since it will copy the buffer twice instead of once.
+ Deque<T, inlineCapacity> copy(other);
+ swap(copy);
+ return *this;
+}
+
+template<typename T, size_t inlineCapacity>
+inline auto Deque<T, inlineCapacity>::operator=(Deque&& other) -> Deque&
+{
+ swap(other);
+ return *this;
+}
+
+template<typename T, size_t inlineCapacity>
+inline void Deque<T, inlineCapacity>::destroyAll()
+{
+ if (m_start <= m_end)
+ TypeOperations::destruct(m_buffer.buffer() + m_start, m_buffer.buffer() + m_end);
+ else {
+ TypeOperations::destruct(m_buffer.buffer(), m_buffer.buffer() + m_end);
+ TypeOperations::destruct(m_buffer.buffer() + m_start, m_buffer.buffer() + m_buffer.capacity());
+ }
+}
+
+template<typename T, size_t inlineCapacity>
+inline Deque<T, inlineCapacity>::~Deque()
+{
+ checkValidity();
+ invalidateIterators();
+ destroyAll();
+}
+
+template<typename T, size_t inlineCapacity>
+inline void Deque<T, inlineCapacity>::swap(Deque<T, inlineCapacity>& other)
+{
+ checkValidity();
+ other.checkValidity();
+ invalidateIterators();
+ std::swap(m_start, other.m_start);
+ std::swap(m_end, other.m_end);
+ m_buffer.swap(other.m_buffer, 0, 0);
+ checkValidity();
+ other.checkValidity();
+}
+
+template<typename T, size_t inlineCapacity>
+inline void Deque<T, inlineCapacity>::clear()
+{
+ checkValidity();
+ invalidateIterators();
+ destroyAll();
+ m_start = 0;
+ m_end = 0;
+ m_buffer.deallocateBuffer(m_buffer.buffer());
+ checkValidity();
+}
+
+template<typename T, size_t inlineCapacity>
+template<typename Predicate>
+inline auto Deque<T, inlineCapacity>::findIf(Predicate&& predicate) -> iterator
+{
+ iterator end_iterator = end();
+ for (iterator it = begin(); it != end_iterator; ++it) {
+ if (predicate(*it))
+ return it;
+ }
+ return end_iterator;
+}
+
+template<typename T, size_t inlineCapacity>
+inline void Deque<T, inlineCapacity>::expandCapacityIfNeeded()
+{
+ if (m_start) {
+ if (m_end + 1 != m_start)
return;
-
- expandCapacity();
- }
-
- template<typename T, size_t inlineCapacity>
- void Deque<T, inlineCapacity>::expandCapacity()
- {
- checkValidity();
- size_t oldCapacity = m_buffer.capacity();
- T* oldBuffer = m_buffer.buffer();
- m_buffer.allocateBuffer(std::max(static_cast<size_t>(16), oldCapacity + oldCapacity / 4 + 1));
- if (m_start <= m_end)
- TypeOperations::move(oldBuffer + m_start, oldBuffer + m_end, m_buffer.buffer() + m_start);
- else {
- TypeOperations::move(oldBuffer, oldBuffer + m_end, m_buffer.buffer());
- size_t newStart = m_buffer.capacity() - (oldCapacity - m_start);
- TypeOperations::move(oldBuffer + m_start, oldBuffer + oldCapacity, m_buffer.buffer() + newStart);
- m_start = newStart;
- }
- m_buffer.deallocateBuffer(oldBuffer);
- checkValidity();
- }
-
- template<typename T, size_t inlineCapacity>
- inline auto Deque<T, inlineCapacity>::takeFirst() -> T
- {
- T oldFirst = std::move(first());
- removeFirst();
- return oldFirst;
- }
-
- template<typename T, size_t inlineCapacity>
- inline auto Deque<T, inlineCapacity>::takeLast() -> T
- {
- T oldLast = std::move(last());
- removeLast();
- return oldLast;
- }
-
- template<typename T, size_t inlineCapacity> template<typename U>
- inline void Deque<T, inlineCapacity>::append(U&& value)
- {
- checkValidity();
- expandCapacityIfNeeded();
- new (NotNull, &m_buffer.buffer()[m_end]) T(std::forward<U>(value));
- if (m_end == m_buffer.capacity() - 1)
- m_end = 0;
- else
- ++m_end;
- checkValidity();
- }
-
- template<typename T, size_t inlineCapacity> template<typename U>
- inline void Deque<T, inlineCapacity>::prepend(U&& value)
- {
- checkValidity();
- expandCapacityIfNeeded();
- if (!m_start)
- m_start = m_buffer.capacity() - 1;
- else
- --m_start;
- new (NotNull, &m_buffer.buffer()[m_start]) T(std::forward<U>(value));
- checkValidity();
- }
-
- template<typename T, size_t inlineCapacity>
- inline void Deque<T, inlineCapacity>::removeFirst()
- {
- checkValidity();
- invalidateIterators();
- ASSERT(!isEmpty());
- TypeOperations::destruct(&m_buffer.buffer()[m_start], &m_buffer.buffer()[m_start + 1]);
- if (m_start == m_buffer.capacity() - 1)
- m_start = 0;
- else
- ++m_start;
- checkValidity();
- }
-
- template<typename T, size_t inlineCapacity>
- inline void Deque<T, inlineCapacity>::removeLast()
- {
- checkValidity();
- invalidateIterators();
- ASSERT(!isEmpty());
- if (!m_end)
- m_end = m_buffer.capacity() - 1;
- else
- --m_end;
- TypeOperations::destruct(&m_buffer.buffer()[m_end], &m_buffer.buffer()[m_end + 1]);
- checkValidity();
- }
-
- template<typename T, size_t inlineCapacity>
- inline void Deque<T, inlineCapacity>::remove(iterator& it)
- {
- it.checkValidity();
- remove(it.m_index);
- }
-
- template<typename T, size_t inlineCapacity>
- inline void Deque<T, inlineCapacity>::remove(const_iterator& it)
- {
- it.checkValidity();
- remove(it.m_index);
- }
-
- template<typename T, size_t inlineCapacity>
- inline void Deque<T, inlineCapacity>::remove(size_t position)
- {
- if (position == m_end)
+ } else if (m_end) {
+ if (m_end != m_buffer.capacity() - 1)
return;
-
- checkValidity();
- invalidateIterators();
-
- T* buffer = m_buffer.buffer();
- TypeOperations::destruct(&buffer[position], &buffer[position + 1]);
-
- // Find which segment of the circular buffer contained the remove element, and only move elements in that part.
- if (position >= m_start) {
- TypeOperations::moveOverlapping(buffer + m_start, buffer + position, buffer + m_start + 1);
- m_start = (m_start + 1) % m_buffer.capacity();
- } else {
- TypeOperations::moveOverlapping(buffer + position + 1, buffer + m_end, buffer + position);
- m_end = (m_end - 1 + m_buffer.capacity()) % m_buffer.capacity();
- }
- checkValidity();
- }
+ } else if (m_buffer.capacity())
+ return;
+
+ expandCapacity();
+}
+
+template<typename T, size_t inlineCapacity>
+void Deque<T, inlineCapacity>::expandCapacity()
+{
+ checkValidity();
+ size_t oldCapacity = m_buffer.capacity();
+ T* oldBuffer = m_buffer.buffer();
+ m_buffer.allocateBuffer(std::max(static_cast<size_t>(16), oldCapacity + oldCapacity / 4 + 1));
+ if (m_start <= m_end)
+ TypeOperations::move(oldBuffer + m_start, oldBuffer + m_end, m_buffer.buffer() + m_start);
+ else {
+ TypeOperations::move(oldBuffer, oldBuffer + m_end, m_buffer.buffer());
+ size_t newStart = m_buffer.capacity() - (oldCapacity - m_start);
+ TypeOperations::move(oldBuffer + m_start, oldBuffer + oldCapacity, m_buffer.buffer() + newStart);
+ m_start = newStart;
+ }
+ m_buffer.deallocateBuffer(oldBuffer);
+ checkValidity();
+}
+
+template<typename T, size_t inlineCapacity>
+inline auto Deque<T, inlineCapacity>::takeFirst() -> T
+{
+ T oldFirst = WTFMove(first());
+ removeFirst();
+ return oldFirst;
+}
+
+template<typename T, size_t inlineCapacity>
+inline auto Deque<T, inlineCapacity>::takeLast() -> T
+{
+ T oldLast = WTFMove(last());
+ removeLast();
+ return oldLast;
+}
+
+template<typename T, size_t inlineCapacity> template<typename U>
+inline void Deque<T, inlineCapacity>::append(U&& value)
+{
+ checkValidity();
+ expandCapacityIfNeeded();
+ new (NotNull, std::addressof(m_buffer.buffer()[m_end])) T(std::forward<U>(value));
+ if (m_end == m_buffer.capacity() - 1)
+ m_end = 0;
+ else
+ ++m_end;
+ checkValidity();
+}
+
+template<typename T, size_t inlineCapacity> template<typename U>
+inline void Deque<T, inlineCapacity>::prepend(U&& value)
+{
+ checkValidity();
+ expandCapacityIfNeeded();
+ if (!m_start)
+ m_start = m_buffer.capacity() - 1;
+ else
+ --m_start;
+ new (NotNull, std::addressof(m_buffer.buffer()[m_start])) T(std::forward<U>(value));
+ checkValidity();
+}
+
+template<typename T, size_t inlineCapacity>
+inline void Deque<T, inlineCapacity>::removeFirst()
+{
+ checkValidity();
+ invalidateIterators();
+ ASSERT(!isEmpty());
+ TypeOperations::destruct(std::addressof(m_buffer.buffer()[m_start]), std::addressof(m_buffer.buffer()[m_start + 1]));
+ if (m_start == m_buffer.capacity() - 1)
+ m_start = 0;
+ else
+ ++m_start;
+ checkValidity();
+}
+
+template<typename T, size_t inlineCapacity>
+inline void Deque<T, inlineCapacity>::removeLast()
+{
+ checkValidity();
+ invalidateIterators();
+ ASSERT(!isEmpty());
+ if (!m_end)
+ m_end = m_buffer.capacity() - 1;
+ else
+ --m_end;
+ TypeOperations::destruct(std::addressof(m_buffer.buffer()[m_end]), std::addressof(m_buffer.buffer()[m_end + 1]));
+ checkValidity();
+}
+
+template<typename T, size_t inlineCapacity>
+inline void Deque<T, inlineCapacity>::remove(iterator& it)
+{
+ it.checkValidity();
+ remove(it.m_index);
+}
+
+template<typename T, size_t inlineCapacity>
+inline void Deque<T, inlineCapacity>::remove(const_iterator& it)
+{
+ it.checkValidity();
+ remove(it.m_index);
+}
+
+template<typename T, size_t inlineCapacity>
+inline void Deque<T, inlineCapacity>::remove(size_t position)
+{
+ if (position == m_end)
+ return;
+
+ checkValidity();
+ invalidateIterators();
+
+ T* buffer = m_buffer.buffer();
+ TypeOperations::destruct(std::addressof(buffer[position]), std::addressof(buffer[position + 1]));
+
+ // Find which segment of the circular buffer contained the remove element, and only move elements in that part.
+ if (position >= m_start) {
+ TypeOperations::moveOverlapping(buffer + m_start, buffer + position, buffer + m_start + 1);
+ m_start = (m_start + 1) % m_buffer.capacity();
+ } else {
+ TypeOperations::moveOverlapping(buffer + position + 1, buffer + m_end, buffer + position);
+ m_end = (m_end - 1 + m_buffer.capacity()) % m_buffer.capacity();
+ }
+ checkValidity();
+}
#ifdef NDEBUG
- template<typename T, size_t inlineCapacity> inline void DequeIteratorBase<T, inlineCapacity>::checkValidity() const { }
- template<typename T, size_t inlineCapacity> inline void DequeIteratorBase<T, inlineCapacity>::checkValidity(const DequeIteratorBase<T, inlineCapacity>&) const { }
- template<typename T, size_t inlineCapacity> inline void DequeIteratorBase<T, inlineCapacity>::addToIteratorsList() { }
- template<typename T, size_t inlineCapacity> inline void DequeIteratorBase<T, inlineCapacity>::removeFromIteratorsList() { }
+template<typename T, size_t inlineCapacity> inline void DequeIteratorBase<T, inlineCapacity>::checkValidity() const { }
+template<typename T, size_t inlineCapacity> inline void DequeIteratorBase<T, inlineCapacity>::checkValidity(const DequeIteratorBase<T, inlineCapacity>&) const { }
+template<typename T, size_t inlineCapacity> inline void DequeIteratorBase<T, inlineCapacity>::addToIteratorsList() { }
+template<typename T, size_t inlineCapacity> inline void DequeIteratorBase<T, inlineCapacity>::removeFromIteratorsList() { }
#else
- template<typename T, size_t inlineCapacity>
- void DequeIteratorBase<T, inlineCapacity>::checkValidity() const
- {
- ASSERT(m_deque);
- m_deque->checkIndexValidity(m_index);
- }
-
- template<typename T, size_t inlineCapacity>
- void DequeIteratorBase<T, inlineCapacity>::checkValidity(const DequeIteratorBase& other) const
- {
- checkValidity();
- other.checkValidity();
- ASSERT(m_deque == other.m_deque);
- }
-
- template<typename T, size_t inlineCapacity>
- void DequeIteratorBase<T, inlineCapacity>::addToIteratorsList()
- {
- if (!m_deque)
- m_next = 0;
- else {
- m_next = m_deque->m_iterators;
- m_deque->m_iterators = this;
- if (m_next)
- m_next->m_previous = this;
+template<typename T, size_t inlineCapacity>
+void DequeIteratorBase<T, inlineCapacity>::checkValidity() const
+{
+ ASSERT(m_deque);
+ m_deque->checkIndexValidity(m_index);
+}
+
+template<typename T, size_t inlineCapacity>
+void DequeIteratorBase<T, inlineCapacity>::checkValidity(const DequeIteratorBase& other) const
+{
+ checkValidity();
+ other.checkValidity();
+ ASSERT(m_deque == other.m_deque);
+}
+
+template<typename T, size_t inlineCapacity>
+void DequeIteratorBase<T, inlineCapacity>::addToIteratorsList()
+{
+ if (!m_deque)
+ m_next = 0;
+ else {
+ m_next = m_deque->m_iterators;
+ m_deque->m_iterators = this;
+ if (m_next)
+ m_next->m_previous = this;
+ }
+ m_previous = 0;
+}
+
+template<typename T, size_t inlineCapacity>
+void DequeIteratorBase<T, inlineCapacity>::removeFromIteratorsList()
+{
+ if (!m_deque) {
+ ASSERT(!m_next);
+ ASSERT(!m_previous);
+ } else {
+ if (m_next) {
+ ASSERT(m_next->m_previous == this);
+ m_next->m_previous = m_previous;
}
- m_previous = 0;
- }
-
- template<typename T, size_t inlineCapacity>
- void DequeIteratorBase<T, inlineCapacity>::removeFromIteratorsList()
- {
- if (!m_deque) {
- ASSERT(!m_next);
- ASSERT(!m_previous);
+ if (m_previous) {
+ ASSERT(m_deque->m_iterators != this);
+ ASSERT(m_previous->m_next == this);
+ m_previous->m_next = m_next;
} else {
- if (m_next) {
- ASSERT(m_next->m_previous == this);
- m_next->m_previous = m_previous;
- }
- if (m_previous) {
- ASSERT(m_deque->m_iterators != this);
- ASSERT(m_previous->m_next == this);
- m_previous->m_next = m_next;
- } else {
- ASSERT(m_deque->m_iterators == this);
- m_deque->m_iterators = m_next;
- }
+ ASSERT(m_deque->m_iterators == this);
+ m_deque->m_iterators = m_next;
}
- m_next = 0;
- m_previous = 0;
}
+ m_next = 0;
+ m_previous = 0;
+}
#endif
- template<typename T, size_t inlineCapacity>
- inline DequeIteratorBase<T, inlineCapacity>::DequeIteratorBase()
- : m_deque(0)
- {
- }
-
- template<typename T, size_t inlineCapacity>
- inline DequeIteratorBase<T, inlineCapacity>::DequeIteratorBase(const Deque<T, inlineCapacity>* deque, size_t index)
- : m_deque(const_cast<Deque<T, inlineCapacity>*>(deque))
- , m_index(index)
- {
- addToIteratorsList();
- checkValidity();
- }
-
- template<typename T, size_t inlineCapacity>
- inline DequeIteratorBase<T, inlineCapacity>::DequeIteratorBase(const DequeIteratorBase& other)
- : m_deque(other.m_deque)
- , m_index(other.m_index)
- {
- addToIteratorsList();
- checkValidity();
- }
-
- template<typename T, size_t inlineCapacity>
- inline DequeIteratorBase<T, inlineCapacity>& DequeIteratorBase<T, inlineCapacity>::operator=(const DequeIteratorBase& other)
- {
- other.checkValidity();
- removeFromIteratorsList();
-
- m_deque = other.m_deque;
- m_index = other.m_index;
- addToIteratorsList();
- checkValidity();
- return *this;
- }
-
- template<typename T, size_t inlineCapacity>
- inline DequeIteratorBase<T, inlineCapacity>::~DequeIteratorBase()
- {
+template<typename T, size_t inlineCapacity>
+inline DequeIteratorBase<T, inlineCapacity>::DequeIteratorBase()
+ : m_deque(0)
+{
+}
+
+template<typename T, size_t inlineCapacity>
+inline DequeIteratorBase<T, inlineCapacity>::DequeIteratorBase(const Deque<T, inlineCapacity>* deque, size_t index)
+ : m_deque(const_cast<Deque<T, inlineCapacity>*>(deque))
+ , m_index(index)
+{
+ addToIteratorsList();
+ checkValidity();
+}
+
+template<typename T, size_t inlineCapacity>
+inline DequeIteratorBase<T, inlineCapacity>::DequeIteratorBase(const DequeIteratorBase& other)
+ : m_deque(other.m_deque)
+ , m_index(other.m_index)
+{
+ addToIteratorsList();
+ checkValidity();
+}
+
+template<typename T, size_t inlineCapacity>
+inline DequeIteratorBase<T, inlineCapacity>& DequeIteratorBase<T, inlineCapacity>::operator=(const DequeIteratorBase& other)
+{
+ other.checkValidity();
+ removeFromIteratorsList();
+
+ m_deque = other.m_deque;
+ m_index = other.m_index;
+ addToIteratorsList();
+ checkValidity();
+ return *this;
+}
+
+template<typename T, size_t inlineCapacity>
+inline DequeIteratorBase<T, inlineCapacity>::~DequeIteratorBase()
+{
#ifndef NDEBUG
- removeFromIteratorsList();
- m_deque = 0;
+ removeFromIteratorsList();
+ m_deque = 0;
#endif
- }
-
- template<typename T, size_t inlineCapacity>
- inline bool DequeIteratorBase<T, inlineCapacity>::isEqual(const DequeIteratorBase& other) const
- {
- checkValidity(other);
- return m_index == other.m_index;
- }
-
- template<typename T, size_t inlineCapacity>
- inline void DequeIteratorBase<T, inlineCapacity>::increment()
- {
- checkValidity();
- ASSERT(m_index != m_deque->m_end);
- ASSERT(m_deque->m_buffer.capacity());
- if (m_index == m_deque->m_buffer.capacity() - 1)
- m_index = 0;
- else
- ++m_index;
- checkValidity();
- }
-
- template<typename T, size_t inlineCapacity>
- inline void DequeIteratorBase<T, inlineCapacity>::decrement()
- {
- checkValidity();
- ASSERT(m_index != m_deque->m_start);
- ASSERT(m_deque->m_buffer.capacity());
- if (!m_index)
- m_index = m_deque->m_buffer.capacity() - 1;
- else
- --m_index;
- checkValidity();
- }
-
- template<typename T, size_t inlineCapacity>
- inline T* DequeIteratorBase<T, inlineCapacity>::after() const
- {
- checkValidity();
- ASSERT(m_index != m_deque->m_end);
- return &m_deque->m_buffer.buffer()[m_index];
- }
-
- template<typename T, size_t inlineCapacity>
- inline T* DequeIteratorBase<T, inlineCapacity>::before() const
- {
- checkValidity();
- ASSERT(m_index != m_deque->m_start);
- if (!m_index)
- return &m_deque->m_buffer.buffer()[m_deque->m_buffer.capacity() - 1];
- return &m_deque->m_buffer.buffer()[m_index - 1];
- }
+}
+
+template<typename T, size_t inlineCapacity>
+inline bool DequeIteratorBase<T, inlineCapacity>::isEqual(const DequeIteratorBase& other) const
+{
+ checkValidity(other);
+ return m_index == other.m_index;
+}
+
+template<typename T, size_t inlineCapacity>
+inline void DequeIteratorBase<T, inlineCapacity>::increment()
+{
+ checkValidity();
+ ASSERT(m_index != m_deque->m_end);
+ ASSERT(m_deque->m_buffer.capacity());
+ if (m_index == m_deque->m_buffer.capacity() - 1)
+ m_index = 0;
+ else
+ ++m_index;
+ checkValidity();
+}
+
+template<typename T, size_t inlineCapacity>
+inline void DequeIteratorBase<T, inlineCapacity>::decrement()
+{
+ checkValidity();
+ ASSERT(m_index != m_deque->m_start);
+ ASSERT(m_deque->m_buffer.capacity());
+ if (!m_index)
+ m_index = m_deque->m_buffer.capacity() - 1;
+ else
+ --m_index;
+ checkValidity();
+}
+
+template<typename T, size_t inlineCapacity>
+inline T* DequeIteratorBase<T, inlineCapacity>::after() const
+{
+ checkValidity();
+ ASSERT(m_index != m_deque->m_end);
+ return std::addressof(m_deque->m_buffer.buffer()[m_index]);
+}
+
+template<typename T, size_t inlineCapacity>
+inline T* DequeIteratorBase<T, inlineCapacity>::before() const
+{
+ checkValidity();
+ ASSERT(m_index != m_deque->m_start);
+ if (!m_index)
+ return std::addressof(m_deque->m_buffer.buffer()[m_deque->m_buffer.capacity() - 1]);
+ return std::addressof(m_deque->m_buffer.buffer()[m_index - 1]);
+}
} // namespace WTF
diff --git a/Source/WTF/wtf/DisallowCType.h b/Source/WTF/wtf/DisallowCType.h
index f40b7a414..30544e729 100644
--- a/Source/WTF/wtf/DisallowCType.h
+++ b/Source/WTF/wtf/DisallowCType.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -40,8 +40,7 @@
// are used from wx headers. On GTK+ for Mac many GTK+ files include <libintl.h>
// or <glib/gi18n-lib.h>, which in turn include <xlocale/_ctype.h> which uses
// isascii().
-#include <wtf/Platform.h>
-#if !(OS(DARWIN) && PLATFORM(GTK)) && !PLATFORM(EFL) && !defined(_LIBCPP_VERSION)
+#if !(OS(DARWIN) && PLATFORM(GTK)) && !defined(_LIBCPP_VERSION) && defined(__GLIBC__)
#include <ctype.h>
diff --git a/Source/WTF/wtf/Dominators.h b/Source/WTF/wtf/Dominators.h
new file mode 100644
index 000000000..e7ab52f7e
--- /dev/null
+++ b/Source/WTF/wtf/Dominators.h
@@ -0,0 +1,752 @@
+/*
+ * Copyright (C) 2011, 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTFDominators_h
+#define WTFDominators_h
+
+#include <wtf/FastBitVector.h>
+#include <wtf/GraphNodeWorklist.h>
+
+namespace WTF {
+
+// This is a utility for finding the dominators of a graph. Dominators are almost universally used
+// for control flow graph analysis, so this code will refer to the graph's "nodes" as "blocks". In
+// that regard this code is kind of specialized for the various JSC compilers, but you could use it
+// for non-compiler things if you are OK with referring to your "nodes" as "blocks".
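+//
+// A minimal usage sketch (illustrative only; "MyGraph", "loopHeader", and "body" are
+// hypothetical stand-ins for a client graph type and its nodes):
+//
+//     MyGraph graph = ...;
+//     Dominators<MyGraph> dominators(graph);
+//     if (dominators.dominates(loopHeader, body))
+//         ...; // Every path from the root to "body" passes through "loopHeader".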
+
+template<typename Graph>
+class Dominators {
+public:
+ Dominators(Graph& graph, bool selfCheck = false)
+ : m_graph(graph)
+ , m_data(graph.template newMap<BlockData>())
+ {
+ LengauerTarjan lengauerTarjan(m_graph);
+ lengauerTarjan.compute();
+
+ m_data = m_graph.template newMap<BlockData>();
+
+ // From here we want to build a spanning tree with both upward and downward links and we want
+ // to do a search over this tree to compute pre and post numbers that can be used for dominance
+ // tests.
+
+ for (unsigned blockIndex = m_graph.numNodes(); blockIndex--;) {
+ typename Graph::Node block = m_graph.node(blockIndex);
+ if (!block)
+ continue;
+
+ typename Graph::Node idomBlock = lengauerTarjan.immediateDominator(block);
+ m_data[block].idomParent = idomBlock;
+ if (idomBlock)
+ m_data[idomBlock].idomKids.append(block);
+ }
+
+ unsigned nextPreNumber = 0;
+ unsigned nextPostNumber = 0;
+
+ // Plain stack-based worklist because we are guaranteed to see each block exactly once anyway.
+ Vector<GraphNodeWithOrder<typename Graph::Node>> worklist;
+ worklist.append(GraphNodeWithOrder<typename Graph::Node>(m_graph.root(), GraphVisitOrder::Pre));
+ while (!worklist.isEmpty()) {
+ GraphNodeWithOrder<typename Graph::Node> item = worklist.takeLast();
+ switch (item.order) {
+ case GraphVisitOrder::Pre:
+ m_data[item.node].preNumber = nextPreNumber++;
+ worklist.append(GraphNodeWithOrder<typename Graph::Node>(item.node, GraphVisitOrder::Post));
+ for (typename Graph::Node kid : m_data[item.node].idomKids)
+ worklist.append(GraphNodeWithOrder<typename Graph::Node>(kid, GraphVisitOrder::Pre));
+ break;
+ case GraphVisitOrder::Post:
+ m_data[item.node].postNumber = nextPostNumber++;
+ break;
+ }
+ }
+
+ if (selfCheck) {
+ // Check our dominator calculation:
+ // 1) Check that our range-based ancestry test is the same as a naive ancestry test.
+ // 2) Check that our notion of who dominates whom is identical to a naive (not
+ // Lengauer-Tarjan) dominator calculation.
+
+ ValidationContext context(m_graph, *this);
+
+ for (unsigned fromBlockIndex = m_graph.numNodes(); fromBlockIndex--;) {
+ typename Graph::Node fromBlock = m_graph.node(fromBlockIndex);
+ if (!fromBlock || m_data[fromBlock].preNumber == UINT_MAX)
+ continue;
+ for (unsigned toBlockIndex = m_graph.numNodes(); toBlockIndex--;) {
+ typename Graph::Node toBlock = m_graph.node(toBlockIndex);
+ if (!toBlock || m_data[toBlock].preNumber == UINT_MAX)
+ continue;
+
+ if (dominates(fromBlock, toBlock) != naiveDominates(fromBlock, toBlock))
+ context.reportError(fromBlock, toBlock, "Range-based domination check is broken");
+ if (dominates(fromBlock, toBlock) != context.naiveDominators.dominates(fromBlock, toBlock))
+ context.reportError(fromBlock, toBlock, "Lengauer-Tarjan domination is broken");
+ }
+ }
+
+ context.handleErrors();
+ }
+ }
+
+ bool strictlyDominates(typename Graph::Node from, typename Graph::Node to) const
+ {
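+        // This is the classic tree-interval test: the constructor runs a DFS over the
+        // dominator tree and assigns every block pre and post numbers, and an
+        // ancestor's [pre, post] interval encloses the intervals of all of its
+        // descendants, so a strict dominator has a smaller preNumber and a larger
+        // postNumber than the block it dominates.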
+ return m_data[to].preNumber > m_data[from].preNumber
+ && m_data[to].postNumber < m_data[from].postNumber;
+ }
+
+ bool dominates(typename Graph::Node from, typename Graph::Node to) const
+ {
+ return from == to || strictlyDominates(from, to);
+ }
+
+ // Returns the immediate dominator of this block. Returns null for the root block.
+ typename Graph::Node idom(typename Graph::Node block) const
+ {
+ return m_data[block].idomParent;
+ }
+
+ template<typename Functor>
+ void forAllStrictDominatorsOf(typename Graph::Node to, const Functor& functor) const
+ {
+ for (typename Graph::Node block = m_data[to].idomParent; block; block = m_data[block].idomParent)
+ functor(block);
+ }
+
+ // Note: This will visit the dominators starting with the 'to' node and moving up the idom tree
+ // until it gets to the root. Some clients of this function, like B3::moveConstants(), rely on this
+ // order.
+ template<typename Functor>
+ void forAllDominatorsOf(typename Graph::Node to, const Functor& functor) const
+ {
+ for (typename Graph::Node block = to; block; block = m_data[block].idomParent)
+ functor(block);
+ }
+
+ template<typename Functor>
+ void forAllBlocksStrictlyDominatedBy(typename Graph::Node from, const Functor& functor) const
+ {
+ Vector<typename Graph::Node, 16> worklist;
+ worklist.appendVector(m_data[from].idomKids);
+ while (!worklist.isEmpty()) {
+ typename Graph::Node block = worklist.takeLast();
+ functor(block);
+ worklist.appendVector(m_data[block].idomKids);
+ }
+ }
+
+ template<typename Functor>
+ void forAllBlocksDominatedBy(typename Graph::Node from, const Functor& functor) const
+ {
+ Vector<typename Graph::Node, 16> worklist;
+ worklist.append(from);
+ while (!worklist.isEmpty()) {
+ typename Graph::Node block = worklist.takeLast();
+ functor(block);
+ worklist.appendVector(m_data[block].idomKids);
+ }
+ }
+
+ typename Graph::Set strictDominatorsOf(typename Graph::Node to) const
+ {
+ typename Graph::Set result;
+ forAllStrictDominatorsOf(
+ to,
+ [&] (typename Graph::Node node) {
+ result.add(node);
+ });
+ return result;
+ }
+
+ typename Graph::Set dominatorsOf(typename Graph::Node to) const
+ {
+ typename Graph::Set result;
+ forAllDominatorsOf(
+ to,
+ [&] (typename Graph::Node node) {
+ result.add(node);
+ });
+ return result;
+ }
+
+ typename Graph::Set blocksStrictlyDominatedBy(typename Graph::Node from) const
+ {
+ typename Graph::Set result;
+ forAllBlocksStrictlyDominatedBy(
+ from,
+ [&] (typename Graph::Node node) {
+ result.add(node);
+ });
+ return result;
+ }
+
+ typename Graph::Set blocksDominatedBy(typename Graph::Node from) const
+ {
+ typename Graph::Set result;
+ forAllBlocksDominatedBy(
+ from,
+ [&] (typename Graph::Node node) {
+ result.add(node);
+ });
+ return result;
+ }
+
+ template<typename Functor>
+ void forAllBlocksInDominanceFrontierOf(
+ typename Graph::Node from, const Functor& functor) const
+ {
+ typename Graph::Set set;
+ forAllBlocksInDominanceFrontierOfImpl(
+ from,
+ [&] (typename Graph::Node block) {
+ if (set.add(block))
+ functor(block);
+ });
+ }
+
+ typename Graph::Set dominanceFrontierOf(typename Graph::Node from) const
+ {
+ typename Graph::Set result;
+ forAllBlocksInDominanceFrontierOf(
+ from,
+ [&] (typename Graph::Node node) {
+ result.add(node);
+ });
+ return result;
+ }
+
+ template<typename Functor>
+ void forAllBlocksInIteratedDominanceFrontierOf(const typename Graph::List& from, const Functor& functor)
+ {
+ forAllBlocksInPrunedIteratedDominanceFrontierOf(
+ from,
+ [&] (typename Graph::Node block) -> bool {
+ functor(block);
+ return true;
+ });
+ }
+
+ // This is a close relative of forAllBlocksInIteratedDominanceFrontierOf(), which allows the
+ // given functor to return false to indicate that we don't wish to consider the given block.
+ // Useful for computing pruned SSA form.
+ template<typename Functor>
+ void forAllBlocksInPrunedIteratedDominanceFrontierOf(
+ const typename Graph::List& from, const Functor& functor)
+ {
+ typename Graph::Set set;
+ forAllBlocksInIteratedDominanceFrontierOfImpl(
+ from,
+ [&] (typename Graph::Node block) -> bool {
+ if (!set.add(block))
+ return false;
+ return functor(block);
+ });
+ }
+
+ typename Graph::Set iteratedDominanceFrontierOf(const typename Graph::List& from) const
+ {
+ typename Graph::Set result;
+ forAllBlocksInIteratedDominanceFrontierOfImpl(
+ from,
+ [&] (typename Graph::Node node) -> bool {
+ return result.add(node);
+ });
+ return result;
+ }
+
+ void dump(PrintStream& out) const
+ {
+ for (unsigned blockIndex = 0; blockIndex < m_data.size(); ++blockIndex) {
+ if (m_data[blockIndex].preNumber == UINT_MAX)
+ continue;
+
+ out.print(" Block #", blockIndex, ": idom = ", m_graph.dump(m_data[blockIndex].idomParent), ", idomKids = [");
+ CommaPrinter comma;
+ for (unsigned i = 0; i < m_data[blockIndex].idomKids.size(); ++i)
+ out.print(comma, m_graph.dump(m_data[blockIndex].idomKids[i]));
+ out.print("], pre/post = ", m_data[blockIndex].preNumber, "/", m_data[blockIndex].postNumber, "\n");
+ }
+ }
+
+private:
+ // This implements Lengauer and Tarjan's "A Fast Algorithm for Finding Dominators in a Flowgraph"
+ // (TOPLAS 1979). It uses the "simple" implementation of LINK and EVAL, which yields an O(n log n)
+ // solution. The full paper is linked below; this code attempts to closely follow the algorithm as
+ // it is presented in the paper; in particular sections 3 and 4 as well as appendix B.
+ // https://www.cs.princeton.edu/courses/archive/fall03/cs528/handouts/a%20fast%20algorithm%20for%20finding.pdf
+ //
+ // This code is very subtle. The Lengauer-Tarjan algorithm is incredibly deep to begin with. The
+ // goal of this code is to follow the code in the paper, however our implementation must deviate
+ // from the paper when it comes to recursion. The authors had used recursion to implement DFS, and
+ // also to implement the "simple" EVAL. We convert both of those into worklist-based solutions.
+ // Finally, once the algorithm gives us immediate dominators, we implement dominance tests by
+ // walking the dominator tree and computing pre and post numbers. We then use the range inclusion
+ // check trick that was first discovered by Paul F. Dietz in 1982 in "Maintaining order in a linked
+ // list" (see http://dl.acm.org/citation.cfm?id=802184).
+
+ class LengauerTarjan {
+ public:
+ LengauerTarjan(Graph& graph)
+ : m_graph(graph)
+ , m_data(graph.template newMap<BlockData>())
+ {
+ for (unsigned blockIndex = m_graph.numNodes(); blockIndex--;) {
+ typename Graph::Node block = m_graph.node(blockIndex);
+ if (!block)
+ continue;
+ m_data[block].label = block;
+ }
+ }
+
+ void compute()
+ {
+ computeDepthFirstPreNumbering(); // Step 1.
+ computeSemiDominatorsAndImplicitImmediateDominators(); // Steps 2 and 3.
+ computeExplicitImmediateDominators(); // Step 4.
+ }
+
+ typename Graph::Node immediateDominator(typename Graph::Node block)
+ {
+ return m_data[block].dom;
+ }
+
+ private:
+ void computeDepthFirstPreNumbering()
+ {
+ // Use a block worklist that also tracks the index inside the successor list. This is
+ // necessary for ensuring that we don't attempt to visit a successor until the previous
+ // successors that we had visited are fully processed. This ends up being revealed in the
+ // output of this method because the first time we see an edge to a block, we set the
+ // block's parent. So, if we have:
+ //
+ // A -> B
+ // A -> C
+ // B -> C
+ //
+ // And we're processing A, then we want to ensure that if we see A->B first (and hence set
+ // B's prenumber before we set C's) then we also end up setting C's parent to B by virtue
+ // of not noticing A->C until we're done processing B.
+
+ ExtendedGraphNodeWorklist<typename Graph::Node, unsigned, typename Graph::Set> worklist;
+ worklist.push(m_graph.root(), 0);
+
+ while (GraphNodeWith<typename Graph::Node, unsigned> item = worklist.pop()) {
+ typename Graph::Node block = item.node;
+ unsigned successorIndex = item.data;
+
+ // We initially push with successorIndex = 0 regardless of whether or not we have any
+ // successors. This is so that we can assign our prenumber. Subsequently we get pushed
+ // with higher successorIndex values, but only if they are in range.
+ ASSERT(!successorIndex || successorIndex < m_graph.successors(block).size());
+
+ if (!successorIndex) {
+ m_data[block].semiNumber = m_blockByPreNumber.size();
+ m_blockByPreNumber.append(block);
+ }
+
+ if (successorIndex < m_graph.successors(block).size()) {
+ unsigned nextSuccessorIndex = successorIndex + 1;
+ if (nextSuccessorIndex < m_graph.successors(block).size())
+ worklist.forcePush(block, nextSuccessorIndex);
+
+ typename Graph::Node successorBlock = m_graph.successors(block)[successorIndex];
+ if (worklist.push(successorBlock, 0))
+ m_data[successorBlock].parent = block;
+ }
+ }
+ }
+
+ void computeSemiDominatorsAndImplicitImmediateDominators()
+ {
+ for (unsigned currentPreNumber = m_blockByPreNumber.size(); currentPreNumber-- > 1;) {
+ typename Graph::Node block = m_blockByPreNumber[currentPreNumber];
+ BlockData& blockData = m_data[block];
+
+ // Step 2:
+ for (typename Graph::Node predecessorBlock : m_graph.predecessors(block)) {
+ typename Graph::Node intermediateBlock = eval(predecessorBlock);
+ blockData.semiNumber = std::min(
+ m_data[intermediateBlock].semiNumber, blockData.semiNumber);
+ }
+ unsigned bucketPreNumber = blockData.semiNumber;
+ ASSERT(bucketPreNumber <= currentPreNumber);
+ m_data[m_blockByPreNumber[bucketPreNumber]].bucket.append(block);
+ link(blockData.parent, block);
+
+ // Step 3:
+ for (typename Graph::Node semiDominee : m_data[blockData.parent].bucket) {
+ typename Graph::Node possibleDominator = eval(semiDominee);
+ BlockData& semiDomineeData = m_data[semiDominee];
+ ASSERT(m_blockByPreNumber[semiDomineeData.semiNumber] == blockData.parent);
+ BlockData& possibleDominatorData = m_data[possibleDominator];
+ if (possibleDominatorData.semiNumber < semiDomineeData.semiNumber)
+ semiDomineeData.dom = possibleDominator;
+ else
+ semiDomineeData.dom = blockData.parent;
+ }
+ m_data[blockData.parent].bucket.clear();
+ }
+ }
+
+ void computeExplicitImmediateDominators()
+ {
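+            // Step 4 of the paper: a block whose recorded dom is not its
+            // semi-dominator received only a deferred answer in step 3; visiting
+            // blocks in increasing preorder, its true immediate dominator is its
+            // current dom's already-finalized dom.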
+ for (unsigned currentPreNumber = 1; currentPreNumber < m_blockByPreNumber.size(); ++currentPreNumber) {
+ typename Graph::Node block = m_blockByPreNumber[currentPreNumber];
+ BlockData& blockData = m_data[block];
+
+ if (blockData.dom != m_blockByPreNumber[blockData.semiNumber])
+ blockData.dom = m_data[blockData.dom].dom;
+ }
+ }
+
+ void link(typename Graph::Node from, typename Graph::Node to)
+ {
+ m_data[to].ancestor = from;
+ }
+
+ typename Graph::Node eval(typename Graph::Node block)
+ {
+ if (!m_data[block].ancestor)
+ return block;
+
+ compress(block);
+ return m_data[block].label;
+ }
+
+ void compress(typename Graph::Node initialBlock)
+ {
+ // This was meant to be a recursive function, but we don't like recursion because we don't
+ // want to blow the stack. The original function will call compress() recursively on the
+ // ancestor of anything that has an ancestor. So, we populate our worklist with the
+ // recursive ancestors of initialBlock. Then we process the list starting from the block
+ // that is furthest up the ancestor chain.
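+            //
+            // For example, with an ancestor chain D -> C -> B -> A (A has no
+            // ancestor), the stack holds [D, C, B, A]; the loop below then processes
+            // C and finally D (the last two entries need no compression), updating
+            // each block's label from its ancestor's label and re-pointing its
+            // ancestor one step further up, exactly what the recursive compress()
+            // would do while unwinding.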
+
+ typename Graph::Node ancestor = m_data[initialBlock].ancestor;
+ ASSERT(ancestor);
+ if (!m_data[ancestor].ancestor)
+ return;
+
+ Vector<typename Graph::Node, 16> stack;
+ for (typename Graph::Node block = initialBlock; block; block = m_data[block].ancestor)
+ stack.append(block);
+
+ // We only care about blocks that have an ancestor that has an ancestor. The last two
+ // elements in the stack won't satisfy this property.
+ ASSERT(stack.size() >= 2);
+ ASSERT(!m_data[stack[stack.size() - 1]].ancestor);
+ ASSERT(!m_data[m_data[stack[stack.size() - 2]].ancestor].ancestor);
+
+ for (unsigned i = stack.size() - 2; i--;) {
+ typename Graph::Node block = stack[i];
+ typename Graph::Node& labelOfBlock = m_data[block].label;
+ typename Graph::Node& ancestorOfBlock = m_data[block].ancestor;
+ ASSERT(ancestorOfBlock);
+ ASSERT(m_data[ancestorOfBlock].ancestor);
+
+ typename Graph::Node labelOfAncestorOfBlock = m_data[ancestorOfBlock].label;
+
+ if (m_data[labelOfAncestorOfBlock].semiNumber < m_data[labelOfBlock].semiNumber)
+ labelOfBlock = labelOfAncestorOfBlock;
+ ancestorOfBlock = m_data[ancestorOfBlock].ancestor;
+ }
+ }
+
+ struct BlockData {
+ BlockData()
+ : parent(nullptr)
+ , preNumber(UINT_MAX)
+ , semiNumber(UINT_MAX)
+ , ancestor(nullptr)
+ , label(nullptr)
+ , dom(nullptr)
+ {
+ }
+
+ typename Graph::Node parent;
+ unsigned preNumber;
+ unsigned semiNumber;
+ typename Graph::Node ancestor;
+ typename Graph::Node label;
+ Vector<typename Graph::Node> bucket;
+ typename Graph::Node dom;
+ };
+
+ Graph& m_graph;
+ typename Graph::template Map<BlockData> m_data;
+ Vector<typename Graph::Node> m_blockByPreNumber;
+ };
+
+ class NaiveDominators {
+ public:
+ NaiveDominators(Graph& graph)
+ : m_graph(graph)
+ {
+ // This implements a naive dominator solver.
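+            // It is the textbook iterative data-flow formulation: dom(root) = { root },
+            // and for every other reachable block b,
+            // dom(b) = { b } U (intersection of dom(p) over all predecessors p of b),
+            // iterated until a fixpoint is reached.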
+
+ ASSERT(!graph.predecessors(graph.root()).size());
+
+ unsigned numBlocks = graph.numNodes();
+
+ // Allocate storage for the dense dominance matrix.
+ m_results.grow(numBlocks);
+ for (unsigned i = numBlocks; i--;)
+ m_results[i].resize(numBlocks);
+ m_scratch.resize(numBlocks);
+
+ // We know that the entry block is only dominated by itself.
+ m_results[0].clearAll();
+ m_results[0][0] = true;
+
+ // Find all of the valid blocks.
+ m_scratch.clearAll();
+ for (unsigned i = numBlocks; i--;) {
+ if (!graph.node(i))
+ continue;
+ m_scratch[i] = true;
+ }
+
+ // Mark all nodes as dominated by everything.
+ for (unsigned i = numBlocks; i-- > 1;) {
+ if (!graph.node(i) || !graph.predecessors(graph.node(i)).size())
+ m_results[i].clearAll();
+ else
+ m_results[i] = m_scratch;
+ }
+
+            // Iteratively eliminate nodes that are not dominators.
+ bool changed;
+ do {
+ changed = false;
+                // Prune dominators in all non-entry blocks: forward scan.
+ for (unsigned i = 1; i < numBlocks; ++i)
+ changed |= pruneDominators(i);
+
+ if (!changed)
+ break;
+
+                // Prune dominators in all non-entry blocks: backward scan.
+ changed = false;
+ for (unsigned i = numBlocks; i-- > 1;)
+ changed |= pruneDominators(i);
+ } while (changed);
+ }
+
+ bool dominates(unsigned from, unsigned to) const
+ {
+ return m_results[to][from];
+ }
+
+ bool dominates(typename Graph::Node from, typename Graph::Node to) const
+ {
+ return dominates(m_graph.index(from), m_graph.index(to));
+ }
+
+ void dump(PrintStream& out) const
+ {
+ for (unsigned blockIndex = 0; blockIndex < m_graph.numNodes(); ++blockIndex) {
+ typename Graph::Node block = m_graph.node(blockIndex);
+ if (!block)
+ continue;
+ out.print(" Block ", m_graph.dump(block), ":");
+ for (unsigned otherIndex = 0; otherIndex < m_graph.numNodes(); ++otherIndex) {
+ if (!dominates(m_graph.index(block), otherIndex))
+ continue;
+ out.print(" ", m_graph.dump(m_graph.node(otherIndex)));
+ }
+ out.print("\n");
+ }
+ }
+
+ private:
+ bool pruneDominators(unsigned idx)
+ {
+ typename Graph::Node block = m_graph.node(idx);
+
+ if (!block || !m_graph.predecessors(block).size())
+ return false;
+
+ // Find the intersection of dom(preds).
+ m_scratch = m_results[m_graph.index(m_graph.predecessors(block)[0])];
+ for (unsigned j = m_graph.predecessors(block).size(); j-- > 1;)
+ m_scratch &= m_results[m_graph.index(m_graph.predecessors(block)[j])];
+
+ // The block is also dominated by itself.
+ m_scratch[idx] = true;
+
+ return m_results[idx].setAndCheck(m_scratch);
+ }
+
+ Graph& m_graph;
+ Vector<FastBitVector> m_results; // For each block, the bitvector of blocks that dominate it.
+        FastBitVector m_scratch; // A temporary bitvector with a bit for each block. We recycle this to save new/deletes.
+ };
+
+ struct ValidationContext {
+ ValidationContext(Graph& graph, Dominators& dominators)
+ : graph(graph)
+ , dominators(dominators)
+ , naiveDominators(graph)
+ {
+ }
+
+ void reportError(typename Graph::Node from, typename Graph::Node to, const char* message)
+ {
+ Error error;
+ error.from = from;
+ error.to = to;
+ error.message = message;
+ errors.append(error);
+ }
+
+ void handleErrors()
+ {
+ if (errors.isEmpty())
+ return;
+
+ dataLog("DFG DOMINATOR VALIDATION FAILED:\n");
+ dataLog("\n");
+ dataLog("For block domination relationships:\n");
+ for (unsigned i = 0; i < errors.size(); ++i) {
+ dataLog(
+ " ", graph.dump(errors[i].from), " -> ", graph.dump(errors[i].to),
+ " (", errors[i].message, ")\n");
+ }
+ dataLog("\n");
+ dataLog("Control flow graph:\n");
+ for (unsigned blockIndex = 0; blockIndex < graph.numNodes(); ++blockIndex) {
+ typename Graph::Node block = graph.node(blockIndex);
+ if (!block)
+ continue;
+ dataLog(" Block ", graph.dump(graph.node(blockIndex)), ": successors = [");
+ CommaPrinter comma;
+ for (auto successor : graph.successors(block))
+ dataLog(comma, graph.dump(successor));
+ dataLog("], predecessors = [");
+ comma = CommaPrinter();
+ for (auto predecessor : graph.predecessors(block))
+ dataLog(comma, graph.dump(predecessor));
+ dataLog("]\n");
+ }
+ dataLog("\n");
+ dataLog("Lengauer-Tarjan Dominators:\n");
+ dataLog(dominators);
+ dataLog("\n");
+ dataLog("Naive Dominators:\n");
+ naiveDominators.dump(WTF::dataFile());
+ dataLog("\n");
+ dataLog("Graph at time of failure:\n");
+ dataLog(graph);
+ dataLog("\n");
+ dataLog("DFG DOMINATOR VALIDATION FAILIED!\n");
+ CRASH();
+ }
+
+ Graph& graph;
+ Dominators& dominators;
+ NaiveDominators naiveDominators;
+
+ struct Error {
+ typename Graph::Node from;
+ typename Graph::Node to;
+ const char* message;
+ };
+
+ Vector<Error> errors;
+ };
+
+ bool naiveDominates(typename Graph::Node from, typename Graph::Node to) const
+ {
+ for (typename Graph::Node block = to; block; block = m_data[block].idomParent) {
+ if (block == from)
+ return true;
+ }
+ return false;
+ }
+
+ template<typename Functor>
+ void forAllBlocksInDominanceFrontierOfImpl(
+ typename Graph::Node from, const Functor& functor) const
+ {
+ // Paraphrasing from http://en.wikipedia.org/wiki/Dominator_(graph_theory):
+ // "The dominance frontier of a block 'from' is the set of all blocks 'to' such that
+ // 'from' dominates an immediate predecessor of 'to', but 'from' does not strictly
+ // dominate 'to'."
+ //
+ // A useful corner case to remember: a block may be in its own dominance frontier if it has
+ // a loop edge to itself, since it dominates itself and so it dominates its own immediate
+ // predecessor, and a block never strictly dominates itself.
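+        //
+        // For example, in the graph A -> B, B -> B, B -> C, the block B is in its own
+        // dominance frontier: B dominates itself, so it dominates one of its own
+        // immediate predecessors (via the self loop), yet it does not strictly
+        // dominate itself.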
+
+ forAllBlocksDominatedBy(
+ from,
+ [&] (typename Graph::Node block) {
+ for (typename Graph::Node to : m_graph.successors(block)) {
+ if (!strictlyDominates(from, to))
+ functor(to);
+ }
+ });
+ }
+
+ template<typename Functor>
+ void forAllBlocksInIteratedDominanceFrontierOfImpl(
+ const typename Graph::List& from, const Functor& functor) const
+ {
+ typename Graph::List worklist = from;
+ while (!worklist.isEmpty()) {
+ typename Graph::Node block = worklist.takeLast();
+ forAllBlocksInDominanceFrontierOfImpl(
+ block,
+ [&] (typename Graph::Node otherBlock) {
+ if (functor(otherBlock))
+ worklist.append(otherBlock);
+ });
+ }
+ }
+
+ struct BlockData {
+ BlockData()
+ : idomParent(nullptr)
+ , preNumber(UINT_MAX)
+ , postNumber(UINT_MAX)
+ {
+ }
+
+ Vector<typename Graph::Node> idomKids;
+ typename Graph::Node idomParent;
+
+ unsigned preNumber;
+ unsigned postNumber;
+ };
+
+ Graph& m_graph;
+ typename Graph::template Map<BlockData> m_data;
+};
+
+} // namespace WTF
+
+using WTF::Dominators;
+
+#endif // WTFDominators_h
+
diff --git a/Source/WTF/wtf/DynamicAnnotations.cpp b/Source/WTF/wtf/DynamicAnnotations.cpp
deleted file mode 100644
index b662877a5..000000000
--- a/Source/WTF/wtf/DynamicAnnotations.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2011 Google Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#include "DynamicAnnotations.h"
-
-#if USE(DYNAMIC_ANNOTATIONS) && !USE(DYNAMIC_ANNOTATIONS_NOIMPL)
-
-// Identical code folding(-Wl,--icf=all) countermeasures.
-// This makes all Annotate* functions different, which prevents the linker from
-// folding them.
-#ifdef __COUNTER__
-#define DYNAMIC_ANNOTATIONS_IMPL \
- volatile short lineno = (__LINE__ << 8) + __COUNTER__; \
- (void)lineno;
-#else
-#define DYNAMIC_ANNOTATIONS_IMPL \
- volatile short lineno = (__LINE__ << 8); \
- (void)lineno;
-#endif
-
-void WTFAnnotateBenignRaceSized(const char*, int, const volatile void*, long, const char*)
-{
- DYNAMIC_ANNOTATIONS_IMPL
-}
-
-void WTFAnnotateHappensBefore(const char*, int, const volatile void*)
-{
- DYNAMIC_ANNOTATIONS_IMPL
-}
-
-void WTFAnnotateHappensAfter(const char*, int, const volatile void*)
-{
- DYNAMIC_ANNOTATIONS_IMPL
-}
-
-#endif // USE(DYNAMIC_ANNOTATIONS) && !USE(DYNAMIC_ANNOTATIONS_NOIMPL)
diff --git a/Source/WTF/wtf/DynamicAnnotations.h b/Source/WTF/wtf/DynamicAnnotations.h
deleted file mode 100644
index 2d5a5a9e9..000000000
--- a/Source/WTF/wtf/DynamicAnnotations.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2011 Google Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef WTF_DynamicAnnotations_h
-#define WTF_DynamicAnnotations_h
-
-#include <wtf/Platform.h>
-
-/* This file defines dynamic annotations for use with dynamic analysis
- * tool such as ThreadSanitizer, Valgrind, etc.
- *
- * Dynamic annotation is a source code annotation that affects
- * the generated code (that is, the annotation is not a comment).
- * Each such annotation is attached to a particular
- * instruction and/or to a particular object (address) in the program.
- *
- * By using dynamic annotations a developer can give more details to the dynamic
- * analysis tool to improve its precision.
- *
- * In C/C++ program the annotations are represented as C macros.
- * With the default build flags, these macros are empty, hence don't affect
- * performance of a compiled binary.
- * If dynamic annotations are enabled, they just call no-op functions.
- * The dynamic analysis tools can intercept these functions and replace them
- * with their own implementations.
- *
- * See http://code.google.com/p/data-race-test/wiki/DynamicAnnotations for more information.
- */
-
-#if USE(DYNAMIC_ANNOTATIONS)
-/* Tell data race detector that we're not interested in reports on the given address range. */
-#define WTF_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) WTFAnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description)
-#define WTF_ANNOTATE_BENIGN_RACE(pointer, description) WTFAnnotateBenignRaceSized(__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
-
-/* Annotations for user-defined synchronization mechanisms.
- * These annotations can be used to define happens-before arcs in user-defined
- * synchronization mechanisms: the race detector will infer an arc from
- * the former to the latter when they share the same argument pointer.
- *
- * The most common case requiring annotations is atomic reference counting:
- * bool deref() {
- * ANNOTATE_HAPPENS_BEFORE(&m_refCount);
- * if (!atomicDecrement(&m_refCount)) {
- * // m_refCount is now 0
- * ANNOTATE_HAPPENS_AFTER(&m_refCount);
- * // "return true; happens-after each atomicDecrement of m_refCount"
- * return true;
- * }
- * return false;
- * }
- */
-#define WTF_ANNOTATE_HAPPENS_BEFORE(address) WTFAnnotateHappensBefore(__FILE__, __LINE__, address)
-#define WTF_ANNOTATE_HAPPENS_AFTER(address) WTFAnnotateHappensAfter(__FILE__, __LINE__, address)
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-/* Don't use these directly, use the above macros instead. */
-void WTFAnnotateBenignRaceSized(const char* file, int line, const volatile void* memory, long size, const char* description);
-void WTFAnnotateHappensBefore(const char* file, int line, const volatile void* address);
-void WTFAnnotateHappensAfter(const char* file, int line, const volatile void* address);
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#else // USE(DYNAMIC_ANNOTATIONS)
-/* These macros are empty when dynamic annotations are not enabled so you can
- * use them without affecting the performance of release binaries. */
-#define WTF_ANNOTATE_BENIGN_RACE_SIZED(address, size, description)
-#define WTF_ANNOTATE_BENIGN_RACE(pointer, description)
-#define WTF_ANNOTATE_HAPPENS_BEFORE(address)
-#define WTF_ANNOTATE_HAPPENS_AFTER(address)
-#endif // USE(DYNAMIC_ANNOTATIONS)
-
-#endif // WTF_DynamicAnnotations_h
diff --git a/Source/WTF/wtf/EnumTraits.h b/Source/WTF/wtf/EnumTraits.h
new file mode 100644
index 000000000..c6bde9fb5
--- /dev/null
+++ b/Source/WTF/wtf/EnumTraits.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <type_traits>
+
+namespace WTF {
+
+template<typename> struct EnumTraits;
+
+template<typename E, E...> struct EnumValues;
+
+template<typename T, typename E> struct EnumValueChecker;
+
+template<typename T, typename E, E e, E... es>
+struct EnumValueChecker<T, EnumValues<E, e, es...>> {
+ static constexpr bool isValidEnum(T t)
+ {
+ return (static_cast<T>(e) == t) ? true : EnumValueChecker<T, EnumValues<E, es...>>::isValidEnum(t);
+ }
+};
+
+template<typename T, typename E>
+struct EnumValueChecker<T, EnumValues<E>> {
+ static constexpr bool isValidEnum(T t)
+ {
+ return false;
+ }
+};
+
+template<typename E, typename T>
+constexpr auto isValidEnum(T t) -> std::enable_if_t<std::is_enum<E>::value, bool>
+{
+    static_assert(sizeof(T) >= sizeof(std::underlying_type_t<E>), "Integral type must be at least the size of the underlying enum type");
+
+ return EnumValueChecker<T, typename EnumTraits<E>::values>::isValidEnum(t);
+}
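+
+// Client code is expected to specialize EnumTraits for its enum type so that
+// isValidEnum() can enumerate the legal values. An illustrative sketch ("Fruit" and
+// "decodedByte" are hypothetical):
+//
+//     enum class Fruit : uint8_t { Apple, Orange };
+//     template<> struct EnumTraits<Fruit> {
+//         using values = EnumValues<Fruit, Fruit::Apple, Fruit::Orange>;
+//     };
+//
+//     if (isValidEnum<Fruit>(decodedByte))
+//         fruit = static_cast<Fruit>(decodedByte);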
+
+}
+
+using WTF::isValidEnum;
diff --git a/Source/WTF/wtf/Expected.h b/Source/WTF/wtf/Expected.h
new file mode 100644
index 000000000..39c7e876b
--- /dev/null
+++ b/Source/WTF/wtf/Expected.h
@@ -0,0 +1,456 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// WTF::Expected is based on std::expected, as described here: http://wg21.link/p0323r1
+// The specification expects invalid accesses to throw. This implementation doesn't support exceptions and uses RELEASE_ASSERT instead.
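+//
+// An illustrative usage sketch ("parseCount" and its callers are hypothetical):
+//
+//     Expected<unsigned, const char*> parseCount(StringView);
+//
+//     auto count = parseCount(input);
+//     if (!count)
+//         return makeUnexpected(count.error());
+//     consume(count.value());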
+
+#pragma once
+
+#include <cstdlib>
+#include <functional>
+#include <initializer_list>
+#include <type_traits>
+#include <utility>
+#include <wtf/Assertions.h>
+#include <wtf/Compiler.h>
+#include <wtf/Optional.h>
+#include <wtf/StdLibExtras.h>
+
+namespace WTF {
+
+template <class E>
+class UnexpectedType {
+public:
+ UnexpectedType() = delete;
+ constexpr explicit UnexpectedType(const E& e) : val(e) { }
+ constexpr explicit UnexpectedType(E&& e) : val(std::forward<E>(e)) { }
+ constexpr const E& value() const& { return val; }
+ RELAXED_CONSTEXPR E& value() & { return val; }
+ RELAXED_CONSTEXPR E&& value() && { return WTFMove(val); }
+
+private:
+ E val;
+};
+
+template <class E> constexpr bool operator==(const UnexpectedType<E>& lhs, const UnexpectedType<E>& rhs) { return lhs.value() == rhs.value(); }
+template <class E> constexpr bool operator!=(const UnexpectedType<E>& lhs, const UnexpectedType<E>& rhs) { return lhs.value() != rhs.value(); }
+template <class E> constexpr bool operator<(const UnexpectedType<E>& lhs, const UnexpectedType<E>& rhs) { return lhs.value() < rhs.value(); }
+template <class E> constexpr bool operator>(const UnexpectedType<E>& lhs, const UnexpectedType<E>& rhs) { return lhs.value() > rhs.value(); }
+template <class E> constexpr bool operator<=(const UnexpectedType<E>& lhs, const UnexpectedType<E>& rhs) { return lhs.value() <= rhs.value(); }
+template <class E> constexpr bool operator>=(const UnexpectedType<E>& lhs, const UnexpectedType<E>& rhs) { return lhs.value() >= rhs.value(); }
+
+template <class E> constexpr UnexpectedType<std::decay_t<E>> makeUnexpected(E&& v) { return UnexpectedType<typename std::decay<E>::type>(std::forward<E>(v)); }
+
+struct UnexpectTag {
+ UnexpectTag() = delete;
+};
+constexpr UnexpectTag Unexpect { };
+
+namespace ExpectedDetail {
+
+// Invoked where std::expected would instead throw.
+inline NO_RETURN_DUE_TO_CRASH void Throw() { RELEASE_ASSERT_NOT_REACHED(); }
+
+static constexpr enum class ValueTagType { } ValueTag { };
+static constexpr enum class ErrorTagType { } ErrorTag { };
+
+template<class T, std::enable_if_t<std::is_trivially_destructible<T>::value>* = nullptr> void destroy(T&) { }
+template<class T, std::enable_if_t<!std::is_trivially_destructible<T>::value && (std::is_class<T>::value || std::is_union<T>::value)>* = nullptr> void destroy(T& t) { t.~T(); }
+
+template <class T, class E>
+union ConstexprStorage {
+ typedef T ValueType;
+ typedef E ErrorType;
+ char dummy;
+ ValueType val;
+ ErrorType err;
+ constexpr ConstexprStorage() : dummy() { }
+ constexpr ConstexprStorage(ValueTagType) : val() { }
+ constexpr ConstexprStorage(ErrorTagType) : err() { }
+ constexpr ConstexprStorage(ValueTagType, const ValueType& v) : val(v) { }
+ constexpr ConstexprStorage(ErrorTagType, const ErrorType& e) : err(e) { }
+ ~ConstexprStorage() = default;
+};
+
+template <class T, class E>
+union Storage {
+ typedef T ValueType;
+ typedef E ErrorType;
+ char dummy;
+ ValueType val;
+ ErrorType err;
+ constexpr Storage() : dummy() { }
+ constexpr Storage(ValueTagType) : val() { }
+ constexpr Storage(ErrorTagType) : err() { }
+ constexpr Storage(ValueTagType, const ValueType& val) : val(val) { }
+ constexpr Storage(ValueTagType, ValueType&& val) : val(std::forward<ValueType>(val)) { }
+ constexpr Storage(ErrorTagType, const ErrorType& err) : err(err) { }
+ constexpr Storage(ErrorTagType, ErrorType&& err) : err(std::forward<ErrorType>(err)) { }
+ ~Storage() { }
+};
+
+template <class E>
+union ConstexprStorage<void, E> {
+ typedef void ValueType;
+ typedef E ErrorType;
+ char dummy;
+ ErrorType err;
+ constexpr ConstexprStorage() : dummy() { }
+ constexpr ConstexprStorage(ValueTagType) : dummy() { }
+ constexpr ConstexprStorage(ErrorTagType) : err() { }
+ constexpr ConstexprStorage(ErrorTagType, const ErrorType& e) : err(e) { }
+ ~ConstexprStorage() = default;
+};
+
+template <class E>
+union Storage<void, E> {
+ typedef void ValueType;
+ typedef E ErrorType;
+ char dummy;
+ ErrorType err;
+ constexpr Storage() : dummy() { }
+ constexpr Storage(ValueTagType) : dummy() { }
+ constexpr Storage(ErrorTagType) : err() { }
+ constexpr Storage(ErrorTagType, const ErrorType& err) : err(err) { }
+ constexpr Storage(ErrorTagType, ErrorType&& err) : err(std::forward<ErrorType>(err)) { }
+ ~Storage() { }
+};
+
+template <class T, class E>
+struct ConstexprBase {
+ typedef T ValueType;
+ typedef E ErrorType;
+ ConstexprStorage<ValueType, ErrorType> s;
+ bool has;
+ constexpr ConstexprBase() : s(), has(true) { }
+ constexpr ConstexprBase(ValueTagType tag) : s(tag), has(true) { }
+ constexpr ConstexprBase(ErrorTagType tag) : s(tag), has(false) { }
+ constexpr ConstexprBase(ValueTagType tag, const ValueType& val) : s(tag, val), has(true) { }
+ constexpr ConstexprBase(ErrorTagType tag, const ErrorType& err) : s(tag, err), has(false) { }
+ ~ConstexprBase() = default;
+};
+
+template <class T, class E>
+struct Base {
+ typedef T ValueType;
+ typedef E ErrorType;
+ Storage<ValueType, ErrorType> s;
+ bool has;
+ constexpr Base() : s(), has(true) { }
+ constexpr Base(ValueTagType tag) : s(tag), has(true) { }
+ constexpr Base(ErrorTagType tag) : s(tag), has(false) { }
+ constexpr Base(ValueTagType tag, const ValueType& val) : s(tag, val), has(true) { }
+ constexpr Base(ValueTagType tag, ValueType&& val) : s(tag, std::forward<ValueType>(val)), has(true) { }
+ constexpr Base(ErrorTagType tag, const ErrorType& err) : s(tag, err), has(false) { }
+ constexpr Base(ErrorTagType tag, ErrorType&& err) : s(tag, std::forward<ErrorType>(err)), has(false) { }
+ Base(const Base& o)
+ : has(o.has)
+ {
+ if (has)
+ ::new (&s.val) ValueType(o.s.val);
+ else
+ ::new (&s.err) ErrorType(o.s.err);
+ }
+ Base(Base&& o)
+ : has(o.has)
+ {
+ if (has)
+ ::new (&s.val) ValueType(WTFMove(o.s.val));
+ else
+ ::new (&s.err) ErrorType(WTFMove(o.s.err));
+ }
+ ~Base()
+ {
+ if (has)
+ destroy(s.val);
+ else
+ destroy(s.err);
+ }
+};
+
+template <class E>
+struct ConstexprBase<void, E> {
+ typedef void ValueType;
+ typedef E ErrorType;
+ ConstexprStorage<ValueType, ErrorType> s;
+ bool has;
+ constexpr ConstexprBase() : s(), has(true) { }
+ constexpr ConstexprBase(ValueTagType tag) : s(tag), has(true) { }
+ constexpr ConstexprBase(ErrorTagType tag) : s(tag), has(false) { }
+ constexpr ConstexprBase(ErrorTagType tag, const ErrorType& err) : s(tag, err), has(false) { }
+ constexpr ConstexprBase(ErrorTagType tag, ErrorType&& err) : s(tag, std::forward<ErrorType>(err)), has(false) { }
+ ~ConstexprBase() = default;
+};
+
+template <class E>
+struct Base<void, E> {
+ typedef void ValueType;
+ typedef E ErrorType;
+ Storage<ValueType, ErrorType> s;
+ bool has;
+ constexpr Base() : s(), has(true) { }
+ constexpr Base(ValueTagType tag) : s(tag), has(true) { }
+ constexpr Base(ErrorTagType tag) : s(tag), has(false) { }
+ constexpr Base(ErrorTagType tag, const ErrorType& err) : s(tag, err), has(false) { }
+ constexpr Base(ErrorTagType tag, ErrorType&& err) : s(tag, std::forward<ErrorType>(err)), has(false) { }
+ Base(const Base& o)
+ : has(o.has)
+ {
+ if (!has)
+ ::new (&s.err) ErrorType(o.s.err);
+ }
+ Base(Base&& o)
+ : has(o.has)
+ {
+ if (!has)
+ ::new (&s.err) ErrorType(WTFMove(o.s.err));
+ }
+ ~Base()
+ {
+ if (!has)
+ destroy(s.err);
+ }
+};
+
+template <class T, class E>
+using BaseSelect = typename std::conditional<
+ ((std::is_void<T>::value || std::is_trivially_destructible<T>::value)
+ && std::is_trivially_destructible<E>::value),
+ ConstexprBase<typename std::remove_const<T>::type, typename std::remove_const<E>::type>,
+ Base<typename std::remove_const<T>::type, typename std::remove_const<E>::type>
+>::type;
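+
+// BaseSelect chooses the trivially-destructible ConstexprBase (usable in constant
+// expressions) when both the value and error types are trivially destructible, and
+// otherwise falls back to Base, which destroys the active union member by hand.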
+
+} // namespace ExpectedDetail
+
+template <class T, class E>
+class Expected : private ExpectedDetail::BaseSelect<T, E> {
+ typedef ExpectedDetail::BaseSelect<T, E> base;
+
+public:
+ typedef typename base::ValueType ValueType;
+ typedef typename base::ErrorType ErrorType;
+
+private:
+ typedef Expected<ValueType, ErrorType> type;
+
+public:
+ // template <class U> struct rebind { using type = Expected<U, ErrorType>; };
+
+ constexpr Expected() : base(ExpectedDetail::ValueTag) { }
+ Expected(const Expected&) = default;
+ Expected(Expected&&) = default;
+ constexpr Expected(const ValueType& e) : base(ExpectedDetail::ValueTag, e) { }
+ constexpr Expected(ValueType&& e) : base(ExpectedDetail::ValueTag, std::forward<ValueType>(e)) { }
+ // template <class... Args> constexpr explicit Expected(in_place_t, Args&&...);
+ // template <class U, class... Args> constexpr explicit Expected(in_place_t, std::initializer_list<U>, Args&&...);
+ constexpr Expected(UnexpectedType<ErrorType> const& u) : base(ExpectedDetail::ErrorTag, u.value()) { }
+ constexpr Expected(UnexpectedType<ErrorType>&& u) : base(ExpectedDetail::ErrorTag, std::forward<UnexpectedType<E>>(u).value()) { }
+ template <class Err> constexpr Expected(UnexpectedType<Err> const& u) : base(ExpectedDetail::ErrorTag, u.value()) { }
+ // template <class... Args> constexpr explicit Expected(UnexpectTag, Args&&...);
+ // template <class U, class... Args> constexpr explicit Expected(UnexpectTag, std::initializer_list<U>, Args&&...);
+
+ ~Expected() = default;
+
+ Expected& operator=(const Expected& e) { type(e).swap(*this); return *this; }
+ Expected& operator=(Expected&& e) { type(WTFMove(e)).swap(*this); return *this; }
+ template <class U> Expected& operator=(U&& u) { type(WTFMove(u)).swap(*this); return *this; }
+ Expected& operator=(const UnexpectedType<ErrorType>& u) { type(u).swap(*this); return *this; }
+ Expected& operator=(UnexpectedType<ErrorType>&& u) { type(WTFMove(u)).swap(*this); return *this; }
+ // template <class... Args> void emplace(Args&&...);
+ // template <class U, class... Args> void emplace(std::initializer_list<U>, Args&&...);
+
+ void swap(Expected& o)
+ {
+ using std::swap;
+ if (base::has && o.has)
+ swap(base::s.val, o.s.val);
+ else if (base::has && !o.has) {
+ ErrorType e(WTFMove(o.s.err));
+ ExpectedDetail::destroy(o.s.err);
+ ::new (&o.s.val) ValueType(WTFMove(base::s.val));
+ ExpectedDetail::destroy(base::s.val);
+ ::new (&base::s.err) ErrorType(WTFMove(e));
+ swap(base::has, o.has);
+ } else if (!base::has && o.has) {
+ ValueType v(WTFMove(o.s.val));
+ ExpectedDetail::destroy(o.s.val);
+ ::new (&o.s.err) ErrorType(WTFMove(base::s.err));
+ ExpectedDetail::destroy(base::s.err);
+ ::new (&base::s.val) ValueType(WTFMove(v));
+ swap(base::has, o.has);
+ } else
+ swap(base::s.err, o.s.err);
+ }
+
+ constexpr const ValueType* operator->() const { return &base::s.val; }
+ ValueType* operator->() { return &base::s.val; }
+ constexpr const ValueType& operator*() const & { return base::s.val; }
+ ValueType& operator*() & { return base::s.val; }
+ constexpr const ValueType&& operator*() const && { return WTFMove(base::s.val); }
+ RELAXED_CONSTEXPR ValueType&& operator*() && { return WTFMove(base::s.val); }
+ constexpr explicit operator bool() const { return base::has; }
+ constexpr bool hasValue() const { return base::has; }
+ constexpr const ValueType& value() const & { return base::has ? base::s.val : (ExpectedDetail::Throw(), base::s.val); }
+ RELAXED_CONSTEXPR ValueType& value() & { return base::has ? base::s.val : (ExpectedDetail::Throw(), base::s.val); }
+ constexpr const ValueType&& value() const && { return base::has ? base::s.val : (ExpectedDetail::Throw(), base::s.val); }
+ RELAXED_CONSTEXPR ValueType&& value() && { return WTFMove(base::has ? base::s.val : (ExpectedDetail::Throw(), base::s.val)); }
+ constexpr const ErrorType& error() const & { return !base::has ? base::s.err : (ExpectedDetail::Throw(), base::s.err); }
+ ErrorType& error() & { return !base::has ? base::s.err : (ExpectedDetail::Throw(), base::s.err); }
+ RELAXED_CONSTEXPR ErrorType&& error() && { return !base::has ? base::s.err : (ExpectedDetail::Throw(), base::s.err); }
+ constexpr const ErrorType&& error() const && { return !base::has ? base::s.err : (ExpectedDetail::Throw(), base::s.err); }
+ constexpr UnexpectedType<ErrorType> getUnexpected() const { return UnexpectedType<ErrorType>(base::s.err); }
+ template <class U> constexpr ValueType valueOr(U&& u) const & { return base::has ? **this : static_cast<ValueType>(std::forward<U>(u)); }
+ template <class U> ValueType valueOr(U&& u) && { return base::has ? WTFMove(**this) : static_cast<ValueType>(std::forward<U>(u)); }
+};
+
+template <class E>
+class Expected<void, E> : private ExpectedDetail::BaseSelect<void, E> {
+ typedef ExpectedDetail::BaseSelect<void, E> base;
+
+public:
+ typedef typename base::ValueType ValueType;
+ typedef typename base::ErrorType ErrorType;
+
+private:
+ typedef Expected<ValueType, ErrorType> type;
+
+public:
+ // template <class U> struct rebind { typedef Expected<U, ErrorType> type; };
+
+ constexpr Expected() : base(ExpectedDetail::ValueTag) { }
+ Expected(const Expected&) = default;
+ Expected(Expected&&) = default;
+ // constexpr explicit Expected(in_place_t);
+ constexpr Expected(UnexpectedType<E> const& u) : base(ExpectedDetail::ErrorTag, u.value()) { }
+ constexpr Expected(UnexpectedType<E>&& u) : base(ExpectedDetail::ErrorTag, std::forward<UnexpectedType<E>>(u).value()) { }
+ template <class Err> constexpr Expected(UnexpectedType<Err> const& u) : base(ExpectedDetail::ErrorTag, u.value()) { }
+
+ ~Expected() = default;
+
+ Expected& operator=(const Expected& e) { type(e).swap(*this); return *this; }
+ Expected& operator=(Expected&& e) { type(WTFMove(e)).swap(*this); return *this; }
+ Expected& operator=(const UnexpectedType<E>& u) { type(u).swap(*this); return *this; } // Not in the current paper.
+ Expected& operator=(UnexpectedType<E>&& u) { type(WTFMove(u)).swap(*this); return *this; } // Not in the current paper.
+ // void emplace();
+
+ void swap(Expected& o)
+ {
+ using std::swap;
+ if (base::has && o.has) {
+ // Do nothing.
+ } else if (base::has && !o.has) {
+ ErrorType e(WTFMove(o.s.err));
+ ::new (&base::s.err) ErrorType(e);
+ swap(base::has, o.has);
+ } else if (!base::has && o.has) {
+ ::new (&o.s.err) ErrorType(WTFMove(base::s.err));
+ swap(base::has, o.has);
+ } else
+ swap(base::s.err, o.s.err);
+ }
+
+ constexpr explicit operator bool() const { return base::has; }
+ constexpr bool hasValue() const { return base::has; }
+ void value() const { !base::has ? ExpectedDetail::Throw() : void(); }
+ constexpr const E& error() const & { return !base::has ? base::s.err : (ExpectedDetail::Throw(), base::s.err); }
+ E& error() & { return !base::has ? base::s.err : (ExpectedDetail::Throw(), base::s.err); } // Not in the current paper.
+ RELAXED_CONSTEXPR E&& error() && { return !base::has ? base::s.err : (ExpectedDetail::Throw(), base::s.err); }
+ constexpr const E&& error() const && { return !base::has ? base::s.err : (ExpectedDetail::Throw(), base::s.err); } // Not in the current paper.
+ // constexpr E& error() &;
+ constexpr UnexpectedType<E> getUnexpected() const { return UnexpectedType<E>(base::s.err); }
+};
+
+template <class T, class E> constexpr bool operator==(const Expected<T, E>& x, const Expected<T, E>& y) { return bool(x) == bool(y) && (x ? x.value() == y.value() : x.error() == y.error()); }
+template <class T, class E> constexpr bool operator!=(const Expected<T, E>& x, const Expected<T, E>& y) { return !(x == y); }
+template <class T, class E> constexpr bool operator<(const Expected<T, E>& x, const Expected<T, E>& y) { return (!bool(x) && bool(y)) ? false : ((bool(x) && !bool(y)) ? true : ((bool(x) && bool(y)) ? x.value() < y.value() : x.error() < y.error())); }
+template <class T, class E> constexpr bool operator>(const Expected<T, E>& x, const Expected<T, E>& y) { return !(x == y) && !(x < y); }
+template <class T, class E> constexpr bool operator<=(const Expected<T, E>& x, const Expected<T, E>& y) { return (x == y) || (x < y); }
+template <class T, class E> constexpr bool operator>=(const Expected<T, E>& x, const Expected<T, E>& y) { return (x == y) || (x > y); }
+
+template <class E> constexpr bool operator==(const Expected<void, E>& x, const Expected<void, E>& y) { return bool(x) == bool(y) && (x ? true : x.error() == y.error()); } // Not in the current paper.
+template <class E> constexpr bool operator<(const Expected<void, E>& x, const Expected<void, E>& y) { return (!bool(x) && bool(y)) ? false : ((bool(x) && !bool(y)) ? true : ((bool(x) && bool(y)) ? false : x.error() < y.error())); } // Not in the current paper.
+
+template <class T, class E> constexpr bool operator==(const Expected<T, E>& x, const T& y) { return x == Expected<T, E>(y); }
+template <class T, class E> constexpr bool operator==(const T& x, const Expected<T, E>& y) { return Expected<T, E>(x) == y; }
+template <class T, class E> constexpr bool operator!=(const Expected<T, E>& x, const T& y) { return x != Expected<T, E>(y); }
+template <class T, class E> constexpr bool operator!=(const T& x, const Expected<T, E>& y) { return Expected<T, E>(x) != y; }
+template <class T, class E> constexpr bool operator<(const Expected<T, E>& x, const T& y) { return x < Expected<T, E>(y); }
+template <class T, class E> constexpr bool operator<(const T& x, const Expected<T, E>& y) { return Expected<T, E>(x) < y; }
+template <class T, class E> constexpr bool operator<=(const Expected<T, E>& x, const T& y) { return x <= Expected<T, E>(y); }
+template <class T, class E> constexpr bool operator<=(const T& x, const Expected<T, E>& y) { return Expected<T, E>(x) <= y; }
+template <class T, class E> constexpr bool operator>(const Expected<T, E>& x, const T& y) { return x > Expected<T, E>(y); }
+template <class T, class E> constexpr bool operator>(const T& x, const Expected<T, E>& y) { return Expected<T, E>(x) > y; }
+template <class T, class E> constexpr bool operator>=(const Expected<T, E>& x, const T& y) { return x >= Expected<T, E>(y); }
+template <class T, class E> constexpr bool operator>=(const T& x, const Expected<T, E>& y) { return Expected<T, E>(x) >= y; }
+
+template <class T, class E> constexpr bool operator==(const Expected<T, E>& x, const UnexpectedType<E>& y) { return x == Expected<T, E>(y); }
+template <class T, class E> constexpr bool operator==(const UnexpectedType<E>& x, const Expected<T, E>& y) { return Expected<T, E>(x) == y; }
+template <class T, class E> constexpr bool operator!=(const Expected<T, E>& x, const UnexpectedType<E>& y) { return x != Expected<T, E>(y); }
+template <class T, class E> constexpr bool operator!=(const UnexpectedType<E>& x, const Expected<T, E>& y) { return Expected<T, E>(x) != y; }
+template <class T, class E> constexpr bool operator<(const Expected<T, E>& x, const UnexpectedType<E>& y) { return x < Expected<T, E>(y); }
+template <class T, class E> constexpr bool operator<(const UnexpectedType<E>& x, const Expected<T, E>& y) { return Expected<T, E>(x) < y; }
+template <class T, class E> constexpr bool operator<=(const Expected<T, E>& x, const UnexpectedType<E>& y) { return x <= Expected<T, E>(y); }
+template <class T, class E> constexpr bool operator<=(const UnexpectedType<E>& x, const Expected<T, E>& y) { return Expected<T, E>(x) <= y; }
+template <class T, class E> constexpr bool operator>(const Expected<T, E>& x, const UnexpectedType<E>& y) { return x > Expected<T, E>(y); }
+template <class T, class E> constexpr bool operator>(const UnexpectedType<E>& x, const Expected<T, E>& y) { return Expected<T, E>(x) > y; }
+template <class T, class E> constexpr bool operator>=(const Expected<T, E>& x, const UnexpectedType<E>& y) { return x >= Expected<T, E>(y); }
+template <class T, class E> constexpr bool operator>=(const UnexpectedType<E>& x, const Expected<T, E>& y) { return Expected<T, E>(x) >= y; }
+
+template <typename T, typename E> void swap(Expected<T, E>& x, Expected<T, E>& y) { x.swap(y); }
+
+template <class T, class E = std::nullopt_t> constexpr Expected<std::decay_t<T>, E> makeExpected(T&& v)
+{
+ return Expected<typename std::decay<T>::type, E>(std::forward<T>(v));
+}
+template <class T, class E> constexpr Expected<T, std::decay_t<E>> makeExpectedFromError(E&& e) { return Expected<T, std::decay_t<E>>(makeUnexpected(e)); }
+template <class T, class E, class U> constexpr Expected<T, E> makeExpectedFromError(U&& u) { return Expected<T, E>(makeUnexpected(E { std::forward<U>(u) } )); }
+// template <class F, class E = std::nullopt_t> constexpr Expected<typename std::result_of<F>::type, E> makeExpected_from_call(F f);
+
+inline Expected<void, std::nullopt_t> makeExpected() { return Expected<void, std::nullopt_t>(); }
+
+} // namespace WTF
+
+namespace std {
+
+template <class T, class E> struct hash<WTF::Expected<T, E>> {
+ typedef WTF::Expected<T, E> argument_type;
+ typedef std::size_t result_type;
+ result_type operator()(argument_type const& e) const { return e ? hash<typename argument_type::ValueType> { } (e.value()) : hash<typename argument_type::ErrorType> { } (e.error()); }
+};
+
+template <class E> struct hash<WTF::Expected<void, E>> {
+ typedef WTF::Expected<void, E> argument_type;
+ typedef std::size_t result_type;
+ result_type operator()(argument_type const& e) const { return e ? 0 : hash<typename argument_type::ErrorType> { } (e.error()); }
+};
+
+}
+
+using WTF::UnexpectedType;
+using WTF::makeUnexpected;
+using WTF::Unexpect;
+using WTF::Expected;
+using WTF::makeExpected;
+using WTF::makeExpectedFromError;
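As a point of reference for the Expected<T, E> surface defined above, here is a minimal usage sketch (not part of the patch; parseByte and its error strings are hypothetical, chosen only for illustration):

    #include <wtf/Expected.h>

    // Hypothetical parser: yields the value alternative on success and the
    // error alternative otherwise.
    static WTF::Expected<int, const char*> parseByte(int n)
    {
        if (n < 0 || n > 255)
            return WTF::makeUnexpected("out of range");
        return n;
    }

    static int demo()
    {
        auto ok = parseByte(42);
        int v = ok ? ok.value() : 0; // 42; value() on the error side goes through ExpectedDetail::Throw().

        auto bad = parseByte(1000);
        return v + bad.valueOr(-1);  // valueOr() substitutes -1 because bad holds an error.
    }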
diff --git a/Source/WTF/wtf/ExportMacros.h b/Source/WTF/wtf/ExportMacros.h
index 920fdfe9e..5ae87d05d 100644
--- a/Source/WTF/wtf/ExportMacros.h
+++ b/Source/WTF/wtf/ExportMacros.h
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -89,13 +89,13 @@
#else // !USE(EXPORT_MACROS)
-#if OS(WINDOWS) && !COMPILER(GCC)
+#if OS(WINDOWS) && !COMPILER(GCC_OR_CLANG)
#if defined(BUILDING_WTF) || defined(STATICALLY_LINKED_WITH_WTF)
#define WTF_EXPORTDATA __declspec(dllexport)
#else
#define WTF_EXPORTDATA __declspec(dllimport)
#endif
-#else // !OS(WINDOWS) || COMPILER(GCC)
+#else // !OS(WINDOWS) || COMPILER(GCC_OR_CLANG)
#define WTF_EXPORTDATA
#endif
@@ -107,24 +107,6 @@
#endif // USE(EXPORT_MACROS)
-// WTF_TESTING (and WEBCORE_TESTING in PlatformExportMacros.h) is used for
-// exporting symbols which are referred from WebCoreTestSupport library.
-// Since the set of APIs is common between ports,
-// it is rather worth annotating inside the code than maintaining port specific export lists.
-#if USE(EXPORT_MACROS_FOR_TESTING)
-
-#if defined(WTF_IS_LINKED_IN_SAME_BINARY)
-#define WTF_TESTING WTF_EXPORT_DECLARATION
-#else
-#define WTF_TESTING WTF_IMPORT_DECLARATION
-#endif
-
-#else // USE(EXPORT_MACROS_FOR_TESTING)
-
-#define WTF_TESTING
-
-#endif // USE(EXPORT_MACROS_FOR_TESTING)
-
#if defined(WTF_IS_LINKED_IN_SAME_BINARY)
#define WTF_EXPORT_PRIVATE WTF_EXPORT
#else
diff --git a/Source/WTF/wtf/FastBitVector.cpp b/Source/WTF/wtf/FastBitVector.cpp
index 3abccea73..eed316975 100644
--- a/Source/WTF/wtf/FastBitVector.cpp
+++ b/Source/WTF/wtf/FastBitVector.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,14 +26,50 @@
#include "config.h"
#include "FastBitVector.h"
-#include "PrintStream.h"
-
namespace WTF {
-void FastBitVector::dump(PrintStream& out) const
+void FastBitVectorWordOwner::setEqualsSlow(const FastBitVectorWordOwner& other)
+{
+ uint32_t* newArray = static_cast<uint32_t*>(
+ fastCalloc(other.arrayLength(), sizeof(uint32_t)));
+ memcpy(newArray, other.m_words, other.arrayLength() * sizeof(uint32_t));
+ if (m_words)
+ fastFree(m_words);
+ m_words = newArray;
+ m_numBits = other.m_numBits;
+}
+
+void FastBitVectorWordOwner::resizeSlow(size_t numBits)
+{
+ size_t newLength = fastBitVectorArrayLength(numBits);
+
+ // Use fastCalloc instead of fastRealloc because we expect the common
+ // use case for this method to be initializing the size of the bitvector.
+
+ uint32_t* newArray = static_cast<uint32_t*>(fastCalloc(newLength, sizeof(uint32_t)));
+ memcpy(newArray, m_words, arrayLength() * sizeof(uint32_t));
+ if (m_words)
+ fastFree(m_words);
+ m_words = newArray;
+}
+
+void FastBitVector::clearRange(size_t begin, size_t end)
{
- for (unsigned i = 0; i < m_numBits; ++i)
- out.print(get(i) ? "1" : "-");
+ if (end - begin < 32) {
+ for (size_t i = begin; i < end; ++i)
+ at(i) = false;
+ return;
+ }
+
+ size_t endBeginSlop = (begin + 31) & ~31;
+ size_t beginEndSlop = end & ~31;
+
+ for (size_t i = begin; i < endBeginSlop; ++i)
+ at(i) = false;
+ for (size_t i = beginEndSlop; i < end; ++i)
+ at(i) = false;
+ for (size_t i = endBeginSlop / 32; i < beginEndSlop / 32; ++i)
+ m_words.word(i) = 0;
}
} // namespace WTF
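A quick worked example of the clearRange() fast path above (illustrative only, not part of the patch): with begin = 5 and end = 70, endBeginSlop = (5 + 31) & ~31 = 32 and beginEndSlop = 70 & ~31 = 64, so bits 5..31 and bits 64..69 are cleared one at a time, while word 1 (bits 32..63) is zeroed with a single store in the word loop.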
diff --git a/Source/WTF/wtf/FastBitVector.h b/Source/WTF/wtf/FastBitVector.h
index f96180d9e..9119a1d51 100644
--- a/Source/WTF/wtf/FastBitVector.h
+++ b/Source/WTF/wtf/FastBitVector.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,173 +23,558 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FastBitVector_h
-#define FastBitVector_h
+#pragma once
#include <string.h>
+#include <wtf/Atomics.h>
#include <wtf/FastMalloc.h>
+#include <wtf/PrintStream.h>
#include <wtf/StdLibExtras.h>
namespace WTF {
class PrintStream;
-class FastBitVector {
+inline size_t fastBitVectorArrayLength(size_t numBits) { return (numBits + 31) / 32; }
+
+class FastBitVectorWordView {
public:
- FastBitVector()
- : m_array(0)
- , m_numBits(0)
+ typedef FastBitVectorWordView ViewType;
+
+ FastBitVectorWordView() { }
+
+ FastBitVectorWordView(const uint32_t* array, size_t numBits)
+ : m_words(array)
+ , m_numBits(numBits)
{
}
- FastBitVector(const FastBitVector& other)
- : m_array(0)
- , m_numBits(0)
+ size_t numBits() const
+ {
+ return m_numBits;
+ }
+
+ uint32_t word(size_t index) const
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(index < fastBitVectorArrayLength(numBits()));
+ return m_words[index];
+ }
+
+private:
+ const uint32_t* m_words { nullptr };
+ size_t m_numBits { 0 };
+};
+
+class FastBitVectorWordOwner {
+public:
+ typedef FastBitVectorWordView ViewType;
+
+ FastBitVectorWordOwner() = default;
+
+ FastBitVectorWordOwner(FastBitVectorWordOwner&& other)
+ : m_words(std::exchange(other.m_words, nullptr))
+ , m_numBits(std::exchange(other.m_numBits, 0))
+ {
+ }
+
+ FastBitVectorWordOwner(const FastBitVectorWordOwner& other)
{
*this = other;
}
- ~FastBitVector()
+ ~FastBitVectorWordOwner()
{
- if (m_array)
- fastFree(m_array);
+ if (m_words)
+ fastFree(m_words);
}
- FastBitVector& operator=(const FastBitVector& other)
+ FastBitVectorWordView view() const { return FastBitVectorWordView(m_words, m_numBits); }
+
+ FastBitVectorWordOwner& operator=(const FastBitVectorWordOwner& other)
{
- size_t length = other.arrayLength();
- uint32_t* newArray = static_cast<uint32_t*>(fastCalloc(length, 4));
- memcpy(newArray, other.m_array, length * 4);
- if (m_array)
- fastFree(m_array);
- m_array = newArray;
- m_numBits = other.m_numBits;
+ if (arrayLength() != other.arrayLength())
+ setEqualsSlow(other);
+ else {
+ memcpy(m_words, other.m_words, arrayLength() * sizeof(uint32_t));
+ m_numBits = other.m_numBits;
+ }
return *this;
}
- size_t numBits() const { return m_numBits; }
+ FastBitVectorWordOwner& operator=(FastBitVectorWordOwner&& other)
+ {
+ std::swap(m_words, other.m_words);
+ std::swap(m_numBits, other.m_numBits);
+ return *this;
+ }
+
+ void setAll()
+ {
+ memset(m_words, 255, arrayLength() * sizeof(uint32_t));
+ }
+
+ void clearAll()
+ {
+ memset(m_words, 0, arrayLength() * sizeof(uint32_t));
+ }
+
+ void set(const FastBitVectorWordOwner& other)
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(m_numBits == other.m_numBits);
+ memcpy(m_words, other.m_words, arrayLength() * sizeof(uint32_t));
+ }
+
+ size_t numBits() const
+ {
+ return m_numBits;
+ }
+
+ size_t arrayLength() const
+ {
+ return fastBitVectorArrayLength(numBits());
+ }
void resize(size_t numBits)
{
- // Use fastCalloc instead of fastRealloc because we expect the common
- // use case for this method to be initializing the size of the bitvector.
-
- size_t newLength = arrayLength(numBits);
- uint32_t* newArray = static_cast<uint32_t*>(fastCalloc(newLength, 4));
- memcpy(newArray, m_array, arrayLength() * 4);
- if (m_array)
- fastFree(m_array);
- m_array = newArray;
+ if (arrayLength() != fastBitVectorArrayLength(numBits))
+ resizeSlow(numBits);
m_numBits = numBits;
}
- void setAll()
+ uint32_t word(size_t index) const
{
- memset(m_array, 255, arrayLength() * 4);
+ ASSERT_WITH_SECURITY_IMPLICATION(index < arrayLength());
+ return m_words[index];
}
- void clearAll()
+ uint32_t& word(size_t index)
{
- memset(m_array, 0, arrayLength() * 4);
+ ASSERT_WITH_SECURITY_IMPLICATION(index < arrayLength());
+ return m_words[index];
}
- void set(const FastBitVector& other)
+ const uint32_t* words() const { return m_words; }
+ uint32_t* words() { return m_words; }
+
+private:
+ WTF_EXPORT_PRIVATE void setEqualsSlow(const FastBitVectorWordOwner& other);
+ WTF_EXPORT_PRIVATE void resizeSlow(size_t numBits);
+
+ uint32_t* m_words { nullptr };
+ size_t m_numBits { 0 };
+};
+
+template<typename Left, typename Right>
+class FastBitVectorAndWords {
+public:
+ typedef FastBitVectorAndWords ViewType;
+
+ FastBitVectorAndWords(const Left& left, const Right& right)
+ : m_left(left)
+ , m_right(right)
{
- ASSERT(m_numBits == other.m_numBits);
- memcpy(m_array, other.m_array, arrayLength() * 4);
+ ASSERT_WITH_SECURITY_IMPLICATION(m_left.numBits() == m_right.numBits());
}
- bool setAndCheck(const FastBitVector& other)
+ FastBitVectorAndWords view() const { return *this; }
+
+ size_t numBits() const
{
- bool changed = false;
- ASSERT(m_numBits == other.m_numBits);
- for (unsigned i = arrayLength(); i--;) {
- changed |= m_array[i] != other.m_array[i];
- m_array[i] = other.m_array[i];
+ return m_left.numBits();
+ }
+
+ uint32_t word(size_t index) const
+ {
+ return m_left.word(index) & m_right.word(index);
+ }
+
+private:
+ Left m_left;
+ Right m_right;
+};
+
+template<typename Left, typename Right>
+class FastBitVectorOrWords {
+public:
+ typedef FastBitVectorOrWords ViewType;
+
+ FastBitVectorOrWords(const Left& left, const Right& right)
+ : m_left(left)
+ , m_right(right)
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(m_left.numBits() == m_right.numBits());
+ }
+
+ FastBitVectorOrWords view() const { return *this; }
+
+ size_t numBits() const
+ {
+ return m_left.numBits();
+ }
+
+ uint32_t word(size_t index) const
+ {
+ return m_left.word(index) | m_right.word(index);
+ }
+
+private:
+ Left m_left;
+ Right m_right;
+};
+
+template<typename View>
+class FastBitVectorNotWords {
+public:
+ typedef FastBitVectorNotWords ViewType;
+
+ FastBitVectorNotWords(const View& view)
+ : m_view(view)
+ {
+ }
+
+ FastBitVectorNotWords view() const { return *this; }
+
+ size_t numBits() const
+ {
+ return m_view.numBits();
+ }
+
+ uint32_t word(size_t index) const
+ {
+ return ~m_view.word(index);
+ }
+
+private:
+ View m_view;
+};
+
+class FastBitVector;
+
+template<typename Words>
+class FastBitVectorImpl {
+public:
+ FastBitVectorImpl()
+ : m_words()
+ {
+ }
+
+ FastBitVectorImpl(const Words& words)
+ : m_words(words)
+ {
+ }
+
+ FastBitVectorImpl(Words&& words)
+ : m_words(WTFMove(words))
+ {
+ }
+
+ size_t numBits() const { return m_words.numBits(); }
+ size_t size() const { return numBits(); }
+
+ size_t arrayLength() const { return fastBitVectorArrayLength(numBits()); }
+
+ template<typename Other>
+ bool operator==(const Other& other) const
+ {
+ if (numBits() != other.numBits())
+ return false;
+ for (size_t i = arrayLength(); i--;) {
+ if (m_words.word(i) != other.m_words.word(i))
+ return false;
}
- return changed;
+ return true;
}
- bool equals(const FastBitVector& other) const
+ template<typename Other>
+ bool operator!=(const Other& other) const
{
- ASSERT(m_numBits == other.m_numBits);
- // Use my own comparison loop because memcmp does more than what I want
- // and bcmp is not as standard.
- for (unsigned i = arrayLength(); i--;) {
- if (m_array[i] != other.m_array[i])
+ return !(*this == other);
+ }
+
+ bool at(size_t index) const
+ {
+ return atImpl(index);
+ }
+
+ bool operator[](size_t index) const
+ {
+ return atImpl(index);
+ }
+
+ size_t bitCount() const
+ {
+ size_t result = 0;
+ for (size_t index = arrayLength(); index--;)
+ result += WTF::bitCount(m_words.word(index));
+ return result;
+ }
+
+ bool isEmpty() const
+ {
+ for (size_t index = arrayLength(); index--;) {
+ if (m_words.word(index))
return false;
}
return true;
}
- void merge(const FastBitVector& other)
+ template<typename OtherWords>
+ FastBitVectorImpl<FastBitVectorAndWords<typename Words::ViewType, typename OtherWords::ViewType>> operator&(const FastBitVectorImpl<OtherWords>& other) const
{
- ASSERT(m_numBits == other.m_numBits);
- for (unsigned i = arrayLength(); i--;)
- m_array[i] |= other.m_array[i];
+ return FastBitVectorImpl<FastBitVectorAndWords<typename Words::ViewType, typename OtherWords::ViewType>>(FastBitVectorAndWords<typename Words::ViewType, typename OtherWords::ViewType>(wordView(), other.wordView()));
}
- void filter(const FastBitVector& other)
+ template<typename OtherWords>
+ FastBitVectorImpl<FastBitVectorOrWords<typename Words::ViewType, typename OtherWords::ViewType>> operator|(const FastBitVectorImpl<OtherWords>& other) const
{
- ASSERT(m_numBits == other.m_numBits);
- for (unsigned i = arrayLength(); i--;)
- m_array[i] &= other.m_array[i];
+ return FastBitVectorImpl<FastBitVectorOrWords<typename Words::ViewType, typename OtherWords::ViewType>>(FastBitVectorOrWords<typename Words::ViewType, typename OtherWords::ViewType>(wordView(), other.wordView()));
}
- void exclude(const FastBitVector& other)
+ FastBitVectorImpl<FastBitVectorNotWords<typename Words::ViewType>> operator~() const
{
- ASSERT(m_numBits == other.m_numBits);
- for (unsigned i = arrayLength(); i--;)
- m_array[i] &= ~other.m_array[i];
+ return FastBitVectorImpl<FastBitVectorNotWords<typename Words::ViewType>>(FastBitVectorNotWords<typename Words::ViewType>(wordView()));
}
- void set(size_t i)
+ template<typename Func>
+ ALWAYS_INLINE void forEachSetBit(const Func& func) const
{
- ASSERT_WITH_SECURITY_IMPLICATION(i < m_numBits);
- m_array[i >> 5] |= (1 << (i & 31));
+ size_t n = arrayLength();
+ for (size_t i = 0; i < n; ++i) {
+ uint32_t word = m_words.word(i);
+ size_t j = i * 32;
+ while (word) {
+ if (word & 1)
+ func(j);
+ word >>= 1;
+ j++;
+ }
+ }
}
- void clear(size_t i)
+ template<typename Func>
+ ALWAYS_INLINE void forEachClearBit(const Func& func) const
{
- ASSERT_WITH_SECURITY_IMPLICATION(i < m_numBits);
- m_array[i >> 5] &= ~(1 << (i & 31));
+ (~*this).forEachSetBit(func);
}
- void set(size_t i, bool value)
+ template<typename Func>
+ void forEachBit(bool value, const Func& func) const
{
if (value)
- set(i);
+ forEachSetBit(func);
else
- clear(i);
+ forEachClearBit(func);
}
- bool get(size_t i) const
+ // Starts looking for bits at the index you pass. If that index contains the value you want,
+ // then it will return that index. Returns numBits when we get to the end. For example, you
+ // can write a loop to iterate over all set bits like this:
+ //
+ // for (size_t i = 0; i < bits.numBits(); i = bits.findBit(i + 1, true))
+ // ...
+ ALWAYS_INLINE size_t findBit(size_t startIndex, bool value) const
{
- ASSERT_WITH_SECURITY_IMPLICATION(i < m_numBits);
- return !!(m_array[i >> 5] & (1 << (i & 31)));
+ // If value is true, this produces 0. If value is false, this produces UINT_MAX. It's
+ // written this way so that it performs well regardless of whether value is a constant.
+ uint32_t skipValue = -(static_cast<uint32_t>(value) ^ 1);
+
+ size_t numWords = fastBitVectorArrayLength(m_words.numBits());
+
+ size_t wordIndex = startIndex / 32;
+ size_t startIndexInWord = startIndex - wordIndex * 32;
+
+ while (wordIndex < numWords) {
+ uint32_t word = m_words.word(wordIndex);
+ if (word != skipValue) {
+ size_t index = startIndexInWord;
+ if (findBitInWord(word, index, 32, value))
+ return wordIndex * 32 + index;
+ }
+
+ wordIndex++;
+ startIndexInWord = 0;
+ }
+
+ return numBits();
}
- size_t bitCount() const
+ ALWAYS_INLINE size_t findSetBit(size_t index) const
{
- size_t result = 0;
- for (unsigned i = arrayLength(); i--;)
- result += WTF::bitCount(m_array[i]);
- return result;
+ return findBit(index, true);
+ }
+
+ ALWAYS_INLINE size_t findClearBit(size_t index) const
+ {
+ return findBit(index, false);
+ }
+
+ void dump(PrintStream& out) const
+ {
+ for (size_t i = 0; i < numBits(); ++i)
+ out.print((*this)[i] ? "1" : "-");
}
- WTF_EXPORT_PRIVATE void dump(PrintStream&) const;
+ typename Words::ViewType wordView() const { return m_words.view(); }
private:
- static size_t arrayLength(size_t numBits) { return (numBits + 31) >> 5; }
- size_t arrayLength() const { return arrayLength(m_numBits); }
+ // You'd think that we could remove this friend if we used protected, but you'd be wrong,
+ // because templates.
+ friend class FastBitVector;
+
+ bool atImpl(size_t index) const
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(index < numBits());
+ return !!(m_words.word(index >> 5) & (1 << (index & 31)));
+ }
- uint32_t* m_array; // No, this can't be an std::unique_ptr<uint32_t[]>.
- size_t m_numBits;
+ Words m_words;
};
-} // namespace WTF
+class FastBitVector : public FastBitVectorImpl<FastBitVectorWordOwner> {
+public:
+ FastBitVector() { }
+
+ FastBitVector(const FastBitVector&) = default;
+ FastBitVector& operator=(const FastBitVector&) = default;
+
+ template<typename OtherWords>
+ FastBitVector(const FastBitVectorImpl<OtherWords>& other)
+ {
+ *this = other;
+ }
+
+ template<typename OtherWords>
+ FastBitVector& operator=(const FastBitVectorImpl<OtherWords>& other)
+ {
+ if (UNLIKELY(numBits() != other.numBits()))
+ resize(other.numBits());
+
+ for (unsigned i = arrayLength(); i--;)
+ m_words.word(i) = other.m_words.word(i);
+ return *this;
+ }
+
+ void resize(size_t numBits)
+ {
+ m_words.resize(numBits);
+ }
+
+ void setAll()
+ {
+ m_words.setAll();
+ }
+
+ void clearAll()
+ {
+ m_words.clearAll();
+ }
+
+ WTF_EXPORT_PRIVATE void clearRange(size_t begin, size_t end);
-using WTF::FastBitVector;
+ // Returns true if the contents of this bitvector changed.
+ template<typename OtherWords>
+ bool setAndCheck(const FastBitVectorImpl<OtherWords>& other)
+ {
+ bool changed = false;
+ ASSERT_WITH_SECURITY_IMPLICATION(numBits() == other.numBits());
+ for (unsigned i = arrayLength(); i--;) {
+ changed |= m_words.word(i) != other.m_words.word(i);
+ m_words.word(i) = other.m_words.word(i);
+ }
+ return changed;
+ }
+
+ template<typename OtherWords>
+ FastBitVector& operator|=(const FastBitVectorImpl<OtherWords>& other)
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(numBits() == other.numBits());
+ for (unsigned i = arrayLength(); i--;)
+ m_words.word(i) |= other.m_words.word(i);
+ return *this;
+ }
+
+ template<typename OtherWords>
+ FastBitVector& operator&=(const FastBitVectorImpl<OtherWords>& other)
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(numBits() == other.numBits());
+ for (unsigned i = arrayLength(); i--;)
+ m_words.word(i) &= other.m_words.word(i);
+ return *this;
+ }
+
+ bool at(size_t index) const
+ {
+ return atImpl(index);
+ }
+
+ bool operator[](size_t index) const
+ {
+ return atImpl(index);
+ }
+
+ class BitReference {
+ public:
+ BitReference() { }
+
+ BitReference(uint32_t* word, uint32_t mask)
+ : m_word(word)
+ , m_mask(mask)
+ {
+ }
+
+ explicit operator bool() const
+ {
+ return !!(*m_word & m_mask);
+ }
+
+ BitReference& operator=(bool value)
+ {
+ if (value)
+ *m_word |= m_mask;
+ else
+ *m_word &= ~m_mask;
+ return *this;
+ }
+
+ private:
+ uint32_t* m_word { nullptr };
+ uint32_t m_mask { 0 };
+ };
+
+ BitReference at(size_t index)
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(index < numBits());
+ return BitReference(&m_words.word(index >> 5), 1 << (index & 31));
+ }
+
+ BitReference operator[](size_t index)
+ {
+ return at(index);
+ }
+
+ // Returns true if the contents changed.
+ ALWAYS_INLINE bool atomicSetAndCheck(size_t index, bool value)
+ {
+ uint32_t* pointer = &m_words.word(index >> 5);
+ uint32_t mask = 1 << (index & 31);
+ for (;;) {
+ uint32_t oldValue = *pointer;
+ uint32_t newValue;
+ if (value) {
+ if (oldValue & mask)
+ return false;
+ newValue = oldValue | mask;
+ } else {
+ if (!(oldValue & mask))
+ return false;
+ newValue = oldValue & ~mask;
+ }
+ if (atomicCompareExchangeWeakRelaxed(pointer, oldValue, newValue))
+ return true;
+ }
+ }
+};
-#endif // FastBitVector_h
+} // namespace WTF
+using WTF::FastBitVector;
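Because the rewritten header replaces the old mutating calls (set(), clear(), filter(), merge()) with BitReference writes, lazy word views, and operator overloads, a minimal usage sketch may help (not part of the patch; all names are local to the example):

    #include <wtf/FastBitVector.h>

    static size_t demo()
    {
        WTF::FastBitVector a;
        a.resize(64);
        a[3] = true;                   // Write through BitReference.
        a.atomicSetAndCheck(40, true); // Returns true: the bit changed.

        WTF::FastBitVector b;
        b.resize(64);
        b[3] = true;

        // operator&, operator| and operator~ build lazy word views; assigning
        // one to a FastBitVector materializes it word by word.
        WTF::FastBitVector both = a & b; // Only bit 3 survives.

        size_t count = 0;
        (a | b).forEachSetBit([&](size_t) { ++count; }); // Visits bits 3 and 40.

        // findBit(i, true) returns the first set bit at index >= i, or numBits().
        return count + both.findBit(0, true); // 2 + 3
    }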
diff --git a/Source/WTF/wtf/FastMalloc.cpp b/Source/WTF/wtf/FastMalloc.cpp
index dcf0937b6..478a198ec 100644
--- a/Source/WTF/wtf/FastMalloc.cpp
+++ b/Source/WTF/wtf/FastMalloc.cpp
@@ -1,209 +1,77 @@
-// Copyright (c) 2005, 2007, Google Inc.
-// All rights reserved.
-// Copyright (C) 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <opensource@google.com>
-//
-// A malloc that uses a per-thread cache to satisfy small malloc requests.
-// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
-//
-// See doc/tcmalloc.html for a high-level
-// description of how this malloc works.
-//
-// SYNCHRONIZATION
-// 1. The thread-specific lists are accessed without acquiring any locks.
-// This is safe because each such list is only accessed by one thread.
-// 2. We have a lock per central free-list, and hold it while manipulating
-// the central free list for a particular size.
-// 3. The central page allocator is protected by "pageheap_lock".
-// 4. The pagemap (which maps from page-number to descriptor),
-// can be read without holding any locks, and written while holding
-// the "pageheap_lock".
-// 5. To improve performance, a subset of the information one can get
-// from the pagemap is cached in a data structure, pagemap_cache_,
-// that atomically reads and writes its entries. This cache can be
-// read and written without locking.
-//
-// This multi-threaded access to the pagemap is safe for fairly
-// subtle reasons. We basically assume that when an object X is
-// allocated by thread A and deallocated by thread B, there must
-// have been appropriate synchronization in the handoff of object
-// X from thread A to thread B. The same logic applies to pagemap_cache_.
-//
-// THE PAGEID-TO-SIZECLASS CACHE
-// Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
-// returns 0 for a particular PageID then that means "no information," not that
-// the sizeclass is 0. The cache may have stale information for pages that do
-// not hold the beginning of any free()'able object. Staleness is eliminated
-// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
-// do_memalign() for all other relevant pages.
-//
-// TODO: Bias reclamation to larger addresses
-// TODO: implement mallinfo/mallopt
-// TODO: Better testing
-//
-// 9/28/2003 (new page-level allocator replaces ptmalloc2):
-// * malloc/free of small objects goes from ~300 ns to ~50 ns.
-// * allocation of a reasonably complicated struct
-// goes from about 1100 ns to about 300 ns.
+/*
+ * Copyright (c) 2005, 2007, Google Inc. All rights reserved.
+ * Copyright (C) 2005-2009, 2011, 2015-2016 Apple Inc. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
#include "config.h"
#include "FastMalloc.h"
-#include "Assertions.h"
+#include "CheckedArithmetic.h"
#include "CurrentTime.h"
-
#include <limits>
+#include <string.h>
+#include <wtf/DataLog.h>
+
#if OS(WINDOWS)
#include <windows.h>
#else
#include <pthread.h>
+#include <sys/resource.h>
#endif
-#include <string.h>
-#include <wtf/DataLog.h>
-#include <wtf/StdLibExtras.h>
#if OS(DARWIN)
+#include <mach/mach_init.h>
#include <malloc/malloc.h>
#endif
-#ifndef NO_TCMALLOC_SAMPLES
-#ifdef WTF_CHANGES
-#define NO_TCMALLOC_SAMPLES
-#endif
-#endif
-
-#if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC) && defined(NDEBUG)
-#define FORCE_SYSTEM_MALLOC 0
-#else
-#define FORCE_SYSTEM_MALLOC 1
-#endif
-
-// Harden the pointers stored in the TCMalloc linked lists
-#define ENABLE_TCMALLOC_HARDENING 1
-
-// Use a background thread to periodically scavenge memory to release back to the system
-#if PLATFORM(IOS)
-#define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 0
-#else
-#define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1
-#endif
-
-#ifndef NDEBUG
namespace WTF {
-#if OS(WINDOWS)
-
-// TLS_OUT_OF_INDEXES is not defined on WinCE.
-#ifndef TLS_OUT_OF_INDEXES
-#define TLS_OUT_OF_INDEXES 0xffffffff
-#endif
-
-static DWORD isForibiddenTlsIndex = TLS_OUT_OF_INDEXES;
-static const LPVOID kTlsAllowValue = reinterpret_cast<LPVOID>(0); // Must be zero.
-static const LPVOID kTlsForbiddenValue = reinterpret_cast<LPVOID>(1);
-
-#if !ASSERT_DISABLED
-static bool isForbidden()
-{
- // By default, fastMalloc is allowed so we don't allocate the
- // tls index unless we're asked to make it forbidden. If TlsSetValue
- // has not been called on a thread, the value returned by TlsGetValue is 0.
- return (isForibiddenTlsIndex != TLS_OUT_OF_INDEXES) && (TlsGetValue(isForibiddenTlsIndex) == kTlsForbiddenValue);
-}
-#endif
-
-void fastMallocForbid()
-{
- if (isForibiddenTlsIndex == TLS_OUT_OF_INDEXES)
- isForibiddenTlsIndex = TlsAlloc(); // a little racey, but close enough for debug only
- TlsSetValue(isForibiddenTlsIndex, kTlsForbiddenValue);
-}
-
-void fastMallocAllow()
-{
- if (isForibiddenTlsIndex == TLS_OUT_OF_INDEXES)
- return;
- TlsSetValue(isForibiddenTlsIndex, kTlsAllowValue);
-}
-
-#else // !OS(WINDOWS)
-
-static pthread_key_t isForbiddenKey;
-static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
-static void initializeIsForbiddenKey()
-{
- pthread_key_create(&isForbiddenKey, 0);
-}
-
-#if !ASSERT_DISABLED
-static bool isForbidden()
-{
- pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
- return !!pthread_getspecific(isForbiddenKey);
-}
-#endif
+#if !defined(NDEBUG)
+namespace {
+size_t maxSingleAllocationSize = std::numeric_limits<size_t>::max();
+};
-void fastMallocForbid()
+void fastSetMaxSingleAllocationSize(size_t size)
{
- pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
- pthread_setspecific(isForbiddenKey, &isForbiddenKey);
+ maxSingleAllocationSize = size;
}
-void fastMallocAllow()
-{
- pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
- pthread_setspecific(isForbiddenKey, 0);
-}
-#endif // OS(WINDOWS)
+#define ASSERT_IS_WITHIN_LIMIT(size) do { \
+ size_t size__ = (size); \
+ ASSERT_WITH_MESSAGE((size__) <= maxSingleAllocationSize, "Requested size (%zu) exceeds max single allocation size set for testing (%zu)", (size__), maxSingleAllocationSize); \
+ } while (false)
-} // namespace WTF
-#endif // NDEBUG
+#define FAIL_IF_EXCEEDS_LIMIT(size) do { \
+ if (UNLIKELY((size) > maxSingleAllocationSize)) \
+ return nullptr; \
+ } while (false)
-namespace WTF {
+#else // !defined(NDEBUG)
+#define ASSERT_IS_WITHIN_LIMIT(size)
+#define FAIL_IF_EXCEEDS_LIMIT(size)
-namespace Internal {
-#if !ENABLE(WTF_MALLOC_VALIDATION)
-WTF_EXPORT_PRIVATE void fastMallocMatchFailed(void*);
-#else
-COMPILE_ASSERT(((sizeof(ValidationHeader) % sizeof(AllocAlignmentInteger)) == 0), ValidationHeader_must_produce_correct_alignment);
-#endif
-
-NO_RETURN_DUE_TO_CRASH void fastMallocMatchFailed(void*)
-{
- CRASH();
-}
-
-} // namespace Internal
-
+#endif // !defined(NDEBUG)
void* fastZeroedMalloc(size_t n)
{
@@ -231,7 +99,7 @@ TryMallocReturnValue tryFastZeroedMalloc(size_t n)
} // namespace WTF
-#if FORCE_SYSTEM_MALLOC
+#if defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
#if OS(WINDOWS)
#include <malloc.h>
@@ -239,6 +107,11 @@ TryMallocReturnValue tryFastZeroedMalloc(size_t n)
namespace WTF {
+bool isFastMallocEnabled()
+{
+ return false;
+}
+
size_t fastMallocGoodSize(size_t bytes)
{
#if OS(DARWIN)
@@ -248,43 +121,65 @@ size_t fastMallocGoodSize(size_t bytes)
#endif
}
-TryMallocReturnValue tryFastMalloc(size_t n)
+#if OS(WINDOWS)
+
+void* fastAlignedMalloc(size_t alignment, size_t size)
{
- ASSERT(!isForbidden());
+ ASSERT_IS_WITHIN_LIMIT(size);
+ void* p = _aligned_malloc(size, alignment);
+ if (UNLIKELY(!p))
+ CRASH();
+ return p;
+}
-#if ENABLE(WTF_MALLOC_VALIDATION)
- if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n) // If overflow would occur...
- return 0;
+void* tryFastAlignedMalloc(size_t alignment, size_t size)
+{
+ FAIL_IF_EXCEEDS_LIMIT(size);
+ return _aligned_malloc(size, alignment);
+}
+
+void fastAlignedFree(void* p)
+{
+ _aligned_free(p);
+}
- void* result = malloc(n + Internal::ValidationBufferSize);
- if (!result)
- return 0;
- Internal::ValidationHeader* header = static_cast<Internal::ValidationHeader*>(result);
- header->m_size = n;
- header->m_type = Internal::AllocTypeMalloc;
- header->m_prefix = static_cast<unsigned>(Internal::ValidationPrefix);
- result = header + 1;
- *Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
- fastMallocValidate(result);
- return result;
#else
+
+void* fastAlignedMalloc(size_t alignment, size_t size)
+{
+ ASSERT_IS_WITHIN_LIMIT(size);
+ void* p = nullptr;
+ posix_memalign(&p, alignment, size);
+ if (UNLIKELY(!p))
+ CRASH();
+ return p;
+}
+
+void* tryFastAlignedMalloc(size_t alignment, size_t size)
+{
+ FAIL_IF_EXCEEDS_LIMIT(size);
+ void* p = nullptr;
+ posix_memalign(&p, alignment, size);
+ return p;
+}
+
+void fastAlignedFree(void* p)
+{
+ free(p);
+}
+
+#endif // OS(WINDOWS)
+
+TryMallocReturnValue tryFastMalloc(size_t n)
+{
+ FAIL_IF_EXCEEDS_LIMIT(n);
return malloc(n);
-#endif
}
void* fastMalloc(size_t n)
{
- ASSERT(!isForbidden());
-
-#if ENABLE(WTF_MALLOC_VALIDATION)
- TryMallocReturnValue returnValue = tryFastMalloc(n);
- void* result;
- if (!returnValue.getValue(result))
- CRASH();
-#else
+ ASSERT_IS_WITHIN_LIMIT(n);
void* result = malloc(n);
-#endif
-
if (!result)
CRASH();
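For context on how the allocation entry points above divide responsibilities (a sketch of intended usage, not part of the patch): the crashing variants go through ASSERT_IS_WITHIN_LIMIT and CRASH() on failure, while the try* variants go through FAIL_IF_EXCEEDS_LIMIT and hand back a null result that callers unwrap through TryMallocReturnValue, mirroring the pattern removed above:

    #include <wtf/FastMalloc.h>

    static void* allocateOrNull(size_t bytes)
    {
        WTF::TryMallocReturnValue returnValue = WTF::tryFastMalloc(bytes);
        void* result = nullptr;
        if (!returnValue.getValue(result)) // getValue() is false when the allocation failed.
            return nullptr;
        return result; // Release with WTF::fastFree(result).
    }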
@@ -293,38 +188,14 @@ void* fastMalloc(size_t n)
TryMallocReturnValue tryFastCalloc(size_t n_elements, size_t element_size)
{
- ASSERT(!isForbidden());
-
-#if ENABLE(WTF_MALLOC_VALIDATION)
- size_t totalBytes = n_elements * element_size;
- if (n_elements > 1 && element_size && (totalBytes / element_size) != n_elements)
- return 0;
-
- TryMallocReturnValue returnValue = tryFastMalloc(totalBytes);
- void* result;
- if (!returnValue.getValue(result))
- return 0;
- memset(result, 0, totalBytes);
- fastMallocValidate(result);
- return result;
-#else
+ FAIL_IF_EXCEEDS_LIMIT(n_elements * element_size);
return calloc(n_elements, element_size);
-#endif
}
void* fastCalloc(size_t n_elements, size_t element_size)
{
- ASSERT(!isForbidden());
-
-#if ENABLE(WTF_MALLOC_VALIDATION)
- TryMallocReturnValue returnValue = tryFastCalloc(n_elements, element_size);
- void* result;
- if (!returnValue.getValue(result))
- CRASH();
-#else
+ ASSERT_IS_WITHIN_LIMIT(n_elements * element_size);
void* result = calloc(n_elements, element_size);
-#endif
-
if (!result)
CRASH();
@@ -333,65 +204,20 @@ void* fastCalloc(size_t n_elements, size_t element_size)
void fastFree(void* p)
{
- ASSERT(!isForbidden());
-
-#if ENABLE(WTF_MALLOC_VALIDATION)
- if (!p)
- return;
-
- fastMallocMatchValidateFree(p, Internal::AllocTypeMalloc);
- Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(p);
- memset(p, 0xCC, header->m_size);
- free(header);
-#else
free(p);
-#endif
-}
-
-TryMallocReturnValue tryFastRealloc(void* p, size_t n)
-{
- ASSERT(!isForbidden());
-
-#if ENABLE(WTF_MALLOC_VALIDATION)
- if (p) {
- if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n) // If overflow would occur...
- return 0;
- fastMallocValidate(p);
- Internal::ValidationHeader* result = static_cast<Internal::ValidationHeader*>(realloc(Internal::fastMallocValidationHeader(p), n + Internal::ValidationBufferSize));
- if (!result)
- return 0;
- result->m_size = n;
- result = result + 1;
- *fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
- fastMallocValidate(result);
- return result;
- } else {
- return fastMalloc(n);
- }
-#else
- return realloc(p, n);
-#endif
}
void* fastRealloc(void* p, size_t n)
{
- ASSERT(!isForbidden());
-
-#if ENABLE(WTF_MALLOC_VALIDATION)
- TryMallocReturnValue returnValue = tryFastRealloc(p, n);
- void* result;
- if (!returnValue.getValue(result))
- CRASH();
-#else
+ ASSERT_IS_WITHIN_LIMIT(n);
void* result = realloc(p, n);
-#endif
-
if (!result)
CRASH();
return result;
}
void releaseFastMallocFreeMemory() { }
+void releaseFastMallocFreeMemoryForThisThread() { }
FastMallocStatistics fastMallocStatistics()
{
@@ -401,9 +227,7 @@ FastMallocStatistics fastMallocStatistics()
size_t fastMallocSize(const void* p)
{
-#if ENABLE(WTF_MALLOC_VALIDATION)
- return Internal::fastMallocValidationHeader(const_cast<void*>(p))->m_size;
-#elif OS(DARWIN)
+#if OS(DARWIN)
return malloc_size(p);
#elif OS(WINDOWS)
return _msize(const_cast<void*>(p));
@@ -415,4722 +239,127 @@ size_t fastMallocSize(const void* p)
} // namespace WTF
-#if OS(DARWIN)
-// This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
-// It will never be used in this case, so its type and value are less interesting than its presence.
-extern "C" WTF_EXPORT_PRIVATE const int jscore_fastmalloc_introspection = 0;
-#endif
-
-#else // FORCE_SYSTEM_MALLOC
-
-#include "TCPackedCache.h"
-#include "TCPageMap.h"
-#include "TCSpinLock.h"
-#include "TCSystemAlloc.h"
-#include "ThreadSpecific.h"
-#include <algorithm>
-#if USE(PTHREADS)
-#include <pthread.h>
-#endif
-#include <stdarg.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-#if HAVE(ERRNO_H)
-#include <errno.h>
-#endif
-#if OS(UNIX)
-#include <unistd.h>
-#endif
-#if OS(WINDOWS)
-#ifndef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN
-#endif
-#include <windows.h>
-#endif
-
-#ifdef WTF_CHANGES
-
-#if OS(DARWIN)
-#include <wtf/HashSet.h>
-#include <wtf/Vector.h>
-#endif
-
-#if HAVE(DISPATCH_H)
-#include <dispatch/dispatch.h>
-#endif
-
-#ifdef __has_include
-#if __has_include(<System/pthread_machdep.h>)
-
-#include <System/pthread_machdep.h>
-
-#if defined(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0)
-#define WTF_USE_PTHREAD_GETSPECIFIC_DIRECT 1
-#endif
-
-#endif
-#endif
-
-#ifndef PRIuS
-#define PRIuS "zu"
-#endif
+#else // defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
-// Calling pthread_getspecific through a global function pointer is faster than a normal
-// call to the function on Mac OS X, and it's used in performance-critical code. So we
-// use a function pointer. But that's not necessarily faster on other platforms, and we had
-// problems with this technique on Windows, so we'll do this only on Mac OS X.
-#if OS(DARWIN)
-#if !USE(PTHREAD_GETSPECIFIC_DIRECT)
-static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
-#define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
-#else
-#define pthread_getspecific(key) _pthread_getspecific_direct(key)
-#define pthread_setspecific(key, val) _pthread_setspecific_direct(key, (val))
-#endif
-#endif
-
-#define DEFINE_VARIABLE(type, name, value, meaning) \
- namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
- type FLAGS_##name(value); \
- char FLAGS_no##name; \
- } \
- using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
-
-#define DEFINE_int64(name, value, meaning) \
- DEFINE_VARIABLE(int64_t, name, value, meaning)
-
-#define DEFINE_double(name, value, meaning) \
- DEFINE_VARIABLE(double, name, value, meaning)
+#include <bmalloc/bmalloc.h>
namespace WTF {
-#define malloc fastMalloc
-#define calloc fastCalloc
-#define free fastFree
-#define realloc fastRealloc
-
-#define MESSAGE LOG_ERROR
-#define CHECK_CONDITION ASSERT
-
-#if !OS(DARWIN)
-static const char kLLHardeningMask = 0;
-#endif
-
-template <unsigned> struct EntropySource;
-template <> struct EntropySource<4> {
- static uint32_t value()
- {
-#if OS(DARWIN)
- return arc4random();
-#else
- return static_cast<uint32_t>(static_cast<uintptr_t>(currentTime() * 10000) ^ reinterpret_cast<uintptr_t>(&kLLHardeningMask));
-#endif
- }
-};
-
-template <> struct EntropySource<8> {
- static uint64_t value()
- {
- return EntropySource<4>::value() | (static_cast<uint64_t>(EntropySource<4>::value()) << 32);
- }
-};
-
-#if ENABLE(TCMALLOC_HARDENING)
-/*
- * To make it harder to exploit use-after free style exploits
- * we mask the addresses we put into our linked lists with the
- * address of kLLHardeningMask. Due to ASLR the address of
- * kLLHardeningMask should be sufficiently randomized to make direct
- * freelist manipulation much more difficult.
- */
-enum {
- MaskKeyShift = 13
-};
-
-static ALWAYS_INLINE uintptr_t internalEntropyValue()
+bool isFastMallocEnabled()
{
- static uintptr_t value = EntropySource<sizeof(uintptr_t)>::value() | 1;
- ASSERT(value);
- return value;
-}
-
-#define HARDENING_ENTROPY internalEntropyValue()
-#define ROTATE_VALUE(value, amount) (((value) >> (amount)) | ((value) << (sizeof(value) * 8 - (amount))))
-#if COMPILER(MSVC)
-#define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (reinterpret_cast<decltype(ptr)>(reinterpret_cast<uintptr_t>(ptr)^(ROTATE_VALUE(reinterpret_cast<uintptr_t>(key), MaskKeyShift)^entropy)))
-#else
-#define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (reinterpret_cast<__typeof__(ptr)>(reinterpret_cast<uintptr_t>(ptr)^(ROTATE_VALUE(reinterpret_cast<uintptr_t>(key), MaskKeyShift)^entropy)))
-#endif
-
-static ALWAYS_INLINE uint32_t freedObjectStartPoison()
-{
- static uint32_t value = EntropySource<sizeof(uint32_t)>::value() | 1;
- ASSERT(value);
- return value;
-}
-
-static ALWAYS_INLINE uint32_t freedObjectEndPoison()
-{
- static uint32_t value = EntropySource<sizeof(uint32_t)>::value() | 1;
- ASSERT(value);
- return value;
-}
-
-#define PTR_TO_UINT32(ptr) static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr))
-#define END_POISON_INDEX(allocationSize) (((allocationSize) - sizeof(uint32_t)) / sizeof(uint32_t))
-#define POISON_ALLOCATION(allocation, allocationSize) do { \
- ASSERT((allocationSize) >= 2 * sizeof(uint32_t)); \
- reinterpret_cast<uint32_t*>(allocation)[0] = 0xbadbeef1; \
- reinterpret_cast<uint32_t*>(allocation)[1] = 0xbadbeef3; \
- if ((allocationSize) < 4 * sizeof(uint32_t)) \
- break; \
- reinterpret_cast<uint32_t*>(allocation)[2] = 0xbadbeef5; \
- reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] = 0xbadbeef7; \
-} while (false);
-
-#define POISON_DEALLOCATION_EXPLICIT(allocation, allocationSize, startPoison, endPoison) do { \
- ASSERT((allocationSize) >= 2 * sizeof(uint32_t)); \
- reinterpret_cast_ptr<uint32_t*>(allocation)[0] = 0xbadbeef9; \
- reinterpret_cast_ptr<uint32_t*>(allocation)[1] = 0xbadbeefb; \
- if ((allocationSize) < 4 * sizeof(uint32_t)) \
- break; \
- reinterpret_cast_ptr<uint32_t*>(allocation)[2] = (startPoison) ^ PTR_TO_UINT32(allocation); \
- reinterpret_cast_ptr<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] = (endPoison) ^ PTR_TO_UINT32(allocation); \
-} while (false)
-
-#define POISON_DEALLOCATION(allocation, allocationSize) \
- POISON_DEALLOCATION_EXPLICIT(allocation, (allocationSize), freedObjectStartPoison(), freedObjectEndPoison())
-
-#define MAY_BE_POISONED(allocation, allocationSize) (((allocationSize) >= 4 * sizeof(uint32_t)) && ( \
- (reinterpret_cast<uint32_t*>(allocation)[2] == (freedObjectStartPoison() ^ PTR_TO_UINT32(allocation))) || \
- (reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] == (freedObjectEndPoison() ^ PTR_TO_UINT32(allocation))) \
-))
-
-#define IS_DEFINITELY_POISONED(allocation, allocationSize) (((allocationSize) < 4 * sizeof(uint32_t)) || ( \
- (reinterpret_cast<uint32_t*>(allocation)[2] == (freedObjectStartPoison() ^ PTR_TO_UINT32(allocation))) && \
- (reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] == (freedObjectEndPoison() ^ PTR_TO_UINT32(allocation))) \
-))
-
-#else
-
-#define POISON_ALLOCATION(allocation, allocationSize)
-#define POISON_DEALLOCATION(allocation, allocationSize)
-#define POISON_DEALLOCATION_EXPLICIT(allocation, allocationSize, startPoison, endPoison)
-#define MAY_BE_POISONED(allocation, allocationSize) (false)
-#define IS_DEFINITELY_POISONED(allocation, allocationSize) (true)
-#define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (((void)entropy), ((void)key), ptr)
-
-#define HARDENING_ENTROPY 0
-
-#endif
-
-//-------------------------------------------------------------------
-// Configuration
-//-------------------------------------------------------------------
-
-// Not all possible combinations of the following parameters make
-// sense. In particular, if kMaxSize increases, you may have to
-// increase kNumClasses as well.
-#if OS(DARWIN)
-# define K_PAGE_SHIFT PAGE_SHIFT
-# if (K_PAGE_SHIFT == 12)
-# define K_NUM_CLASSES 68
-# elif (K_PAGE_SHIFT == 14)
-# define K_NUM_CLASSES 77
-# else
-# error "Unsupported PAGE_SHIFT amount"
-# endif
-#else
-# define K_PAGE_SHIFT 12
-# define K_NUM_CLASSES 68
-#endif
-static const size_t kPageShift = K_PAGE_SHIFT;
-static const size_t kPageSize = 1 << kPageShift;
-static const size_t kMaxSize = 32u * 1024;
-static const size_t kAlignShift = 3;
-static const size_t kAlignment = 1 << kAlignShift;
-static const size_t kNumClasses = K_NUM_CLASSES;
-
-// Allocates a big block of memory for the pagemap once we reach more than
-// 128MB
-static const size_t kPageMapBigAllocationThreshold = 128 << 20;
-
-// Minimum number of pages to fetch from system at a time. Must be
-// significantly bigger than kPageSize to amortize system-call
-// overhead, and also to reduce external fragmentation. Also, we
-// should keep this value big because various incarnations of Linux
-// have small limits on the number of mmap() regions per
-// address-space.
-static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
-
-// Number of objects to move between a per-thread list and a central
-// list in one shot. We want this to be not too small so we can
-// amortize the lock overhead for accessing the central list. Making
-// it too big may temporarily cause unnecessary memory wastage in the
-// per-thread free list until the scavenger cleans up the list.
-static int num_objects_to_move[kNumClasses];
-
-// Maximum length we allow a per-thread free-list to have before we
-// move objects from it into the corresponding central free-list. We
-// want this big to avoid locking the central free-list too often. It
-// should not hurt to make this list somewhat big because the
-// scavenging code will shrink it down when its contents are not in use.
-static const int kMaxFreeListLength = 256;
-
-// Lower and upper bounds on the per-thread cache sizes
-static const size_t kMinThreadCacheSize = kMaxSize * 2;
-#if PLATFORM(IOS)
-static const size_t kMaxThreadCacheSize = 512 * 1024;
-#else
-static const size_t kMaxThreadCacheSize = 2 << 20;
-#endif
-
-// Default bound on the total amount of thread caches
-static const size_t kDefaultOverallThreadCacheSize = 16 << 20;
-
-// For all span-lengths < kMaxPages we keep an exact-size list.
-// REQUIRED: kMaxPages >= kMinSystemAlloc;
-static const size_t kMaxPages = kMinSystemAlloc;
-
-/* The smallest prime > 2^n */
-static int primes_list[] = {
- // Small values might cause high rates of sampling
- // and hence commented out.
- // 2, 5, 11, 17, 37, 67, 131, 257,
- // 521, 1031, 2053, 4099, 8209, 16411,
- 32771, 65537, 131101, 262147, 524309, 1048583,
- 2097169, 4194319, 8388617, 16777259, 33554467 };
-
-// Twice the approximate gap between sampling actions.
-// I.e., we take one sample approximately once every
-// tcmalloc_sample_parameter/2
-// bytes of allocation, i.e., ~ once every 128KB.
-// Must be a prime number.
-#ifdef NO_TCMALLOC_SAMPLES
-DEFINE_int64(tcmalloc_sample_parameter, 0,
- "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
-static size_t sample_period = 0;
-#else
-DEFINE_int64(tcmalloc_sample_parameter, 262147,
- "Twice the approximate gap between sampling actions."
- " Must be a prime number. Otherwise will be rounded up to a "
- " larger prime number");
-static size_t sample_period = 262147;
-#endif
-
-// Protects sample_period above
-static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
-
-// Parameters for controlling how fast memory is returned to the OS.
-
-DEFINE_double(tcmalloc_release_rate, 1,
- "Rate at which we release unused memory to the system. "
- "Zero means we never release memory back to the system. "
- "Increase this flag to return memory faster; decrease it "
- "to return memory slower. Reasonable rates are in the "
- "range [0,10]");
-
-//-------------------------------------------------------------------
-// Mapping from size to size_class and vice versa
-//-------------------------------------------------------------------
-
-// Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
-// array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
-// So for these larger sizes we have an array indexed by ceil(size/128).
-//
-// We flatten both logical arrays into one physical array and use
-// arithmetic to compute an appropriate index. The constants used by
-// ClassIndex() were selected to make the flattening work.
-//
-// Examples:
-// Size Expression Index
-// -------------------------------------------------------
-// 0 (0 + 7) / 8 0
-// 1 (1 + 7) / 8 1
-// ...
-// 1024 (1024 + 7) / 8 128
-// 1025 (1025 + 127 + (120<<7)) / 128 129
-// ...
-// 32768 (32768 + 127 + (120<<7)) / 128 376
-static const size_t kMaxSmallSize = 1024;
-static const int shift_amount[2] = { 3, 7 }; // For divides by 8 or 128
-static const int add_amount[2] = { 7, 127 + (120 << 7) };
-static unsigned char class_array[377];
-
-// Compute index of the class_array[] entry for a given size
-static inline int ClassIndex(size_t s) {
- const int i = (s > kMaxSmallSize);
- return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
+ return bmalloc::api::isEnabled();
}
-// Mapping from size class to max size storable in that class
-static size_t class_to_size[kNumClasses];
-
-// Mapping from size class to number of pages to allocate at a time
-static size_t class_to_pages[kNumClasses];
-
-// Hardened singly linked list. We make this a class to allow compiler to
-// statically prevent mismatching hardened and non-hardened list
-class HardenedSLL {
-public:
- static ALWAYS_INLINE HardenedSLL create(void* value)
- {
- HardenedSLL result;
- result.m_value = value;
- return result;
- }
-
- static ALWAYS_INLINE HardenedSLL null()
- {
- HardenedSLL result;
- result.m_value = 0;
- return result;
- }
-
- ALWAYS_INLINE void setValue(void* value) { m_value = value; }
- ALWAYS_INLINE void* value() const { return m_value; }
- ALWAYS_INLINE bool operator!() const { return !m_value; }
- typedef void* (HardenedSLL::*UnspecifiedBoolType);
- ALWAYS_INLINE operator UnspecifiedBoolType() const { return m_value ? &HardenedSLL::m_value : 0; }
-
- bool operator!=(const HardenedSLL& other) const { return m_value != other.m_value; }
- bool operator==(const HardenedSLL& other) const { return m_value == other.m_value; }
-
-private:
- void* m_value;
-};
-
-// TransferCache is used to cache transfers of num_objects_to_move[size_class]
-// back and forth between thread caches and the central cache for a given size
-// class.
-struct TCEntry {
- HardenedSLL head; // Head of chain of objects.
- HardenedSLL tail; // Tail of chain of objects.
-};
-// A central cache freelist can have anywhere from 0 to kNumTransferEntries
-// slots to put link list chains into. To keep memory usage bounded the total
-// number of TCEntries across size classes is fixed. Currently each size
-// class is initially given one TCEntry which also means that the maximum any
-// one class can have is kNumClasses.
-static const int kNumTransferEntries = kNumClasses;
-
-// Note: the following only works for "n"s that fit in 32-bits, but
-// that is fine since we only use it for small sizes.
-static inline int LgFloor(size_t n) {
- int log = 0;
- for (int i = 4; i >= 0; --i) {
- int shift = (1 << i);
- size_t x = n >> shift;
- if (x != 0) {
- n = x;
- log += shift;
- }
- }
- ASSERT(n == 1);
- return log;
-}
-
-// Functions for using our simple hardened singly linked list
-static ALWAYS_INLINE HardenedSLL SLL_Next(HardenedSLL t, uintptr_t entropy) {
- void* tValueNext = *(reinterpret_cast<void**>(t.value()));
- return HardenedSLL::create(XOR_MASK_PTR_WITH_KEY(tValueNext, t.value(), entropy));
-}
-
-static ALWAYS_INLINE void SLL_SetNext(HardenedSLL t, HardenedSLL n, uintptr_t entropy) {
- *(reinterpret_cast<void**>(t.value())) = XOR_MASK_PTR_WITH_KEY(n.value(), t.value(), entropy);
-}
-
-static ALWAYS_INLINE void SLL_Push(HardenedSLL* list, HardenedSLL element, uintptr_t entropy) {
- SLL_SetNext(element, *list, entropy);
- *list = element;
-}
-
-static ALWAYS_INLINE HardenedSLL SLL_Pop(HardenedSLL *list, uintptr_t entropy) {
- HardenedSLL result = *list;
- *list = SLL_Next(*list, entropy);
- return result;
-}
-
-// Remove N elements from a linked list to which head points. head will be
-// modified to point to the new head. start and end will point to the first
-// and last nodes of the range. Note that end's next pointer will be NULL after
-// this function is called.
-
-static ALWAYS_INLINE void SLL_PopRange(HardenedSLL* head, int N, HardenedSLL *start, HardenedSLL *end, uintptr_t entropy) {
- if (N == 0) {
- *start = HardenedSLL::null();
- *end = HardenedSLL::null();
- return;
- }
-
- HardenedSLL tmp = *head;
- for (int i = 1; i < N; ++i) {
- tmp = SLL_Next(tmp, entropy);
- }
-
- *start = *head;
- *end = tmp;
- *head = SLL_Next(tmp, entropy);
- // Unlink range from list.
- SLL_SetNext(tmp, HardenedSLL::null(), entropy);
-}
-
-static ALWAYS_INLINE void SLL_PushRange(HardenedSLL *head, HardenedSLL start, HardenedSLL end, uintptr_t entropy) {
- if (!start) return;
- SLL_SetNext(end, *head, entropy);
- *head = start;
-}
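// A self-contained sketch of the idea behind the hardened singly linked list above:
// the stored next pointer is XOR-masked with a value derived from the node's own
// address and a per-heap entropy word, so a stray heap overwrite is unlikely to forge
// a usable link. The mask below is an illustrative stand-in for XOR_MASK_PTR_WITH_KEY,
// whose exact definition lives elsewhere in this file.
#include <cstdint>
#include <cstdio>

static void* maskPointer(void* ptr, void* key, uintptr_t entropy)
{
    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr)
        ^ (reinterpret_cast<uintptr_t>(key) * entropy));
}

struct Node { void* maskedNext; };

static void push(Node*& head, Node* node, uintptr_t entropy)
{
    node->maskedNext = maskPointer(head, node, entropy); // store the masked link
    head = node;
}

static Node* next(Node* node, uintptr_t entropy)
{
    return static_cast<Node*>(maskPointer(node->maskedNext, node, entropy)); // unmask
}

int main()
{
    const uintptr_t entropy = 0x9e3779b97f4a7c15ULL | 1; // arbitrary odd constant for the sketch
    Node a = { }, b = { };
    Node* head = nullptr;
    push(head, &a, entropy);
    push(head, &b, entropy);
    std::printf("next(head) == &a? %d\n", next(head, entropy) == &a); // prints 1
    return 0;
}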
-
-// Setup helper functions.
-
-static ALWAYS_INLINE size_t SizeClass(size_t size) {
- return class_array[ClassIndex(size)];
-}
-
-// Get the byte-size for a specified class
-static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
- return class_to_size[cl];
-}
-static int NumMoveSize(size_t size) {
- if (size == 0) return 0;
- // Use approx 64k transfers between thread and central caches.
- int num = static_cast<int>(64.0 * 1024.0 / size);
- if (num < 2) num = 2;
- // Clamp well below kMaxFreeListLength to avoid ping pong between central
- // and thread caches.
- if (num > static_cast<int>(0.8 * kMaxFreeListLength))
- num = static_cast<int>(0.8 * kMaxFreeListLength);
-
- // Also, avoid bringing too many objects into small-object free
- // lists. There are lots of such lists, and if we allow each one to
- // fetch too many at a time, we end up having to scavenge too often
- // (especially when there are lots of threads and each thread gets a
- // small allowance for its thread cache).
- //
- // TODO: Make thread cache free list sizes dynamic so that we do not
- // have to equally divide a fixed resource amongst lots of threads.
- if (num > 32) num = 32;
-
- return num;
-}
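// A tiny worked example of the NumMoveSize() heuristic above: aim for roughly 64KB
// per transfer, but keep the batch between 2 and 32 objects so thread caches do not
// hoard small objects. The kMaxFreeListLength clamp is omitted here for brevity, and
// sketchNumMoveSize is an illustrative stand-in.
#include <algorithm>
#include <cstddef>
#include <cstdio>

static int sketchNumMoveSize(size_t size)
{
    if (!size)
        return 0;
    int num = static_cast<int>(64.0 * 1024.0 / size);
    return std::min(std::max(num, 2), 32);
}

int main()
{
    std::printf("16 B -> %d, 2 KB -> %d, 48 KB -> %d\n",
        sketchNumMoveSize(16), sketchNumMoveSize(2048), sketchNumMoveSize(48 * 1024));
    return 0; // prints "16 B -> 32, 2 KB -> 32, 48 KB -> 2"
}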
-
-// Initialize the mapping arrays
-static void InitSizeClasses() {
- // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
- if (ClassIndex(0) < 0) {
- MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
- CRASH();
- }
- if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
- MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
- CRASH();
- }
-
- // Compute the size classes we want to use
- size_t sc = 1; // Next size class to assign
- unsigned char alignshift = kAlignShift;
- int last_lg = -1;
- for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
- int lg = LgFloor(size);
- if (lg > last_lg) {
- // Increase alignment every so often.
- //
- // Since we double the alignment every time size doubles and
- // size >= 128, this means that space wasted due to alignment is
- // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
- // bytes, so the space wasted as a percentage starts falling for
- // sizes > 2K.
- if ((lg >= 7) && (alignshift < 8)) {
- alignshift++;
- }
- last_lg = lg;
- }
-
- // Allocate enough pages so leftover is less than 1/8 of total.
- // This bounds wasted space to at most 12.5%.
- size_t psize = kPageSize;
- while ((psize % size) > (psize >> 3)) {
- psize += kPageSize;
- }
- const size_t my_pages = psize >> kPageShift;
-
- if (sc > 1 && my_pages == class_to_pages[sc-1]) {
- // See if we can merge this into the previous class without
- // increasing the fragmentation of the previous class.
- const size_t my_objects = (my_pages << kPageShift) / size;
- const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
- / class_to_size[sc-1];
- if (my_objects == prev_objects) {
- // Adjust last class to include this size
- class_to_size[sc-1] = size;
- continue;
- }
- }
-
- // Add new class
- class_to_pages[sc] = my_pages;
- class_to_size[sc] = size;
- sc++;
- }
- if (sc != kNumClasses) {
- MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
- sc, int(kNumClasses));
- CRASH();
- }
-
- // Initialize the mapping arrays
- int next_size = 0;
- for (unsigned char c = 1; c < kNumClasses; c++) {
- const size_t max_size_in_class = class_to_size[c];
- for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
- class_array[ClassIndex(s)] = c;
- }
- next_size = static_cast<int>(max_size_in_class + kAlignment);
- }
-
- // Double-check sizes just to be safe
- for (size_t size = 0; size <= kMaxSize; size++) {
- const size_t sc = SizeClass(size);
- if (sc == 0) {
- MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
- CRASH();
- }
- if (sc > 1 && size <= class_to_size[sc-1]) {
- MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
- "\n", sc, size);
- CRASH();
- }
- if (sc >= kNumClasses) {
- MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
- CRASH();
- }
- const size_t s = class_to_size[sc];
- if (size > s) {
- MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
- CRASH();
- }
- if (s == 0) {
- MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
- CRASH();
- }
- }
-
- // Initialize the num_objects_to_move array.
- for (size_t cl = 1; cl < kNumClasses; ++cl) {
- num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
- }
-
-#ifndef WTF_CHANGES
- if (false) {
- // Dump class sizes and maximum external wastage per size class
- for (size_t cl = 1; cl < kNumClasses; ++cl) {
- const int alloc_size = class_to_pages[cl] << kPageShift;
- const int alloc_objs = alloc_size / class_to_size[cl];
- const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
- const int max_waste = alloc_size - min_used;
- MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
- int(cl),
- int(class_to_size[cl-1] + 1),
- int(class_to_size[cl]),
- int(class_to_pages[cl] << kPageShift),
- max_waste * 100.0 / alloc_size
- );
- }
- }
-#endif
-}
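// A small sketch of the page-count rule used in InitSizeClasses() above: keep adding
// pages until the leftover after packing whole objects is at most 1/8 of the
// allocation, bounding internal waste at 12.5%. The 4K page size and the 1152-byte
// object size are assumptions chosen for the example.
#include <cstddef>
#include <cstdio>

int main()
{
    const size_t pageSize = 4096;
    const size_t objectSize = 1152;
    size_t allocation = pageSize;
    while ((allocation % objectSize) > (allocation >> 3))
        allocation += pageSize;
    std::printf("%zu-byte objects: allocate %zu pages, waste %zu bytes (%.1f%%)\n",
        objectSize, allocation / pageSize, allocation % objectSize,
        100.0 * (allocation % objectSize) / allocation);
    return 0; // 2 pages, 128 bytes wasted (1.6%)
}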
-
-// -------------------------------------------------------------------------
-// Simple allocator for objects of a specified type. External locking
-// is required before accessing one of these objects.
-// -------------------------------------------------------------------------
-
-// Metadata allocator -- keeps stats about how many bytes have been allocated.
-static uint64_t metadata_system_bytes = 0;
-static void* MetaDataAlloc(size_t bytes) {
- void* result = TCMalloc_SystemAlloc(bytes, 0);
- if (result != NULL) {
- metadata_system_bytes += bytes;
- }
- return result;
-}
-
-#if defined(WTF_CHANGES) && OS(DARWIN)
-class RemoteMemoryReader;
-#endif
-
-template <class T>
-class PageHeapAllocator {
- private:
- // How much to allocate from system at a time
- static const size_t kAllocIncrement = 32 << 10;
-
- // Aligned size of T
- static const size_t kAlignedSize
- = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);
-
- // Free area from which to carve new objects
- char* free_area_;
- size_t free_avail_;
-
- // Linked list of all regions allocated by this allocator
- HardenedSLL allocated_regions_;
-
- // Free list of already carved objects
- HardenedSLL free_list_;
-
- // Number of allocated but unfreed objects
- int inuse_;
- uintptr_t entropy_;
-
- public:
- void Init(uintptr_t entropy) {
- ASSERT(kAlignedSize <= kAllocIncrement);
- inuse_ = 0;
- allocated_regions_ = HardenedSLL::null();
- free_area_ = NULL;
- free_avail_ = 0;
- free_list_.setValue(NULL);
- entropy_ = entropy;
- }
-
- T* New() {
- // Consult free list
- void* result;
- if (free_list_) {
- result = free_list_.value();
- free_list_ = SLL_Next(free_list_, entropy_);
- } else {
- if (free_avail_ < kAlignedSize) {
- // Need more room
- char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
- if (!new_allocation)
- CRASH();
-
- HardenedSLL new_head = HardenedSLL::create(new_allocation);
- SLL_SetNext(new_head, allocated_regions_, entropy_);
- allocated_regions_ = new_head;
- free_area_ = new_allocation + kAlignedSize;
- free_avail_ = kAllocIncrement - kAlignedSize;
- }
- result = free_area_;
- free_area_ += kAlignedSize;
- free_avail_ -= kAlignedSize;
- }
- inuse_++;
- return reinterpret_cast<T*>(result);
- }
-
- void Delete(T* p) {
- HardenedSLL new_head = HardenedSLL::create(p);
- SLL_SetNext(new_head, free_list_, entropy_);
- free_list_ = new_head;
- inuse_--;
- }
-
- int inuse() const { return inuse_; }
-
-#if defined(WTF_CHANGES) && OS(DARWIN)
- template <typename Recorder>
- void recordAdministrativeRegions(Recorder&, const RemoteMemoryReader&);
-#endif
-};
-
-// -------------------------------------------------------------------------
-// Span - a contiguous run of pages
-// -------------------------------------------------------------------------
-
-// Type that can hold a page number
-typedef uintptr_t PageID;
-
-// Type that can hold the length of a run of pages
-typedef uintptr_t Length;
-
-static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
-
-// Convert byte size into pages. This won't overflow, but may return
-// an unreasonably large value if bytes is huge enough.
-static inline Length pages(size_t bytes) {
- return (bytes >> kPageShift) +
- ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
-}
-
-// Convert a user size into the number of bytes that will actually be
-// allocated
-static size_t AllocationSize(size_t bytes) {
- if (bytes > kMaxSize) {
- // Large object: we allocate an integral number of pages
- ASSERT(bytes <= (kMaxValidPages << kPageShift));
- return pages(bytes) << kPageShift;
- } else {
- // Small object: find the size class to which it belongs
- return ByteSizeForClass(SizeClass(bytes));
- }
-}
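// A small sketch of the large-object path of AllocationSize() above: requests bigger
// than kMaxSize are rounded up to a whole number of pages. The 4K page size is an
// assumption; pagesFor mirrors the pages() helper.
#include <cassert>
#include <cstddef>

static const size_t sketchPageShift = 12;                 // 4K pages (assumed)
static const size_t sketchPageSize = size_t(1) << sketchPageShift;

static size_t pagesFor(size_t bytes)
{
    return (bytes >> sketchPageShift) + ((bytes & (sketchPageSize - 1)) > 0 ? 1 : 0);
}

int main()
{
    assert(pagesFor(1) == 1);                             // a partial page rounds up
    assert(pagesFor(sketchPageSize) == 1);                // exact fit
    assert(pagesFor(sketchPageSize + 1) == 2);            // spills into a second page
    assert(pagesFor(10 * sketchPageSize) == 10);
    return 0;
}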
-
-enum {
- kSpanCookieBits = 10,
- kSpanCookieMask = (1 << 10) - 1,
- kSpanThisShift = 7
-};
-
-static uint32_t spanValidationCookie;
-static uint32_t spanInitializerCookie()
-{
- static uint32_t value = EntropySource<sizeof(uint32_t)>::value() & kSpanCookieMask;
- spanValidationCookie = value;
- return value;
-}
-
-// Information kept for a span (a contiguous run of pages).
-struct Span {
- PageID start; // Starting page number
- Length length; // Number of pages in span
- Span* next(uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_next, this, entropy); }
- Span* remoteNext(const Span* remoteSpanPointer, uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_next, remoteSpanPointer, entropy); }
- Span* prev(uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_prev, this, entropy); }
- void setNext(Span* next, uintptr_t entropy) { m_next = XOR_MASK_PTR_WITH_KEY(next, this, entropy); }
- void setPrev(Span* prev, uintptr_t entropy) { m_prev = XOR_MASK_PTR_WITH_KEY(prev, this, entropy); }
-
-private:
- Span* m_next; // Used when in linked list
- Span* m_prev; // Used when in linked list
-public:
- HardenedSLL objects; // Linked list of free objects
- unsigned int free : 1; // Is the span free
-#ifndef NO_TCMALLOC_SAMPLES
- unsigned int sample : 1; // Sampled object?
-#endif
- unsigned int sizeclass : 8; // Size-class for small objects (or 0)
- unsigned int refcount : 11; // Number of non-free objects
- bool decommitted : 1;
- void initCookie()
- {
- m_cookie = ((reinterpret_cast<uintptr_t>(this) >> kSpanThisShift) & kSpanCookieMask) ^ spanInitializerCookie();
- }
- void clearCookie() { m_cookie = 0; }
- bool isValid() const
- {
- return (((reinterpret_cast<uintptr_t>(this) >> kSpanThisShift) & kSpanCookieMask) ^ m_cookie) == spanValidationCookie;
- }
-private:
- uint32_t m_cookie : kSpanCookieBits;
-
-#undef SPAN_HISTORY
-#ifdef SPAN_HISTORY
- // For debugging, we can keep a log of events per span
- int nexthistory;
- char history[64];
- int value[64];
-#endif
-};
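// A standalone sketch of the span cookie check above: initCookie() XORs the low bits
// of the object's shifted address with a per-process random cookie, and isValid()
// recomputes and compares, catching many overwrites of the header. The constants
// mirror kSpanCookieBits/kSpanThisShift; the std::random_device source and the
// SketchSpan type are stand-ins.
#include <cstdint>
#include <cstdio>
#include <random>

static const uint32_t kSketchCookieBits = 10;
static const uint32_t kSketchCookieMask = (1u << kSketchCookieBits) - 1;
static const uint32_t kSketchThisShift = 7;

static uint32_t processCookie()
{
    static uint32_t value = std::random_device()() & kSketchCookieMask;
    return value;
}

struct SketchSpan {
    uint32_t cookie { 0 };
    void initCookie() { cookie = ((reinterpret_cast<uintptr_t>(this) >> kSketchThisShift) & kSketchCookieMask) ^ processCookie(); }
    bool isValid() const { return (((reinterpret_cast<uintptr_t>(this) >> kSketchThisShift) & kSketchCookieMask) ^ cookie) == processCookie(); }
};

int main()
{
    SketchSpan span;
    span.initCookie();
    std::printf("valid after init: %d\n", span.isValid());       // 1
    span.cookie ^= 0x3f;                                          // simulate a corrupted header
    std::printf("valid after corruption: %d\n", span.isValid());  // 0
    return 0;
}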
-
-#define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)
-
-#ifdef SPAN_HISTORY
-void Event(Span* span, char op, int v = 0) {
- span->history[span->nexthistory] = op;
- span->value[span->nexthistory] = v;
- span->nexthistory++;
- if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
-}
-#else
-#define Event(s,o,v) ((void) 0)
-#endif
-
-// Allocator/deallocator for spans
-static PageHeapAllocator<Span> span_allocator;
-static Span* NewSpan(PageID p, Length len) {
- Span* result = span_allocator.New();
- memset(result, 0, sizeof(*result));
- result->start = p;
- result->length = len;
- result->initCookie();
-#ifdef SPAN_HISTORY
- result->nexthistory = 0;
-#endif
- return result;
-}
-
-static inline void DeleteSpan(Span* span) {
- RELEASE_ASSERT(span->isValid());
-#ifndef NDEBUG
- // In debug mode, trash the contents of deleted Spans
- memset(span, 0x3f, sizeof(*span));
-#endif
- span->clearCookie();
- span_allocator.Delete(span);
-}
-
-// -------------------------------------------------------------------------
-// Doubly linked list of spans.
-// -------------------------------------------------------------------------
-
-static inline void DLL_Init(Span* list, uintptr_t entropy) {
- list->setNext(list, entropy);
- list->setPrev(list, entropy);
-}
-
-static inline void DLL_Remove(Span* span, uintptr_t entropy) {
- span->prev(entropy)->setNext(span->next(entropy), entropy);
- span->next(entropy)->setPrev(span->prev(entropy), entropy);
- span->setPrev(NULL, entropy);
- span->setNext(NULL, entropy);
-}
-
-static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list, uintptr_t entropy) {
- return list->next(entropy) == list;
-}
-
-static int DLL_Length(const Span* list, uintptr_t entropy) {
- int result = 0;
- for (Span* s = list->next(entropy); s != list; s = s->next(entropy)) {
- result++;
- }
- return result;
-}
-
-#if 0 /* Not needed at the moment -- causes compiler warnings if not used */
-static void DLL_Print(const char* label, const Span* list) {
- MESSAGE("%-10s %p:", label, list);
- for (const Span* s = list->next; s != list; s = s->next) {
- MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
- }
- MESSAGE("\n");
-}
-#endif
-
-static inline void DLL_Prepend(Span* list, Span* span, uintptr_t entropy) {
- span->setNext(list->next(entropy), entropy);
- span->setPrev(list, entropy);
- list->next(entropy)->setPrev(span, entropy);
- list->setNext(span, entropy);
-}
-
-//-------------------------------------------------------------------
-// Data kept per size-class in central cache
-//-------------------------------------------------------------------
-
-class TCMalloc_Central_FreeList {
- public:
- void Init(size_t cl, uintptr_t entropy);
-
- // These methods all do internal locking.
-
- // Insert the specified range into the central freelist. N is the number of
- // elements in the range.
- void InsertRange(HardenedSLL start, HardenedSLL end, int N);
-
- // Returns the actual number of fetched elements in *N.
- void RemoveRange(HardenedSLL* start, HardenedSLL* end, int *N);
-
- // Returns the number of free objects in cache.
- size_t length() {
- SpinLockHolder h(&lock_);
- return counter_;
- }
-
- // Returns the number of free objects in the transfer cache.
- int tc_length() {
- SpinLockHolder h(&lock_);
- return used_slots_ * num_objects_to_move[size_class_];
- }
-
-#ifdef WTF_CHANGES
- template <class Finder, class Reader>
- void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
- {
- {
- static const ptrdiff_t emptyOffset = reinterpret_cast<const char*>(&empty_) - reinterpret_cast<const char*>(this);
- Span* remoteEmpty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + emptyOffset);
- Span* remoteSpan = nonempty_.remoteNext(remoteEmpty, entropy_);
- for (Span* span = reader(remoteEmpty); span && span != &empty_; remoteSpan = span->remoteNext(remoteSpan, entropy_), span = (remoteSpan ? reader(remoteSpan) : 0))
- ASSERT(!span->objects);
- }
-
- ASSERT(!nonempty_.objects);
- static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this);
-
- Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
- Span* remoteSpan = nonempty_.remoteNext(remoteNonempty, entropy_);
-
- for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->remoteNext(remoteSpan, entropy_), span = (remoteSpan ? reader(remoteSpan) : 0)) {
- for (HardenedSLL nextObject = span->objects; nextObject; nextObject.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), entropy_))) {
- finder.visit(nextObject.value());
- }
- }
-
- for (int slot = 0; slot < used_slots_; ++slot) {
- for (HardenedSLL entry = tc_slots_[slot].head; entry; entry.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(entry.value()), entropy_)))
- finder.visit(entry.value());
- }
- }
-#endif
-
- uintptr_t entropy() const { return entropy_; }
- private:
- // REQUIRES: lock_ is held
- // Remove object from cache and return.
- // Return NULL if no free entries in cache.
- HardenedSLL FetchFromSpans();
-
- // REQUIRES: lock_ is held
- // Remove object from cache and return. Fetches
- // from pageheap if cache is empty. Only returns
- // NULL on allocation failure.
- HardenedSLL FetchFromSpansSafe();
-
- // REQUIRES: lock_ is held
- // Release a linked list of objects to spans.
- // May temporarily release lock_.
- void ReleaseListToSpans(HardenedSLL start);
-
- // REQUIRES: lock_ is held
- // Release an object to spans.
- // May temporarily release lock_.
- ALWAYS_INLINE void ReleaseToSpans(HardenedSLL object);
-
- // REQUIRES: lock_ is held
- // Populate cache by fetching from the page heap.
- // May temporarily release lock_.
- ALWAYS_INLINE void Populate();
-
- // REQUIRES: lock_ is held.
- // Tries to make room for a TCEntry. If the cache is full, it will try to
- // expand it at the cost of some other cache's size. Return false if there is
- // no space.
- bool MakeCacheSpace();
-
- // REQUIRES: lock_ for locked_size_class is held.
- // Picks a "random" size class to steal a TCEntry slot from. In reality it
- // just iterates over the sizeclasses but does so without taking a lock.
- // Returns true on success.
- // May temporarily lock a "random" size class.
- static ALWAYS_INLINE bool EvictRandomSizeClass(size_t locked_size_class, bool force);
-
- // REQUIRES: lock_ is *not* held.
- // Tries to shrink the cache. If force is true, it will release objects to
- // spans if that allows it to shrink the cache. Returns false if it failed to
- // shrink the cache. Decrements cache_size_ on success.
- // May temporarily take lock_. If it takes lock_, the locked_size_class
- // lock is released to keep the thread from holding two size class locks
- // concurrently, which could lead to a deadlock.
- bool ShrinkCache(int locked_size_class, bool force);
-
- // This lock protects all the data members. used_slots_ and cache_size_
- // may be read without holding the lock.
- SpinLock lock_;
-
- // We keep linked lists of empty and non-empty spans.
- size_t size_class_; // My size class
- Span empty_; // Dummy header for list of empty spans
- Span nonempty_; // Dummy header for list of non-empty spans
- size_t counter_; // Number of free objects in cache entry
-
- // Here we reserve space for TCEntry cache slots. Since one size class can
- // end up getting the entire TCEntry quota in the system, we just preallocate
- // a sufficient number of entries here.
- TCEntry tc_slots_[kNumTransferEntries];
-
- // Number of currently used cached entries in tc_slots_. This variable is
- // updated under a lock but can be read without one.
- int32_t used_slots_;
- // The current number of slots for this size class. This is an
- // adaptive value that is increased if there is lots of traffic
- // on a given size class.
- int32_t cache_size_;
- uintptr_t entropy_;
-};
-
-#if COMPILER(CLANG) && defined(__has_warning)
-#pragma clang diagnostic push
-#if __has_warning("-Wunused-private-field")
-#pragma clang diagnostic ignored "-Wunused-private-field"
-#endif
-#endif
-
-// Pad each CentralCache object to a multiple of 64 bytes
-template <size_t SizeToPad>
-class TCMalloc_Central_FreeListPadded_Template : public TCMalloc_Central_FreeList {
-private:
- char pad[64 - SizeToPad];
-};
-
-// Zero-size specialization for the case where TCMalloc_Central_FreeList is already
-// an exact multiple of 64 bytes in size, so no padding is needed.
-template <> class TCMalloc_Central_FreeListPadded_Template<0> : public TCMalloc_Central_FreeList {
-};
-
-typedef TCMalloc_Central_FreeListPadded_Template<sizeof(TCMalloc_Central_FreeList) % 64> TCMalloc_Central_FreeListPadded;
-
-#if COMPILER(CLANG) && defined(__has_warning)
-#pragma clang diagnostic pop
-#endif
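// A compile-time sketch of the padding pattern above: derive from the real type and
// append 64 - (sizeof % 64) bytes, with a zero-size specialization so that types
// whose size is already a multiple of 64 get no extra padding. The Payload struct
// here is a hypothetical stand-in for TCMalloc_Central_FreeList.
#include <cstddef>
#include <cstdio>

struct Payload { char data[40]; };

template <size_t SizeToPad>
struct Padded : Payload { char pad[64 - SizeToPad]; };

template <>
struct Padded<0> : Payload { }; // already a multiple of 64; no pad member needed

using CacheLinePadded = Padded<sizeof(Payload) % 64>;

static_assert(sizeof(CacheLinePadded) % 64 == 0, "padded to a multiple of 64 bytes");

int main()
{
    std::printf("sizeof(Payload) = %zu, sizeof(CacheLinePadded) = %zu\n",
        sizeof(Payload), sizeof(CacheLinePadded)); // 40 and 64
    return 0;
}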
-
-#if OS(DARWIN)
-struct Span;
-class TCMalloc_PageHeap;
-class TCMalloc_ThreadCache;
-template <typename T> class PageHeapAllocator;
-
-class FastMallocZone {
-public:
- static void init();
-
- static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
- static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
- static boolean_t check(malloc_zone_t*) { return true; }
- static void print(malloc_zone_t*, boolean_t) { }
- static void log(malloc_zone_t*, void*) { }
- static void forceLock(malloc_zone_t*) { }
- static void forceUnlock(malloc_zone_t*) { }
- static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }
-
-private:
- FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);
- static size_t size(malloc_zone_t*, const void*);
- static void* zoneMalloc(malloc_zone_t*, size_t);
- static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
- static void zoneFree(malloc_zone_t*, void*);
- static void* zoneRealloc(malloc_zone_t*, void*, size_t);
- static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
- static void zoneDestroy(malloc_zone_t*) { }
-
- malloc_zone_t m_zone;
- TCMalloc_PageHeap* m_pageHeap;
- TCMalloc_ThreadCache** m_threadHeaps;
- TCMalloc_Central_FreeListPadded* m_centralCaches;
- PageHeapAllocator<Span>* m_spanAllocator;
- PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
-};
-
-// This method declaration, and the constants below, are taken from Libc/gen/malloc.c.
-extern "C" void (*malloc_logger)(uint32_t typeFlags, uintptr_t zone, uintptr_t size, uintptr_t pointer, uintptr_t returnValue, uint32_t numberOfFramesToSkip);
-
-#endif
-
-class MallocHook {
- static bool stackLoggingEnabled;
-
-#if OS(DARWIN)
-
- enum StackLoggingType {
- StackLoggingTypeAlloc = 2,
- StackLoggingTypeDealloc = 4,
- };
-
- static void record(uint32_t typeFlags, uintptr_t zone, uintptr_t size, void* pointer, void* returnValue, uint32_t numberOfFramesToSkip)
- {
- malloc_logger(typeFlags, zone, size, reinterpret_cast<uintptr_t>(pointer), reinterpret_cast<uintptr_t>(returnValue), numberOfFramesToSkip);
- }
-
- static NEVER_INLINE void recordAllocation(void* pointer, size_t size)
- {
- // StackLoggingTypeAlloc takes the newly-allocated address in the returnValue argument, the size of the allocation
- // in the size argument and ignores all other arguments.
- record(StackLoggingTypeAlloc, 0, size, 0, pointer, 0);
- }
-
- static NEVER_INLINE void recordDeallocation(void* pointer)
- {
- // StackLoggingTypeDealloc takes the pointer in the size argument and ignores all other arguments.
- record(StackLoggingTypeDealloc, 0, reinterpret_cast<uintptr_t>(pointer), 0, 0, 0);
- }
-
-#endif
-
-public:
- static void init()
- {
-#if OS(DARWIN)
- // If the system allocator's malloc_logger has been set up then stack logging is enabled.
- stackLoggingEnabled = malloc_logger;
-#endif
- }
-
-#if OS(DARWIN)
- static ALWAYS_INLINE void InvokeNewHook(void* pointer, size_t size)
- {
- if (UNLIKELY(stackLoggingEnabled))
- recordAllocation(pointer, size);
- }
-
- static ALWAYS_INLINE void InvokeDeleteHook(void* pointer)
- {
-
- if (UNLIKELY(stackLoggingEnabled))
- recordDeallocation(pointer);
- }
-#else
- static ALWAYS_INLINE void InvokeNewHook(void*, size_t) { }
- static ALWAYS_INLINE void InvokeDeleteHook(void*) { }
-#endif
-};
-bool MallocHook::stackLoggingEnabled = false;
-
-#endif
-
-#ifndef WTF_CHANGES
-// This macro should almost never be defined. Define NO_TCMALLOC_SAMPLES only if
-// you're porting to a system where you really can't get a stacktrace.
-#ifdef NO_TCMALLOC_SAMPLES
-// We use #define so code compiles even if you #include stacktrace.h somehow.
-# define GetStackTrace(stack, depth, skip) (0)
-#else
-# include <google/stacktrace.h>
-#endif
-#endif
-
-// Even if we have support for thread-local storage in the compiler
-// and linker, the OS may not support it. We need to check that at
-// runtime. Right now, we have to keep a manual set of "bad" OSes.
-#if defined(HAVE_TLS)
- static bool kernel_supports_tls = false; // be conservative
- static inline bool KernelSupportsTLS() {
- return kernel_supports_tls;
- }
-# if !HAVE_DECL_UNAME // if too old for uname, probably too old for TLS
- static void CheckIfKernelSupportsTLS() {
- kernel_supports_tls = false;
- }
-# else
-# include <sys/utsname.h> // DECL_UNAME checked for <sys/utsname.h> too
- static void CheckIfKernelSupportsTLS() {
- struct utsname buf;
- if (uname(&buf) != 0) { // should be impossible
- MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
- kernel_supports_tls = false;
- } else if (strcasecmp(buf.sysname, "linux") == 0) {
- // The linux case: the first kernel to support TLS was 2.6.0
- if (buf.release[0] < '2' && buf.release[1] == '.') // 0.x or 1.x
- kernel_supports_tls = false;
- else if (buf.release[0] == '2' && buf.release[1] == '.' &&
- buf.release[2] >= '0' && buf.release[2] < '6' &&
- buf.release[3] == '.') // 2.0 - 2.5
- kernel_supports_tls = false;
- else
- kernel_supports_tls = true;
- } else { // some other kernel; we'll be optimistic
- kernel_supports_tls = true;
- }
- // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
- }
-# endif // HAVE_DECL_UNAME
-#endif // HAVE_TLS
-
-// __THROW is defined in glibc systems. It means, counter-intuitively,
-// "This function will never throw an exception." It's an optional
-// optimization tool, but we may need to use it to match glibc prototypes.
-#ifndef __THROW // I guess we're not on a glibc system
-# define __THROW // __THROW is just an optimization, so ok to make it ""
-#endif
-
-// -------------------------------------------------------------------------
-// Stack traces kept for sampled allocations
-// The following state is protected by pageheap_lock_.
-// -------------------------------------------------------------------------
-
-// size/depth are made the same size as a pointer so that some generic
-// code below can conveniently cast them back and forth to void*.
-static const int kMaxStackDepth = 31;
-struct StackTrace {
- uintptr_t size; // Size of object
- uintptr_t depth; // Number of PC values stored in array below
- void* stack[kMaxStackDepth];
-};
-static PageHeapAllocator<StackTrace> stacktrace_allocator;
-static Span sampled_objects;
-
-// -------------------------------------------------------------------------
-// Map from page-id to per-page data
-// -------------------------------------------------------------------------
-
-// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
-// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
-// because sometimes the sizeclass is all the information we need.
-
-// Selector class -- general selector uses 3-level map
-template <int BITS> class MapSelector {
- public:
- typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
- typedef PackedCache<BITS, uint64_t> CacheType;
-};
-
-#if defined(WTF_CHANGES)
-#if CPU(X86_64) || CPU(ARM64)
-// On all known x86-64 (and ARM64) platforms, the upper 16 bits of a pointer are
-// always unused and can therefore be excluded from the PageMap key.
-// See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
-
-static const size_t kBitsUnusedOn64Bit = 16;
-#else
-static const size_t kBitsUnusedOn64Bit = 0;
-#endif
-
-// A three-level map for 64-bit machines
-template <> class MapSelector<64> {
- public:
- typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
- typedef PackedCache<64, uint64_t> CacheType;
-};
-#endif
-
-// A two-level map for 32-bit machines
-template <> class MapSelector<32> {
- public:
- typedef TCMalloc_PageMap2<32 - kPageShift> Type;
- typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
-};
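// A toy two-level page map in the spirit of the MapSelector<32> choice above: the
// page number is split into a root index and a leaf index, and leaves are allocated
// on demand so a sparse address space stays cheap. The bit widths and class name are
// illustrative only, not the TCMalloc_PageMap2 parameters.
#include <cstdint>
#include <cstdio>
#include <memory>

class ToyPageMap2 {
public:
    void set(uint32_t page, void* value)
    {
        const uint32_t rootIndex = page >> kLeafBits;
        if (!m_root[rootIndex])
            m_root[rootIndex] = std::make_unique<Leaf>();
        m_root[rootIndex]->entries[page & (kLeafSize - 1)] = value;
    }
    void* get(uint32_t page) const
    {
        const Leaf* leaf = m_root[page >> kLeafBits].get();
        return leaf ? leaf->entries[page & (kLeafSize - 1)] : nullptr;
    }
private:
    static const uint32_t kLeafBits = 10;
    static const uint32_t kLeafSize = 1 << kLeafBits;
    struct Leaf { void* entries[kLeafSize] = { }; };
    std::unique_ptr<Leaf> m_root[1 << 10]; // covers a 20-bit page-number space
};

int main()
{
    ToyPageMap2 map;
    int marker = 0;
    map.set(123456, &marker);
    std::printf("hit: %d, miss: %d\n", map.get(123456) == &marker, map.get(7) == nullptr);
    return 0;
}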
-
-// -------------------------------------------------------------------------
-// Page-level allocator
-// * Eager coalescing
-//
-// Heap for page-level allocation. We allow allocating and freeing
-// contiguous runs of pages (each such run is called a "span").
-// -------------------------------------------------------------------------
-
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-// The page heap maintains a free list for spans that are no longer in use by
-// the central cache or any thread caches. We use a background thread to
-// periodically scan the free list and release a percentage of it back to the OS.
-
-// If free_committed_pages_ exceeds kMinimumFreeCommittedPageCount, the
-// background thread:
-// - wakes up
-// - pauses for kScavengeDelayInSeconds
-// - returns to the OS a percentage of the memory that remained unused during
-// that pause (kScavengePercentage * min_free_committed_pages_since_last_scavenge_)
-// The goal of this strategy is to reduce memory pressure in a timely fashion
-// while avoiding thrashing the OS allocator.
-
-// Time delay before the page heap scavenger will consider returning pages to
-// the OS.
-static const int kScavengeDelayInSeconds = 2;
-
-// Approximate percentage of free committed pages to return to the OS in one
-// scavenge.
-static const float kScavengePercentage = .5f;
-
-// Number of span lists to keep spans in when memory is returned.
-static const int kMinSpanListsWithSpans = 32;
-
-// Number of free committed pages that we want to keep around. This is the number of pages used when there
-// is one span in each of the first kMinSpanListsWithSpans span lists. Currently 528 pages.
-static const size_t kMinimumFreeCommittedPageCount = kMinSpanListsWithSpans * ((1.0f+kMinSpanListsWithSpans) / 2.0f);
-
-#endif
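// A quick standalone check of the scavenger constants above: with one span in each of
// the first 32 span lists, the retained pages are 1 + 2 + ... + 32 = 32 * 33 / 2 = 528,
// matching the comment, and each pass targets releasing kScavengePercentage of the
// pages that stayed unused since the last scavenge. The example traffic numbers are
// made up for illustration.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main()
{
    const int minSpanListsWithSpans = 32;
    const float scavengePercentage = .5f;
    const size_t minimumFreeCommittedPageCount =
        minSpanListsWithSpans * ((1.0f + minSpanListsWithSpans) / 2.0f); // 528
    // Example: 2000 committed free pages, 1200 of them unused since the last pass.
    const size_t freeCommittedPages = 2000;
    const size_t minFreeSinceLastScavenge = 1200;
    const size_t pagesToRelease = minFreeSinceLastScavenge * scavengePercentage; // 600
    const size_t target = std::max(minimumFreeCommittedPageCount, freeCommittedPages - pagesToRelease); // 1400
    std::printf("keep at least %zu pages; this pass shrinks the free pool to %zu pages\n",
        minimumFreeCommittedPageCount, target);
    return 0;
}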
-
-static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
-
-class TCMalloc_PageHeap {
- public:
- void init();
-
- // Allocate a run of "n" pages. Returns NULL if out of memory.
- Span* New(Length n);
-
- // Delete the span "[p, p+n-1]".
- // REQUIRES: span was returned by earlier call to New() and
- // has not yet been deleted.
- void Delete(Span* span);
-
- // Mark an allocated span as being used for small objects of the
- // specified size-class.
- // REQUIRES: span was returned by an earlier call to New()
- // and has not yet been deleted.
- void RegisterSizeClass(Span* span, size_t sc);
-
- // Split an allocated span into two spans: one of length "n" pages
- // followed by another span of length "span->length - n" pages.
- // Modifies "*span" to point to the first span of length "n" pages.
- // Returns a pointer to the second span.
- //
- // REQUIRES: "0 < n < span->length"
- // REQUIRES: !span->free
- // REQUIRES: span->sizeclass == 0
- Span* Split(Span* span, Length n);
-
- // Return the descriptor for the specified page.
- inline Span* GetDescriptor(PageID p) const {
- return reinterpret_cast<Span*>(pagemap_.get(p));
- }
-
-#ifdef WTF_CHANGES
- inline Span* GetDescriptorEnsureSafe(PageID p)
- {
- pagemap_.Ensure(p, 1);
- return GetDescriptor(p);
- }
-
- size_t ReturnedBytes() const;
-#endif
-
- // Dump state to stderr
-#ifndef WTF_CHANGES
- void Dump(TCMalloc_Printer* out);
-#endif
-
- // Return number of bytes allocated from system
- inline uint64_t SystemBytes() const { return system_bytes_; }
-
- // Return number of free bytes in heap
- uint64_t FreeBytes() const {
- return (static_cast<uint64_t>(free_pages_) << kPageShift);
- }
-
- bool Check();
- size_t CheckList(Span* list, Length min_pages, Length max_pages, bool decommitted);
-
- // Release all pages on the free list for reuse by the OS:
- void ReleaseFreePages();
- void ReleaseFreeList(Span*, Span*);
-
- // Return 0 if we have no information, or else the correct sizeclass for p.
- // Reads and writes to pagemap_cache_ do not require locking.
- // The entries are 64 bits on 64-bit hardware and 16 bits on
- // 32-bit hardware, and we don't mind raciness as long as each read of
- // an entry yields a valid entry, not a partially updated entry.
- size_t GetSizeClassIfCached(PageID p) const {
- return pagemap_cache_.GetOrDefault(p, 0);
- }
- void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
-
- private:
- // Pick the appropriate map and cache types based on pointer size
- typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
- typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
- PageMap pagemap_;
- mutable PageMapCache pagemap_cache_;
-
- // We segregate spans of a given size into two circular linked
- // lists: one for normal spans, and one for spans whose memory
- // has been returned to the system.
- struct SpanList {
- Span normal;
- Span returned;
- };
-
- // List of free spans of length >= kMaxPages
- SpanList large_;
-
- // Array mapping from span length to a doubly linked list of free spans
- SpanList free_[kMaxPages];
-
- // Number of pages kept in free lists
- uintptr_t free_pages_;
-
- // Used for hardening
- uintptr_t entropy_;
-
- // Bytes allocated from system
- uint64_t system_bytes_;
-
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- // Number of pages kept in free lists that are still committed.
- Length free_committed_pages_;
-
- // Minimum number of free committed pages since last scavenge. (Can be 0 if
- // we've committed new pages since the last scavenge.)
- Length min_free_committed_pages_since_last_scavenge_;
-#endif
-
- bool GrowHeap(Length n);
-
- // REQUIRES span->length >= n
- // Remove span from its free list, and move any leftover part of
- // span into appropriate free lists. Also update "span" to have
- // length exactly "n" and mark it as non-free so it can be returned
- // to the client.
- //
- // "released" is true iff "span" was found on a "returned" list.
- void Carve(Span* span, Length n, bool released);
-
- void RecordSpan(Span* span) {
- pagemap_.set(span->start, span);
- if (span->length > 1) {
- pagemap_.set(span->start + span->length - 1, span);
- }
- }
-
- // Allocate a large span of length == n. If successful, returns a
- // span of exactly the specified length. Else, returns NULL.
- Span* AllocLarge(Length n);
-
-#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- // Incrementally release some memory to the system.
- // IncrementalScavenge(n) is called whenever n pages are freed.
- void IncrementalScavenge(Length n);
-#endif
-
- // Number of pages to deallocate before doing more scavenging
- int64_t scavenge_counter_;
-
- // Index of last free list we scavenged
- size_t scavenge_index_;
-
-#if defined(WTF_CHANGES) && OS(DARWIN)
- friend class FastMallocZone;
-#endif
-
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- void initializeScavenger();
- ALWAYS_INLINE void signalScavenger();
- void scavenge();
- ALWAYS_INLINE bool shouldScavenge() const;
-
-#if HAVE(DISPATCH_H) || OS(WINDOWS)
- void periodicScavenge();
- ALWAYS_INLINE bool isScavengerSuspended();
- ALWAYS_INLINE void scheduleScavenger();
- ALWAYS_INLINE void rescheduleScavenger();
- ALWAYS_INLINE void suspendScavenger();
-#endif
-
-#if HAVE(DISPATCH_H)
- dispatch_queue_t m_scavengeQueue;
- dispatch_source_t m_scavengeTimer;
- bool m_scavengingSuspended;
-#elif OS(WINDOWS)
- static void CALLBACK scavengerTimerFired(void*, BOOLEAN);
- HANDLE m_scavengeQueueTimer;
-#else
- static NO_RETURN_WITH_VALUE void* runScavengerThread(void*);
- NO_RETURN void scavengerThread();
-
- // Keeps track of whether the background thread is actively scavenging memory every kScavengeDelayInSeconds, or
- // whether it is blocked waiting for more pages to be deleted.
- bool m_scavengeThreadActive;
-
- pthread_mutex_t m_scavengeMutex;
- pthread_cond_t m_scavengeCondition;
-#endif
-
-#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-};
-
-void TCMalloc_PageHeap::init()
-{
- pagemap_.init(MetaDataAlloc);
- pagemap_cache_ = PageMapCache(0);
- free_pages_ = 0;
- system_bytes_ = 0;
- entropy_ = HARDENING_ENTROPY;
-
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- free_committed_pages_ = 0;
- min_free_committed_pages_since_last_scavenge_ = 0;
-#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-
- scavenge_counter_ = 0;
- // Start scavenging at kMaxPages list
- scavenge_index_ = kMaxPages-1;
- COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
- DLL_Init(&large_.normal, entropy_);
- DLL_Init(&large_.returned, entropy_);
- for (size_t i = 0; i < kMaxPages; i++) {
- DLL_Init(&free_[i].normal, entropy_);
- DLL_Init(&free_[i].returned, entropy_);
- }
-
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- initializeScavenger();
-#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-}
-
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-
-#if HAVE(DISPATCH_H)
-
-void TCMalloc_PageHeap::initializeScavenger()
-{
- m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMallocSavenger", NULL);
- m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m_scavengeQueue);
- uint64_t scavengeDelayInNanoseconds = kScavengeDelayInSeconds * NSEC_PER_SEC;
- dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, scavengeDelayInNanoseconds);
- dispatch_source_set_timer(m_scavengeTimer, startTime, scavengeDelayInNanoseconds, scavengeDelayInNanoseconds / 10);
- dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); });
- m_scavengingSuspended = true;
-}
-
-ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
-{
- ASSERT(pageheap_lock.IsHeld());
- return m_scavengingSuspended;
-}
-
-ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
-{
- ASSERT(pageheap_lock.IsHeld());
- m_scavengingSuspended = false;
- dispatch_resume(m_scavengeTimer);
-}
-
-ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger()
-{
- // Nothing to do here for libdispatch.
-}
-
-ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger()
-{
- ASSERT(pageheap_lock.IsHeld());
- m_scavengingSuspended = true;
- dispatch_suspend(m_scavengeTimer);
-}
-
-#elif OS(WINDOWS)
-
-void TCMalloc_PageHeap::scavengerTimerFired(void* context, BOOLEAN)
-{
- static_cast<TCMalloc_PageHeap*>(context)->periodicScavenge();
-}
-
-void TCMalloc_PageHeap::initializeScavenger()
-{
- m_scavengeQueueTimer = 0;
-}
-
-ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
-{
- ASSERT(pageheap_lock.IsHeld());
- return !m_scavengeQueueTimer;
-}
-
-ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
-{
- // We need to use WT_EXECUTEONLYONCE here and reschedule the timer, because
- // Windows will fire the timer event even when the function is already running.
- ASSERT(pageheap_lock.IsHeld());
- CreateTimerQueueTimer(&m_scavengeQueueTimer, 0, scavengerTimerFired, this, kScavengeDelayInSeconds * 1000, 0, WT_EXECUTEONLYONCE);
-}
-
-ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger()
-{
- // We must delete the timer and create it again, because it is not possible to retrigger a timer on Windows.
- suspendScavenger();
- scheduleScavenger();
-}
-
-ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger()
-{
- ASSERT(pageheap_lock.IsHeld());
- HANDLE scavengeQueueTimer = m_scavengeQueueTimer;
- m_scavengeQueueTimer = 0;
- DeleteTimerQueueTimer(0, scavengeQueueTimer, 0);
-}
-
-#else
-
-void TCMalloc_PageHeap::initializeScavenger()
-{
- // Create a non-recursive mutex.
-#if !defined(PTHREAD_MUTEX_NORMAL) || PTHREAD_MUTEX_NORMAL == PTHREAD_MUTEX_DEFAULT
- pthread_mutex_init(&m_scavengeMutex, 0);
-#else
- pthread_mutexattr_t attr;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
-
- pthread_mutex_init(&m_scavengeMutex, &attr);
-
- pthread_mutexattr_destroy(&attr);
-#endif
-
- pthread_cond_init(&m_scavengeCondition, 0);
- m_scavengeThreadActive = true;
- pthread_t thread;
- pthread_create(&thread, 0, runScavengerThread, this);
-}
-
-void* TCMalloc_PageHeap::runScavengerThread(void* context)
-{
- static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
-#if (COMPILER(MSVC) || COMPILER(SUNCC))
- // Without this, Visual Studio and Sun Studio will complain that this method does not return a value.
- return 0;
-#endif
-}
-
-ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
-{
- // shouldScavenge() should be called only while the pageheap_lock spinlock is held; additionally,
- // m_scavengeThreadActive is only set to false while pageheap_lock is held. The caller must ensure the lock is
- // taken prior to calling this method. If the scavenger thread is sleeping and shouldScavenge() indicates there
- // is memory to free, the scavenger thread is signalled to start.
- ASSERT(pageheap_lock.IsHeld());
- if (!m_scavengeThreadActive && shouldScavenge())
- pthread_cond_signal(&m_scavengeCondition);
-}
-
-#endif
-
-void TCMalloc_PageHeap::scavenge()
+void* fastMalloc(size_t size)
{
- size_t pagesToRelease = min_free_committed_pages_since_last_scavenge_ * kScavengePercentage;
- size_t targetPageCount = std::max<size_t>(kMinimumFreeCommittedPageCount, free_committed_pages_ - pagesToRelease);
-
- Length lastFreeCommittedPages = free_committed_pages_;
- while (free_committed_pages_ > targetPageCount) {
- ASSERT(Check());
- for (int i = kMaxPages; i > 0 && free_committed_pages_ >= targetPageCount; i--) {
- SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
- // If the span size is bigger than kMinSpanListsWithSpans pages, return all the spans in the list;
- // otherwise return only 50% of the span list at a time, so spans of size 1 are not the only ones left.
- size_t length = DLL_Length(&slist->normal, entropy_);
- size_t numSpansToReturn = (i > kMinSpanListsWithSpans) ? length : length / 2;
- for (int j = 0; static_cast<size_t>(j) < numSpansToReturn && !DLL_IsEmpty(&slist->normal, entropy_) && free_committed_pages_ > targetPageCount; j++) {
- Span* s = slist->normal.prev(entropy_);
- DLL_Remove(s, entropy_);
- ASSERT(!s->decommitted);
- if (!s->decommitted) {
- TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
- static_cast<size_t>(s->length << kPageShift));
- ASSERT(free_committed_pages_ >= s->length);
- free_committed_pages_ -= s->length;
- s->decommitted = true;
- }
- DLL_Prepend(&slist->returned, s, entropy_);
- }
- }
-
- if (lastFreeCommittedPages == free_committed_pages_)
- break;
- lastFreeCommittedPages = free_committed_pages_;
- }
-
- min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
+ ASSERT_IS_WITHIN_LIMIT(size);
+ return bmalloc::api::malloc(size);
}
-ALWAYS_INLINE bool TCMalloc_PageHeap::shouldScavenge() const
+void* fastCalloc(size_t numElements, size_t elementSize)
{
- return free_committed_pages_ > kMinimumFreeCommittedPageCount;
-}
-
-#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-
-inline Span* TCMalloc_PageHeap::New(Length n) {
- ASSERT(Check());
- ASSERT(n > 0);
-
- // Find first size >= n that has a non-empty list
- for (Length s = n; s < kMaxPages; s++) {
- Span* ll = NULL;
- bool released = false;
- if (!DLL_IsEmpty(&free_[s].normal, entropy_)) {
- // Found normal span
- ll = &free_[s].normal;
- } else if (!DLL_IsEmpty(&free_[s].returned, entropy_)) {
- // Found returned span; reallocate it
- ll = &free_[s].returned;
- released = true;
- } else {
- // Keep looking in larger classes
- continue;
- }
-
- Span* result = ll->next(entropy_);
- Carve(result, n, released);
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
- // free committed pages count.
- ASSERT(free_committed_pages_ >= n);
- free_committed_pages_ -= n;
- if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
- min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
-#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- ASSERT(Check());
- free_pages_ -= n;
- return result;
- }
-
- Span* result = AllocLarge(n);
- if (result != NULL) {
- ASSERT_SPAN_COMMITTED(result);
- return result;
- }
-
- // Grow the heap and try again
- if (!GrowHeap(n)) {
- ASSERT(Check());
- return NULL;
- }
-
- return New(n);
-}
-
-Span* TCMalloc_PageHeap::AllocLarge(Length n) {
- // Find the best span (closest to n in size).
- // The following loops implement address-ordered best-fit.
- bool from_released = false;
- Span *best = NULL;
-
- // Search through normal list
- for (Span* span = large_.normal.next(entropy_);
- span != &large_.normal;
- span = span->next(entropy_)) {
- if (span->length >= n) {
- if ((best == NULL)
- || (span->length < best->length)
- || ((span->length == best->length) && (span->start < best->start))) {
- best = span;
- from_released = false;
- }
- }
- }
-
- // Search through released list in case it has a better fit
- for (Span* span = large_.returned.next(entropy_);
- span != &large_.returned;
- span = span->next(entropy_)) {
- if (span->length >= n) {
- if ((best == NULL)
- || (span->length < best->length)
- || ((span->length == best->length) && (span->start < best->start))) {
- best = span;
- from_released = true;
- }
- }
- }
-
- if (best != NULL) {
- Carve(best, n, from_released);
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
- // free committed pages count.
- ASSERT(free_committed_pages_ >= n);
- free_committed_pages_ -= n;
- if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
- min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
-#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- ASSERT(Check());
- free_pages_ -= n;
- return best;
- }
- return NULL;
-}
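// A standalone sketch of the address-ordered best-fit rule implemented by the two
// loops in AllocLarge() above: among spans long enough for the request, prefer the
// shortest, breaking ties by the lowest starting page. A flat vector of
// (start, length) pairs stands in for the two circular span lists.
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

typedef std::pair<size_t, size_t> SketchSpan; // (start page, length in pages)

static const SketchSpan* bestFit(const std::vector<SketchSpan>& spans, size_t n)
{
    const SketchSpan* best = nullptr;
    for (const SketchSpan& span : spans) {
        if (span.second < n)
            continue;
        if (!best || span.second < best->second
            || (span.second == best->second && span.first < best->first))
            best = &span;
    }
    return best;
}

int main()
{
    std::vector<SketchSpan> spans = { { 100, 8 }, { 40, 6 }, { 200, 6 }, { 10, 4 } };
    const SketchSpan* best = bestFit(spans, 5);
    // Both 6-page spans fit a 5-page request; the one starting at page 40 wins the tie.
    std::printf("best fit: start = %zu, length = %zu\n", best->first, best->second);
    return 0;
}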
-
-Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
- ASSERT(0 < n);
- ASSERT(n < span->length);
- ASSERT(!span->free);
- ASSERT(span->sizeclass == 0);
- Event(span, 'T', n);
-
- const Length extra = span->length - n;
- Span* leftover = NewSpan(span->start + n, extra);
- Event(leftover, 'U', extra);
- RecordSpan(leftover);
- pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
- span->length = n;
-
- return leftover;
-}
-
-inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
- ASSERT(n > 0);
- DLL_Remove(span, entropy_);
- span->free = 0;
- Event(span, 'A', n);
-
- if (released) {
- // If the span chosen to carve from is decommitted, commit the entire span at once to avoid committing spans 1 page at a time.
- ASSERT(span->decommitted);
- TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), static_cast<size_t>(span->length << kPageShift));
- span->decommitted = false;
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- free_committed_pages_ += span->length;
-#endif
- }
-
- const int extra = static_cast<int>(span->length - n);
- ASSERT(extra >= 0);
- if (extra > 0) {
- Span* leftover = NewSpan(span->start + n, extra);
- leftover->free = 1;
- leftover->decommitted = false;
- Event(leftover, 'S', extra);
- RecordSpan(leftover);
-
- // Place leftover span on appropriate free list
- SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
- Span* dst = &listpair->normal;
- DLL_Prepend(dst, leftover, entropy_);
-
- span->length = n;
- pagemap_.set(span->start + n - 1, span);
- }
-}
-
-static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
-{
- if (destination->decommitted && !other->decommitted) {
- TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
- static_cast<size_t>(other->length << kPageShift));
- } else if (other->decommitted && !destination->decommitted) {
- TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPageShift),
- static_cast<size_t>(destination->length << kPageShift));
- destination->decommitted = true;
- }
-}
-
-inline void TCMalloc_PageHeap::Delete(Span* span) {
- ASSERT(Check());
- ASSERT(!span->free);
- ASSERT(span->length > 0);
- ASSERT(GetDescriptor(span->start) == span);
- ASSERT(GetDescriptor(span->start + span->length - 1) == span);
- span->sizeclass = 0;
-#ifndef NO_TCMALLOC_SAMPLES
- span->sample = 0;
-#endif
-
- // Coalesce -- we guarantee that "p" != 0, so no bounds checking is
- // necessary. We do not bother resetting the stale pagemap
- // entries for the pieces we are merging together because we only
- // care about the pagemap entries for the boundaries.
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- // Track the total size of the neighboring free spans that are committed.
- Length neighboringCommittedSpansLength = 0;
-#endif
- const PageID p = span->start;
- const Length n = span->length;
- Span* prev = GetDescriptor(p-1);
- if (prev != NULL && prev->free) {
- // Merge preceding span into this span
- ASSERT(prev->start + prev->length == p);
- const Length len = prev->length;
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- if (!prev->decommitted)
- neighboringCommittedSpansLength += len;
-#endif
- mergeDecommittedStates(span, prev);
- DLL_Remove(prev, entropy_);
- DeleteSpan(prev);
- span->start -= len;
- span->length += len;
- pagemap_.set(span->start, span);
- Event(span, 'L', len);
- }
- Span* next = GetDescriptor(p+n);
- if (next != NULL && next->free) {
- // Merge next span into this span
- ASSERT(next->start == p+n);
- const Length len = next->length;
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- if (!next->decommitted)
- neighboringCommittedSpansLength += len;
-#endif
- mergeDecommittedStates(span, next);
- DLL_Remove(next, entropy_);
- DeleteSpan(next);
- span->length += len;
- pagemap_.set(span->start + span->length - 1, span);
- Event(span, 'R', len);
- }
-
- Event(span, 'D', span->length);
- span->free = 1;
- if (span->decommitted) {
- if (span->length < kMaxPages)
- DLL_Prepend(&free_[span->length].returned, span, entropy_);
- else
- DLL_Prepend(&large_.returned, span, entropy_);
- } else {
- if (span->length < kMaxPages)
- DLL_Prepend(&free_[span->length].normal, span, entropy_);
- else
- DLL_Prepend(&large_.normal, span, entropy_);
- }
- free_pages_ += n;
-
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- if (span->decommitted) {
- // If the merged span is decommitted, that means we decommitted any neighboring spans that were
- // committed. Update the free committed pages count.
- free_committed_pages_ -= neighboringCommittedSpansLength;
- if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
- min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
- } else {
- // If the merged span remains committed, add the deleted span's size to the free committed pages count.
- free_committed_pages_ += n;
- }
-
- // Make sure the scavenge thread becomes active if we have enough freed pages to release some back to the system.
- signalScavenger();
-#else
- IncrementalScavenge(n);
-#endif
-
- ASSERT(Check());
-}
-
-#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
- // Fast path; not yet time to release memory
- scavenge_counter_ -= n;
- if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
-
-#if PLATFORM(IOS)
- static const size_t kDefaultReleaseDelay = 64;
-#else
- // If there is nothing to release, wait until this many pages have been freed
- // before scavenging again. With 4K pages, this comes to 1MB of memory.
- static const size_t kDefaultReleaseDelay = 1 << 8;
-#endif
-
- // Find index of free list to scavenge
- size_t index = scavenge_index_ + 1;
- uintptr_t entropy = entropy_;
- for (size_t i = 0; i < kMaxPages+1; i++) {
- if (index > kMaxPages) index = 0;
- SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
- if (!DLL_IsEmpty(&slist->normal, entropy)) {
- // Release the last span on the normal portion of this list
- Span* s = slist->normal.prev(entropy);
- DLL_Remove(s, entropy_);
- TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
- static_cast<size_t>(s->length << kPageShift));
- s->decommitted = true;
- DLL_Prepend(&slist->returned, s, entropy);
-
-#if PLATFORM(IOS)
- scavenge_counter_ = std::max<size_t>(16UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));
-#else
- scavenge_counter_ = std::max<size_t>(64UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));
-#endif
-
- if (index == kMaxPages && !DLL_IsEmpty(&slist->normal, entropy))
- scavenge_index_ = index - 1;
- else
- scavenge_index_ = index;
- return;
- }
- index++;
- }
-
- // Nothing to scavenge, delay for a while
- scavenge_counter_ = kDefaultReleaseDelay;
-}
-#endif
-
-void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
- // Associate span object with all interior pages as well
- ASSERT(!span->free);
- ASSERT(GetDescriptor(span->start) == span);
- ASSERT(GetDescriptor(span->start+span->length-1) == span);
- Event(span, 'C', sc);
- span->sizeclass = static_cast<unsigned int>(sc);
- for (Length i = 1; i < span->length-1; i++) {
- pagemap_.set(span->start+i, span);
- }
-}
-
-#ifdef WTF_CHANGES
-size_t TCMalloc_PageHeap::ReturnedBytes() const {
- size_t result = 0;
- for (unsigned s = 0; s < kMaxPages; s++) {
- const int r_length = DLL_Length(&free_[s].returned, entropy_);
- unsigned r_pages = s * r_length;
- result += r_pages << kPageShift;
- }
-
- for (Span* s = large_.returned.next(entropy_); s != &large_.returned; s = s->next(entropy_))
- result += s->length << kPageShift;
+ ASSERT_IS_WITHIN_LIMIT(numElements * elementSize);
+ Checked<size_t> checkedSize = elementSize;
+ checkedSize *= numElements;
+ void* result = fastZeroedMalloc(checkedSize.unsafeGet());
+ if (!result)
+ CRASH();
return result;
}
-#endif
-
-#ifndef WTF_CHANGES
-static double PagesToMB(uint64_t pages) {
- return (pages << kPageShift) / 1048576.0;
-}
-
-void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
- int nonempty_sizes = 0;
- for (int s = 0; s < kMaxPages; s++) {
- if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
- nonempty_sizes++;
- }
- }
- out->printf("------------------------------------------------\n");
- out->printf("PageHeap: %d sizes; %6.1f MB free\n",
- nonempty_sizes, PagesToMB(free_pages_));
- out->printf("------------------------------------------------\n");
- uint64_t total_normal = 0;
- uint64_t total_returned = 0;
- for (int s = 0; s < kMaxPages; s++) {
- const int n_length = DLL_Length(&free_[s].normal);
- const int r_length = DLL_Length(&free_[s].returned);
- if (n_length + r_length > 0) {
- uint64_t n_pages = s * n_length;
- uint64_t r_pages = s * r_length;
- total_normal += n_pages;
- total_returned += r_pages;
- out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
- "; unmapped: %6.1f MB; %6.1f MB cum\n",
- s,
- (n_length + r_length),
- PagesToMB(n_pages + r_pages),
- PagesToMB(total_normal + total_returned),
- PagesToMB(r_pages),
- PagesToMB(total_returned));
- }
- }
-
- uint64_t n_pages = 0;
- uint64_t r_pages = 0;
- int n_spans = 0;
- int r_spans = 0;
- out->printf("Normal large spans:\n");
- for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
- out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
- s->length, PagesToMB(s->length));
- n_pages += s->length;
- n_spans++;
- }
- out->printf("Unmapped large spans:\n");
- for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
- out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
- s->length, PagesToMB(s->length));
- r_pages += s->length;
- r_spans++;
- }
- total_normal += n_pages;
- total_returned += r_pages;
- out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
- "; unmapped: %6.1f MB; %6.1f MB cum\n",
- (n_spans + r_spans),
- PagesToMB(n_pages + r_pages),
- PagesToMB(total_normal + total_returned),
- PagesToMB(r_pages),
- PagesToMB(total_returned));
-}
-#endif
-
-bool TCMalloc_PageHeap::GrowHeap(Length n) {
- ASSERT(kMaxPages >= kMinSystemAlloc);
- if (n > kMaxValidPages) return false;
- Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
- size_t actual_size;
- void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
- if (ptr == NULL) {
- if (n < ask) {
- // Try growing just "n" pages
- ask = n;
- ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
- }
- if (ptr == NULL) return false;
- }
- ask = actual_size >> kPageShift;
-
- uint64_t old_system_bytes = system_bytes_;
- system_bytes_ += (ask << kPageShift);
- const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- ASSERT(p > 0);
-
- // If we already have a lot of pages allocated, just preallocate a bunch of
- // memory for the page map. This prevents fragmentation by pagemap metadata
- // when a program keeps allocating and freeing large blocks.
-
- if (old_system_bytes < kPageMapBigAllocationThreshold
- && system_bytes_ >= kPageMapBigAllocationThreshold) {
- pagemap_.PreallocateMoreMemory();
- }
-
- // Make sure pagemap_ has entries for all of the new pages.
- // Plus ensure one before and one after so coalescing code
- // does not need bounds-checking.
- if (pagemap_.Ensure(p-1, ask+2)) {
- // Pretend the new area is allocated and then Delete() it to
- // cause any necessary coalescing to occur.
- //
- // We do not adjust free_pages_ here since Delete() will do it for us.
- Span* span = NewSpan(p, ask);
- RecordSpan(span);
- Delete(span);
- ASSERT(Check());
- return true;
- } else {
- // We could not allocate memory within "pagemap_"
- // TODO: Once we can return memory to the system, return the new span
- return false;
- }
-}
-
-bool TCMalloc_PageHeap::Check() {
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- size_t totalFreeCommitted = 0;
-#endif
- ASSERT(free_[0].normal.next(entropy_) == &free_[0].normal);
- ASSERT(free_[0].returned.next(entropy_) == &free_[0].returned);
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- totalFreeCommitted = CheckList(&large_.normal, kMaxPages, 1000000000, false);
-#else
- CheckList(&large_.normal, kMaxPages, 1000000000, false);
-#endif
- CheckList(&large_.returned, kMaxPages, 1000000000, true);
- for (Length s = 1; s < kMaxPages; s++) {
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- totalFreeCommitted += CheckList(&free_[s].normal, s, s, false);
-#else
- CheckList(&free_[s].normal, s, s, false);
-#endif
- CheckList(&free_[s].returned, s, s, true);
- }
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- ASSERT(totalFreeCommitted == free_committed_pages_);
-#endif
- return true;
-}
-
-#if ASSERT_DISABLED
-size_t TCMalloc_PageHeap::CheckList(Span*, Length, Length, bool) {
- return 0;
-}
-#else
-size_t TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages, bool decommitted) {
- size_t freeCount = 0;
- for (Span* s = list->next(entropy_); s != list; s = s->next(entropy_)) {
- CHECK_CONDITION(s->free);
- CHECK_CONDITION(s->length >= min_pages);
- CHECK_CONDITION(s->length <= max_pages);
- CHECK_CONDITION(GetDescriptor(s->start) == s);
- CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
- CHECK_CONDITION(s->decommitted == decommitted);
- freeCount += s->length;
- }
- return freeCount;
-}
-#endif
-
-void TCMalloc_PageHeap::ReleaseFreeList(Span* list, Span* returned) {
- // Walk backwards through list so that when we push these
- // spans on the "returned" list, we preserve the order.
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- size_t freePageReduction = 0;
-#endif
-
- while (!DLL_IsEmpty(list, entropy_)) {
- Span* s = list->prev(entropy_);
-
- DLL_Remove(s, entropy_);
- s->decommitted = true;
- DLL_Prepend(returned, s, entropy_);
- TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
- static_cast<size_t>(s->length << kPageShift));
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- freePageReduction += s->length;
-#endif
- }
-
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- free_committed_pages_ -= freePageReduction;
- if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
- min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
-#endif
-}
-
-void TCMalloc_PageHeap::ReleaseFreePages() {
- for (Length s = 0; s < kMaxPages; s++) {
- ReleaseFreeList(&free_[s].normal, &free_[s].returned);
- }
- ReleaseFreeList(&large_.normal, &large_.returned);
- ASSERT(Check());
-}
-
-//-------------------------------------------------------------------
-// Free list
-//-------------------------------------------------------------------
-
-class TCMalloc_ThreadCache_FreeList {
- private:
- HardenedSLL list_; // Linked list of nodes
- uint16_t length_; // Current length
- uint16_t lowater_; // Low water mark for list length
- uintptr_t entropy_; // Entropy source for hardening
-
- public:
- void Init(uintptr_t entropy) {
- list_.setValue(NULL);
- length_ = 0;
- lowater_ = 0;
- entropy_ = entropy;
-#if ENABLE(TCMALLOC_HARDENING)
- ASSERT(entropy_);
-#endif
- }
-
- // Return current length of list
- int length() const {
- return length_;
- }
-
- // Is list empty?
- bool empty() const {
- return !list_;
- }
-
- // Low-water mark management
- int lowwatermark() const { return lowater_; }
- void clear_lowwatermark() { lowater_ = length_; }
-
- ALWAYS_INLINE void Push(HardenedSLL ptr) {
- SLL_Push(&list_, ptr, entropy_);
- length_++;
- }
-
- void PushRange(int N, HardenedSLL start, HardenedSLL end) {
- SLL_PushRange(&list_, start, end, entropy_);
- length_ = length_ + static_cast<uint16_t>(N);
- }
-
- void PopRange(int N, HardenedSLL* start, HardenedSLL* end) {
- SLL_PopRange(&list_, N, start, end, entropy_);
- ASSERT(length_ >= N);
- length_ = length_ - static_cast<uint16_t>(N);
- if (length_ < lowater_) lowater_ = length_;
- }
-
- ALWAYS_INLINE void* Pop() {
- ASSERT(list_);
- length_--;
- if (length_ < lowater_) lowater_ = length_;
- return SLL_Pop(&list_, entropy_).value();
- }
-
- // Walks the entire linked list to verify that it is traversable,
- // and checks that 'missing' does not appear anywhere in it.
- NEVER_INLINE void Validate(HardenedSLL missing, size_t size) {
- HardenedSLL node = list_;
- UNUSED_PARAM(size);
- while (node) {
- RELEASE_ASSERT(node != missing);
- RELEASE_ASSERT(IS_DEFINITELY_POISONED(node.value(), size));
- node = SLL_Next(node, entropy_);
- }
- }
-
-#ifdef WTF_CHANGES
- template <class Finder, class Reader>
- void enumerateFreeObjects(Finder& finder, const Reader& reader)
- {
- for (HardenedSLL nextObject = list_; nextObject; nextObject.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), entropy_)))
- finder.visit(nextObject.value());
- }
-#endif
-};
-
-//-------------------------------------------------------------------
-// Data kept per thread
-//-------------------------------------------------------------------
-
-class TCMalloc_ThreadCache {
- private:
- typedef TCMalloc_ThreadCache_FreeList FreeList;
-#if OS(WINDOWS)
- typedef DWORD ThreadIdentifier;
-#else
- typedef pthread_t ThreadIdentifier;
-#endif
-
- size_t size_; // Combined size of data
- ThreadIdentifier tid_; // Which thread owns it
- bool in_setspecific_; // Called pthread_setspecific?
- FreeList list_[kNumClasses]; // Array indexed by size-class
-
- // We sample allocations, biased by the size of the allocation
- uint32_t rnd_; // Cheap random number generator
- size_t bytes_until_sample_; // Bytes until we sample next
- uintptr_t entropy_; // Entropy value used for hardening
-
- // Allocate a new heap. REQUIRES: pageheap_lock is held.
- static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid, uintptr_t entropy);
-
- // Use only as pthread thread-specific destructor function.
- static void DestroyThreadCache(void* ptr);
- public:
- // All ThreadCache objects are kept in a linked list (for stats collection)
- TCMalloc_ThreadCache* next_;
- TCMalloc_ThreadCache* prev_;
-
- void Init(ThreadIdentifier tid, uintptr_t entropy);
- void Cleanup();
-
- // Accessors (mostly just for printing stats)
- int freelist_length(size_t cl) const { return list_[cl].length(); }
-
- // Total byte size in cache
- size_t Size() const { return size_; }
-
- ALWAYS_INLINE void* Allocate(size_t size);
- void Deallocate(HardenedSLL ptr, size_t size_class);
-
- ALWAYS_INLINE void FetchFromCentralCache(size_t cl, size_t allocationSize);
- void ReleaseToCentralCache(size_t cl, int N);
- void Scavenge();
- void Print() const;
-
- // Record allocation of "k" bytes. Return true iff allocation
- // should be sampled
- bool SampleAllocation(size_t k);
-
- // Pick next sampling point
- void PickNextSample(size_t k);
-
- static void InitModule();
- static void InitTSD();
- static TCMalloc_ThreadCache* GetThreadHeap();
- static TCMalloc_ThreadCache* GetCache();
- static TCMalloc_ThreadCache* GetCacheIfPresent();
- static TCMalloc_ThreadCache* CreateCacheIfNecessary();
- static void DeleteCache(TCMalloc_ThreadCache* heap);
- static void BecomeIdle();
- static void RecomputeThreadCacheSize();
-
-#ifdef WTF_CHANGES
- template <class Finder, class Reader>
- void enumerateFreeObjects(Finder& finder, const Reader& reader)
- {
- for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
- list_[sizeClass].enumerateFreeObjects(finder, reader);
- }
-#endif
-};
-
-//-------------------------------------------------------------------
-// Global variables
-//-------------------------------------------------------------------
-
-// Central cache -- a collection of free-lists, one per size-class.
-// We have a separate lock per free-list to reduce contention.
-static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
-
-// Page-level allocator
-static AllocAlignmentInteger pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(AllocAlignmentInteger) - 1) / sizeof(AllocAlignmentInteger)];
-static bool phinited = false;
-
-// Avoid extra level of indirection by making "pageheap" be just an alias
-// of pageheap_memory.
-typedef union {
- void* m_memory;
- TCMalloc_PageHeap* m_pageHeap;
-} PageHeapUnion;
-
-static inline TCMalloc_PageHeap* getPageHeap()
+void* fastRealloc(void* object, size_t size)
{
- PageHeapUnion u = { &pageheap_memory[0] };
- return u.m_pageHeap;
+ ASSERT_IS_WITHIN_LIMIT(size);
+ return bmalloc::api::realloc(object, size);
}
-#define pageheap getPageHeap()
-
-size_t fastMallocGoodSize(size_t bytes)
+void fastFree(void* object)
{
- if (!phinited)
- TCMalloc_ThreadCache::InitModule();
- return AllocationSize(bytes);
+ bmalloc::api::free(object);
}
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-
-#if HAVE(DISPATCH_H) || OS(WINDOWS)
-
-void TCMalloc_PageHeap::periodicScavenge()
+size_t fastMallocSize(const void*)
{
- SpinLockHolder h(&pageheap_lock);
- pageheap->scavenge();
-
- if (shouldScavenge()) {
- rescheduleScavenger();
- return;
- }
-
- suspendScavenger();
+ // FIXME: This is incorrect; best fix is probably to remove this function.
+ // Callers currently all use this for assertions, not to actually check
+ // the size of the allocation, so maybe we can come up with something for that.
+ return 1;
}
-ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
+size_t fastMallocGoodSize(size_t size)
{
- ASSERT(pageheap_lock.IsHeld());
- if (isScavengerSuspended() && shouldScavenge())
- scheduleScavenger();
+ return size;
}
-#else
-
-void TCMalloc_PageHeap::scavengerThread()
+void* fastAlignedMalloc(size_t alignment, size_t size)
{
-#if HAVE(PTHREAD_SETNAME_NP)
- pthread_setname_np("JavaScriptCore: FastMalloc scavenger");
-#endif
-
- while (1) {
- pageheap_lock.Lock();
- if (!shouldScavenge()) {
- // Set to false so that signalScavenger() will check whether we need to be signalled.
- m_scavengeThreadActive = false;
-
- // We need to unlock now, as this thread will block on the condvar until scavenging is required.
- pageheap_lock.Unlock();
-
- // Block until there are enough free committed pages to release back to the system.
- pthread_mutex_lock(&m_scavengeMutex);
- pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
- // After exiting the pthread_cond_wait, we hold the lock on m_scavengeMutex. Unlock it to prevent
- // deadlock next time round the loop.
- pthread_mutex_unlock(&m_scavengeMutex);
-
- // Set to true to prevent unnecessary signalling of the condvar.
- m_scavengeThreadActive = true;
- } else
- pageheap_lock.Unlock();
-
- // Wait for a while to calculate how much memory remains unused during this pause.
- sleep(kScavengeDelayInSeconds);
-
- {
- SpinLockHolder h(&pageheap_lock);
- pageheap->scavenge();
- }
- }
+ ASSERT_IS_WITHIN_LIMIT(size);
+ return bmalloc::api::memalign(alignment, size);
}
-#endif
-
-#endif
-
-// If TLS is available, we also store a copy
-// of the per-thread object in a __thread variable
-// since __thread variables are faster to read
-// than pthread_getspecific(). We still need
-// pthread_setspecific() because __thread
-// variables provide no way to run cleanup
-// code when a thread is destroyed.
-#ifdef HAVE_TLS
-static __thread TCMalloc_ThreadCache *threadlocal_heap;
-#endif
-// Thread-specific key. Initialization here is somewhat tricky
-// because some Linux startup code invokes malloc() before it
-// is in a good enough state to handle pthread_keycreate().
-// Therefore, we use TSD keys only after tsd_inited is set to true.
-// Until then, we use a slow path to get the heap object.
-static bool tsd_inited = false;
-#if USE(PTHREAD_GETSPECIFIC_DIRECT)
-static const pthread_key_t heap_key = __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0;
-#else
-static ThreadSpecificKey heap_key;
-#endif
-
-static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
+void* tryFastAlignedMalloc(size_t alignment, size_t size)
{
-#if USE(PTHREAD_GETSPECIFIC_DIRECT)
- // Can't have two libraries both doing this in the same process,
- // so check and make this crash right away.
- if (pthread_getspecific(heap_key))
- CRASH();
-#endif
-
-#if OS(DARWIN)
- // Still do pthread_setspecific even if there's an alternate form
- // of thread-local storage in use, to benefit from the delete callback.
- pthread_setspecific(heap_key, heap);
-#else
- threadSpecificSet(heap_key, heap);
-#endif
-}
-
-// Allocator for thread heaps
-static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;
-
-// Linked list of heap objects. Protected by pageheap_lock.
-static TCMalloc_ThreadCache* thread_heaps = NULL;
-static int thread_heap_count = 0;
-
-// Overall thread cache size. Protected by pageheap_lock.
-static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;
-
-// Global per-thread cache size. Writes are protected by
-// pageheap_lock. Reads are done without any locking, which should be
-// fine as long as size_t can be written atomically and we don't place
-// invariants between this variable and other pieces of state.
-static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
-
-//-------------------------------------------------------------------
-// Central cache implementation
-//-------------------------------------------------------------------
-
-void TCMalloc_Central_FreeList::Init(size_t cl, uintptr_t entropy) {
- lock_.Init();
- size_class_ = cl;
- entropy_ = entropy;
-#if ENABLE(TCMALLOC_HARDENING)
- ASSERT(entropy_);
-#endif
- DLL_Init(&empty_, entropy_);
- DLL_Init(&nonempty_, entropy_);
- counter_ = 0;
-
- cache_size_ = 1;
- used_slots_ = 0;
- ASSERT(cache_size_ <= kNumTransferEntries);
-}
-
-void TCMalloc_Central_FreeList::ReleaseListToSpans(HardenedSLL start) {
- while (start) {
- HardenedSLL next = SLL_Next(start, entropy_);
- ReleaseToSpans(start);
- start = next;
- }
-}
-
-ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(HardenedSLL object) {
- const PageID p = reinterpret_cast<uintptr_t>(object.value()) >> kPageShift;
- Span* span = pageheap->GetDescriptor(p);
- ASSERT(span != NULL);
- ASSERT(span->refcount > 0);
-
- // If the span's object list was empty, it is on the empty list;
- // move it to the non-empty list since we are about to return an object to it.
- if (!span->objects) {
- DLL_Remove(span, entropy_);
- DLL_Prepend(&nonempty_, span, entropy_);
- Event(span, 'N', 0);
- }
-
- // The following check is expensive, so it is disabled by default
- if (false) {
- // Check that object does not occur in list
- unsigned got = 0;
- for (HardenedSLL p = span->objects; p; p = SLL_Next(p, entropy_)) {
- ASSERT(p.value() != object.value());
- got++;
- }
- ASSERT(got + span->refcount ==
- (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
- }
-
- counter_++;
- span->refcount--;
- if (span->refcount == 0) {
- Event(span, '#', 0);
- counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
- DLL_Remove(span, entropy_);
-
- // Release central list lock while operating on pageheap
- lock_.Unlock();
- {
- SpinLockHolder h(&pageheap_lock);
- pageheap->Delete(span);
- }
- lock_.Lock();
- } else {
- SLL_SetNext(object, span->objects, entropy_);
- span->objects.setValue(object.value());
- }
-}
-
-ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
- size_t locked_size_class, bool force) {
- static int race_counter = 0;
- int t = race_counter++; // Updated without a lock, but who cares.
- if (t >= static_cast<int>(kNumClasses)) {
- while (t >= static_cast<int>(kNumClasses)) {
- t -= kNumClasses;
- }
- race_counter = t;
- }
- ASSERT(t >= 0);
- ASSERT(t < static_cast<int>(kNumClasses));
- if (t == static_cast<int>(locked_size_class)) return false;
- return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
-}
-
-bool TCMalloc_Central_FreeList::MakeCacheSpace() {
- // Is there room in the cache?
- if (used_slots_ < cache_size_) return true;
- // Can we expand this cache?
- if (cache_size_ == kNumTransferEntries) return false;
- // Ok, we'll try to grab an entry from some other size class.
- if (EvictRandomSizeClass(size_class_, false) ||
- EvictRandomSizeClass(size_class_, true)) {
- // Succeeded in evicting, we're going to make our cache larger.
- cache_size_++;
- return true;
- }
- return false;
-}
-
-
-namespace {
-class LockInverter {
- private:
- SpinLock *held_, *temp_;
- public:
- inline explicit LockInverter(SpinLock* held, SpinLock *temp)
- : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
- inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
-};
-}
-
-bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
- // Start with a quick check without taking a lock.
- if (cache_size_ == 0) return false;
- // We don't evict from a full cache unless we are 'forcing'.
- if (force == false && used_slots_ == cache_size_) return false;
-
- // Grab lock, but first release the other lock held by this thread. We use
- // the lock inverter to ensure that we never hold two size class locks
- // concurrently. That can create a deadlock because there is no well
- // defined nesting order.
- LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
- ASSERT(used_slots_ <= cache_size_);
- ASSERT(0 <= cache_size_);
- if (cache_size_ == 0) return false;
- if (used_slots_ == cache_size_) {
- if (force == false) return false;
- // ReleaseListToSpans releases the lock, so we have to make all the
- // updates to the central list before calling it.
- cache_size_--;
- used_slots_--;
- ReleaseListToSpans(tc_slots_[used_slots_].head);
- return true;
- }
- cache_size_--;
- return true;
-}
-
-void TCMalloc_Central_FreeList::InsertRange(HardenedSLL start, HardenedSLL end, int N) {
- SpinLockHolder h(&lock_);
- if (N == num_objects_to_move[size_class_] &&
- MakeCacheSpace()) {
- int slot = used_slots_++;
- ASSERT(slot >=0);
- ASSERT(slot < kNumTransferEntries);
- TCEntry *entry = &tc_slots_[slot];
- entry->head = start;
- entry->tail = end;
- return;
- }
- ReleaseListToSpans(start);
-}
-
-ALWAYS_INLINE void TCMalloc_Central_FreeList::RemoveRange(HardenedSLL* start, HardenedSLL* end, int *N) {
- int num = *N;
- ASSERT(num > 0);
-
- SpinLockHolder h(&lock_);
- if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
- int slot = --used_slots_;
- ASSERT(slot >= 0);
- TCEntry *entry = &tc_slots_[slot];
- *start = entry->head;
- *end = entry->tail;
- return;
- }
-
- // TODO: Prefetch multiple TCEntries?
- HardenedSLL tail = FetchFromSpansSafe();
- if (!tail) {
- // We are completely out of memory.
- *start = *end = HardenedSLL::null();
- *N = 0;
- return;
- }
-
- SLL_SetNext(tail, HardenedSLL::null(), entropy_);
- HardenedSLL head = tail;
- int count = 1;
- while (count < num) {
- HardenedSLL t = FetchFromSpans();
- if (!t) break;
- SLL_Push(&head, t, entropy_);
- count++;
- }
- *start = head;
- *end = tail;
- *N = count;
-}
-
-
-ALWAYS_INLINE HardenedSLL TCMalloc_Central_FreeList::FetchFromSpansSafe() {
- HardenedSLL t = FetchFromSpans();
- if (!t) {
- Populate();
- t = FetchFromSpans();
- }
- return t;
-}
-
-HardenedSLL TCMalloc_Central_FreeList::FetchFromSpans() {
- if (DLL_IsEmpty(&nonempty_, entropy_)) return HardenedSLL::null();
- Span* span = nonempty_.next(entropy_);
-
- ASSERT(span->objects);
- ASSERT_SPAN_COMMITTED(span);
- span->refcount++;
- HardenedSLL result = span->objects;
- span->objects = SLL_Next(result, entropy_);
- if (!span->objects) {
- // Move to empty list
- DLL_Remove(span, entropy_);
- DLL_Prepend(&empty_, span, entropy_);
- Event(span, 'E', 0);
- }
- counter_--;
- return result;
-}
-
-// Fetch memory from the system and add to the central cache freelist.
-ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
- // Release central list lock while operating on pageheap
- lock_.Unlock();
- const size_t npages = class_to_pages[size_class_];
-
- Span* span;
- {
- SpinLockHolder h(&pageheap_lock);
- span = pageheap->New(npages);
- if (span) pageheap->RegisterSizeClass(span, size_class_);
- }
- if (span == NULL) {
-#if HAVE(ERRNO_H)
- MESSAGE("allocation failed: %d\n", errno);
-#elif OS(WINDOWS)
- MESSAGE("allocation failed: %d\n", ::GetLastError());
-#else
- MESSAGE("allocation failed\n");
-#endif
- lock_.Lock();
- return;
- }
- ASSERT_SPAN_COMMITTED(span);
- ASSERT(span->length == npages);
- // Cache sizeclass info eagerly. Locking is not necessary.
- // (Instead of being eager, we could just replace any stale info
- // about this span, but that seems to be no better in practice.)
- for (size_t i = 0; i < npages; i++) {
- pageheap->CacheSizeClass(span->start + i, size_class_);
- }
-
- // Split the block into pieces and add to the free-list
- // TODO: coloring of objects to avoid cache conflicts?
- HardenedSLL head = HardenedSLL::null();
- char* start = reinterpret_cast<char*>(span->start << kPageShift);
- const size_t size = ByteSizeForClass(size_class_);
- char* ptr = start + (npages << kPageShift) - ((npages << kPageShift) % size);
- int num = 0;
-#if ENABLE(TCMALLOC_HARDENING)
- uint32_t startPoison = freedObjectStartPoison();
- uint32_t endPoison = freedObjectEndPoison();
-#endif
-
- while (ptr > start) {
- ptr -= size;
- HardenedSLL node = HardenedSLL::create(ptr);
- POISON_DEALLOCATION_EXPLICIT(ptr, size, startPoison, endPoison);
- SLL_SetNext(node, head, entropy_);
- head = node;
- num++;
- }
- ASSERT(ptr == start);
- ASSERT(ptr == head.value());
-#ifndef NDEBUG
- {
- HardenedSLL node = head;
- while (node) {
- ASSERT(IS_DEFINITELY_POISONED(node.value(), size));
- node = SLL_Next(node, entropy_);
- }
- }
-#endif
- span->objects = head;
- ASSERT(span->objects.value() == head.value());
- span->refcount = 0; // No sub-object in use yet
-
- // Add span to list of non-empty spans
- lock_.Lock();
- DLL_Prepend(&nonempty_, span, entropy_);
- counter_ += num;
-}
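As a worked example of the carving loop above, assuming 4 KB pages (kPageShift == 12) and a 96-byte size class on a one-page span: ptr starts at start + 4096 - (4096 % 96) = start + 4032, the loop peels off 4096 / 96 = 42 objects of 96 bytes each onto the free list, and the trailing 64 bytes of the span are left uncarved.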
-
-//-------------------------------------------------------------------
-// TCMalloc_ThreadCache implementation
-//-------------------------------------------------------------------
-
-inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
- if (bytes_until_sample_ < k) {
- PickNextSample(k);
- return true;
- } else {
- bytes_until_sample_ -= k;
- return false;
- }
-}
-
-void TCMalloc_ThreadCache::Init(ThreadIdentifier tid, uintptr_t entropy) {
- size_ = 0;
- next_ = NULL;
- prev_ = NULL;
- tid_ = tid;
- in_setspecific_ = false;
- entropy_ = entropy;
-#if ENABLE(TCMALLOC_HARDENING)
- ASSERT(entropy_);
-#endif
- for (size_t cl = 0; cl < kNumClasses; ++cl) {
- list_[cl].Init(entropy_);
- }
-
- // Initialize RNG -- run it for a bit to get to good values
- bytes_until_sample_ = 0;
- rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
- for (int i = 0; i < 100; i++) {
- PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
- }
-}
-
-void TCMalloc_ThreadCache::Cleanup() {
- // Put unused memory back into central cache
- for (size_t cl = 0; cl < kNumClasses; ++cl) {
- if (list_[cl].length() > 0) {
- ReleaseToCentralCache(cl, list_[cl].length());
- }
- }
-}
-
-ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
- ASSERT(size <= kMaxSize);
- const size_t cl = SizeClass(size);
- FreeList* list = &list_[cl];
- size_t allocationSize = ByteSizeForClass(cl);
- if (list->empty()) {
- FetchFromCentralCache(cl, allocationSize);
- if (list->empty()) return NULL;
- }
- size_ -= allocationSize;
- void* result = list->Pop();
- if (!result)
- return 0;
- RELEASE_ASSERT(IS_DEFINITELY_POISONED(result, allocationSize));
- POISON_ALLOCATION(result, allocationSize);
- return result;
-}
-
-inline void TCMalloc_ThreadCache::Deallocate(HardenedSLL ptr, size_t cl) {
- size_t allocationSize = ByteSizeForClass(cl);
- size_ += allocationSize;
- FreeList* list = &list_[cl];
- if (MAY_BE_POISONED(ptr.value(), allocationSize))
- list->Validate(ptr, allocationSize);
-
- POISON_DEALLOCATION(ptr.value(), allocationSize);
- list->Push(ptr);
- // If enough data is free, put back into central cache
- if (list->length() > kMaxFreeListLength) {
- ReleaseToCentralCache(cl, num_objects_to_move[cl]);
- }
- if (size_ >= per_thread_cache_size) Scavenge();
-}
-
-// Remove some objects of class "cl" from central cache and add to thread heap
-ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
- int fetch_count = num_objects_to_move[cl];
- HardenedSLL start, end;
- central_cache[cl].RemoveRange(&start, &end, &fetch_count);
- list_[cl].PushRange(fetch_count, start, end);
- size_ += allocationSize * fetch_count;
-}
-
-// Remove some objects of class "cl" from thread heap and add to central cache
-inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
- ASSERT(N > 0);
- FreeList* src = &list_[cl];
- if (N > src->length()) N = src->length();
- size_ -= N*ByteSizeForClass(cl);
-
- // We return prepackaged chains of the correct size to the central cache.
- // TODO: Use the same format internally in the thread caches?
- int batch_size = num_objects_to_move[cl];
- while (N > batch_size) {
- HardenedSLL tail, head;
- src->PopRange(batch_size, &head, &tail);
- central_cache[cl].InsertRange(head, tail, batch_size);
- N -= batch_size;
- }
- HardenedSLL tail, head;
- src->PopRange(N, &head, &tail);
- central_cache[cl].InsertRange(head, tail, N);
-}
-
-// Release idle memory to the central cache
-inline void TCMalloc_ThreadCache::Scavenge() {
- // If the low-water mark for the free list is L, it means we would
- // not have had to allocate anything from the central cache even if
- // we had reduced the free list size by L. We aim to get closer to
- // that situation by dropping L/2 nodes from the free list. This
- // may not release much memory, but if so we will call scavenge again
- // pretty soon and the low-water marks will be high on that call.
- //int64 start = CycleClock::Now();
-
- for (size_t cl = 0; cl < kNumClasses; cl++) {
- FreeList* list = &list_[cl];
- const int lowmark = list->lowwatermark();
- if (lowmark > 0) {
- const int drop = (lowmark > 1) ? lowmark/2 : 1;
- ReleaseToCentralCache(cl, drop);
- }
- list->clear_lowwatermark();
- }
-
- //int64 finish = CycleClock::Now();
- //CycleTimer ct;
- //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
-}
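For example, if a size class's free list never dropped below 8 entries since the last scavenge (lowwatermark() == 8), those 8 entries were never needed; the pass above returns drop = 8 / 2 = 4 of them to the central cache and clears the low-water mark, so repeated idle passes shrink the list gradually rather than all at once.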
-
-void TCMalloc_ThreadCache::PickNextSample(size_t k) {
- // Make next "random" number
- // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
- static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
- uint32_t r = rnd_;
- rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
-
- // Next point is "rnd_ % (sample_period)". I.e., average
- // increment is "sample_period/2".
- const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
- static int last_flag_value = -1;
-
- if (flag_value != last_flag_value) {
- SpinLockHolder h(&sample_period_lock);
- int i;
- for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
- if (primes_list[i] >= flag_value) {
- break;
- }
- }
- sample_period = primes_list[i];
- last_flag_value = flag_value;
- }
-
- bytes_until_sample_ += rnd_ % sample_period;
-
- if (k > (static_cast<size_t>(-1) >> 2)) {
- // If the user has asked for a huge allocation then it is possible
- // for the code below to loop infinitely. Just return (note that
- // this throws off the sampling accuracy somewhat, but a user who
- // is allocating more than 1G of memory at a time can live with a
- // minor inaccuracy in profiling of small allocations, and also
- // would rather not wait for the loop below to terminate).
- return;
- }
-
- while (bytes_until_sample_ < k) {
- // Increase bytes_until_sample_ by enough average sampling periods
- // (sample_period >> 1) to allow us to sample past the current
- // allocation.
- bytes_until_sample_ += (sample_period >> 1);
- }
-
- bytes_until_sample_ -= k;
-}
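A minimal self-contained sketch of the shift-register step above (same taps as kPoly; the helper name lfsrNext is illustrative, and the state must be seeded non-zero, as Init() does with the heap address):

#include <cstdint>

// One step of the 32-bit LFSR: shift left, and if the bit shifted out of the
// top was set, XOR in the taps of x^32 + x^22 + x^2 + x^1 + 1.
static inline uint32_t lfsrNext(uint32_t r)
{
    const uint32_t kPoly = (1u << 22) | (1u << 2) | (1u << 1) | (1u << 0);
    const uint32_t mask = static_cast<uint32_t>(static_cast<int32_t>(r) >> 31); // all ones iff the top bit of r is set
    return (r << 1) ^ (mask & kPoly);
}

Because bytes_until_sample_ is then advanced by rnd_ % sample_period, the expected gap between samples works out to roughly sample_period / 2 bytes, as the comment above notes.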
-
-void TCMalloc_ThreadCache::InitModule() {
- // There is a slight potential race here because of the double-checked
- // locking idiom. However, as long as the program does a small
- // allocation before switching to multi-threaded mode, we will be
- // fine. We increase the chances of doing such a small allocation
- // by doing one in the constructor of the module_enter_exit_hook
- // object declared below.
- SpinLockHolder h(&pageheap_lock);
- if (!phinited) {
- uintptr_t entropy = HARDENING_ENTROPY;
-#ifdef WTF_CHANGES
- InitTSD();
-#endif
- InitSizeClasses();
- threadheap_allocator.Init(entropy);
- span_allocator.Init(entropy);
- span_allocator.New(); // Reduce cache conflicts
- span_allocator.New(); // Reduce cache conflicts
- stacktrace_allocator.Init(entropy);
- DLL_Init(&sampled_objects, entropy);
- for (size_t i = 0; i < kNumClasses; ++i) {
- central_cache[i].Init(i, entropy);
- }
- pageheap->init();
- phinited = 1;
-#if defined(WTF_CHANGES) && OS(DARWIN)
- MallocHook::init();
- FastMallocZone::init();
-#endif
- }
-}
-
-inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid, uintptr_t entropy) {
- // Create the heap and add it to the linked list
- TCMalloc_ThreadCache *heap = threadheap_allocator.New();
- heap->Init(tid, entropy);
- heap->next_ = thread_heaps;
- heap->prev_ = NULL;
- if (thread_heaps != NULL) thread_heaps->prev_ = heap;
- thread_heaps = heap;
- thread_heap_count++;
- RecomputeThreadCacheSize();
- return heap;
-}
-
-inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
-#ifdef HAVE_TLS
- // __thread is faster, but only when the kernel supports it
- if (KernelSupportsTLS())
- return threadlocal_heap;
-#elif OS(DARWIN)
- return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
-#else
- return static_cast<TCMalloc_ThreadCache*>(threadSpecificGet(heap_key));
-#endif
-}
-
-inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
- TCMalloc_ThreadCache* ptr = NULL;
- if (!tsd_inited) {
- InitModule();
- } else {
- ptr = GetThreadHeap();
- }
- if (ptr == NULL) ptr = CreateCacheIfNecessary();
- return ptr;
-}
-
-// In deletion paths, we do not try to create a thread-cache. This is
-// because we may be in the thread destruction code and may have
-// already cleaned up the cache for this thread.
-inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
- if (!tsd_inited) return NULL;
- void* const p = GetThreadHeap();
- return reinterpret_cast<TCMalloc_ThreadCache*>(p);
-}
-
-void TCMalloc_ThreadCache::InitTSD() {
- ASSERT(!tsd_inited);
-#if USE(PTHREAD_GETSPECIFIC_DIRECT)
- pthread_key_init_np(heap_key, DestroyThreadCache);
-#else
- threadSpecificKeyCreate(&heap_key, DestroyThreadCache);
-#endif
- tsd_inited = true;
-
-#if !OS(WINDOWS)
- // We may have used a fake pthread_t for the main thread. Fix it.
- pthread_t zero;
- memset(&zero, 0, sizeof(zero));
-#endif
-#ifndef WTF_CHANGES
- SpinLockHolder h(&pageheap_lock);
-#else
- ASSERT(pageheap_lock.IsHeld());
-#endif
- for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
-#if OS(WINDOWS)
- if (h->tid_ == 0) {
- h->tid_ = GetCurrentThreadId();
- }
-#else
- if (pthread_equal(h->tid_, zero)) {
- h->tid_ = pthread_self();
- }
-#endif
- }
-}
-
-TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
- // Initialize per-thread data if necessary
- TCMalloc_ThreadCache* heap = NULL;
- {
- SpinLockHolder h(&pageheap_lock);
-
-#if OS(WINDOWS)
- DWORD me;
- if (!tsd_inited) {
- me = 0;
- } else {
- me = GetCurrentThreadId();
- }
-#else
- // Early on in glibc's life, we cannot even call pthread_self()
- pthread_t me;
- if (!tsd_inited) {
- memset(&me, 0, sizeof(me));
- } else {
- me = pthread_self();
- }
-#endif
-
- // This may be a recursive malloc call from pthread_setspecific().
- // In that case, the heap for this thread has already been created
- // and added to the linked list. So we search for that first.
- for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
-#if OS(WINDOWS)
- if (h->tid_ == me) {
-#else
- if (pthread_equal(h->tid_, me)) {
-#endif
- heap = h;
- break;
- }
- }
-
- if (heap == NULL) heap = NewHeap(me, HARDENING_ENTROPY);
- }
-
- // We call pthread_setspecific() outside the lock because it may
- // call malloc() recursively. The recursive call will never get
- // here again because it will find the already allocated heap in the
- // linked list of heaps.
- if (!heap->in_setspecific_ && tsd_inited) {
- heap->in_setspecific_ = true;
- setThreadHeap(heap);
- }
- return heap;
+ FAIL_IF_EXCEEDS_LIMIT(size);
+ return bmalloc::api::tryMemalign(alignment, size);
}
-void TCMalloc_ThreadCache::BecomeIdle() {
- if (!tsd_inited) return; // No caches yet
- TCMalloc_ThreadCache* heap = GetThreadHeap();
- if (heap == NULL) return; // No thread cache to remove
- if (heap->in_setspecific_) return; // Do not disturb the active caller
-
- heap->in_setspecific_ = true;
- setThreadHeap(NULL);
-#ifdef HAVE_TLS
- // Also update the copy in __thread
- threadlocal_heap = NULL;
-#endif
- heap->in_setspecific_ = false;
- if (GetThreadHeap() == heap) {
- // Somehow heap got reinstated by a recursive call to malloc
- // from pthread_setspecific. We give up in this case.
- return;
- }
-
- // We can now get rid of the heap
- DeleteCache(heap);
-}
-
-void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
- // Note that "ptr" cannot be NULL since pthread promises not
- // to invoke the destructor on NULL values, but for safety,
- // we check anyway.
- if (ptr == NULL) return;
-#ifdef HAVE_TLS
- // Prevent fast path of GetThreadHeap() from returning heap.
- threadlocal_heap = NULL;
-#endif
- DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
-}
-
-void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
- // Remove all memory from heap
- heap->Cleanup();
-
- // Remove from linked list
- SpinLockHolder h(&pageheap_lock);
- if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
- if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
- if (thread_heaps == heap) thread_heaps = heap->next_;
- thread_heap_count--;
- RecomputeThreadCacheSize();
-
- threadheap_allocator.Delete(heap);
-}
-
-void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
- // Divide available space across threads
- int n = thread_heap_count > 0 ? thread_heap_count : 1;
- size_t space = overall_thread_cache_size / n;
-
- // Limit to allowed range
- if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
- if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;
-
- per_thread_cache_size = space;
-}
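For instance, assuming an overall_thread_cache_size budget of 16 MB shared by 8 live thread heaps, each thread is allotted 2 MB, which is then clamped into the [kMinThreadCacheSize, kMaxThreadCacheSize] range before being published in per_thread_cache_size.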
-
-void TCMalloc_ThreadCache::Print() const {
- for (size_t cl = 0; cl < kNumClasses; ++cl) {
- MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n",
- ByteSizeForClass(cl),
- list_[cl].length(),
- list_[cl].lowwatermark());
- }
-}
-
-// Extract interesting stats
-struct TCMallocStats {
- uint64_t system_bytes; // Bytes alloced from system
- uint64_t thread_bytes; // Bytes in thread caches
- uint64_t central_bytes; // Bytes in central cache
- uint64_t transfer_bytes; // Bytes in central transfer cache
- uint64_t pageheap_bytes; // Bytes in page heap
- uint64_t metadata_bytes; // Bytes alloced for metadata
-};
-
-#ifndef WTF_CHANGES
-// Get stats into "r". Also get per-size-class counts if class_count != NULL
-static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
- r->central_bytes = 0;
- r->transfer_bytes = 0;
- for (int cl = 0; cl < kNumClasses; ++cl) {
- const int length = central_cache[cl].length();
- const int tc_length = central_cache[cl].tc_length();
- r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
- r->transfer_bytes +=
- static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
- if (class_count) class_count[cl] = length + tc_length;
- }
-
- // Add stats from per-thread heaps
- r->thread_bytes = 0;
- { // scope
- SpinLockHolder h(&pageheap_lock);
- for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
- r->thread_bytes += h->Size();
- if (class_count) {
- for (size_t cl = 0; cl < kNumClasses; ++cl) {
- class_count[cl] += h->freelist_length(cl);
- }
- }
- }
- }
-
- { //scope
- SpinLockHolder h(&pageheap_lock);
- r->system_bytes = pageheap->SystemBytes();
- r->metadata_bytes = metadata_system_bytes;
- r->pageheap_bytes = pageheap->FreeBytes();
- }
-}
-#endif
-
-#ifndef WTF_CHANGES
-// WRITE stats to "out"
-static void DumpStats(TCMalloc_Printer* out, int level) {
- TCMallocStats stats;
- uint64_t class_count[kNumClasses];
- ExtractStats(&stats, (level >= 2 ? class_count : NULL));
-
- if (level >= 2) {
- out->printf("------------------------------------------------\n");
- uint64_t cumulative = 0;
- for (int cl = 0; cl < kNumClasses; ++cl) {
- if (class_count[cl] > 0) {
- uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
- cumulative += class_bytes;
- out->printf("class %3d [ %8" PRIuS " bytes ] : "
- "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
- cl, ByteSizeForClass(cl),
- class_count[cl],
- class_bytes / 1048576.0,
- cumulative / 1048576.0);
- }
- }
-
- SpinLockHolder h(&pageheap_lock);
- pageheap->Dump(out);
- }
-
- const uint64_t bytes_in_use = stats.system_bytes
- - stats.pageheap_bytes
- - stats.central_bytes
- - stats.transfer_bytes
- - stats.thread_bytes;
-
- out->printf("------------------------------------------------\n"
- "MALLOC: %12" PRIu64 " Heap size\n"
- "MALLOC: %12" PRIu64 " Bytes in use by application\n"
- "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
- "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
- "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
- "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
- "MALLOC: %12" PRIu64 " Spans in use\n"
- "MALLOC: %12" PRIu64 " Thread heaps in use\n"
- "MALLOC: %12" PRIu64 " Metadata allocated\n"
- "------------------------------------------------\n",
- stats.system_bytes,
- bytes_in_use,
- stats.pageheap_bytes,
- stats.central_bytes,
- stats.transfer_bytes,
- stats.thread_bytes,
- uint64_t(span_allocator.inuse()),
- uint64_t(threadheap_allocator.inuse()),
- stats.metadata_bytes);
-}
-
-static void PrintStats(int level) {
- const int kBufferSize = 16 << 10;
- char* buffer = new char[kBufferSize];
- TCMalloc_Printer printer(buffer, kBufferSize);
- DumpStats(&printer, level);
- write(STDERR_FILENO, buffer, strlen(buffer));
- delete[] buffer;
-}
-
-static void** DumpStackTraces() {
- // Count how much space we need
- int needed_slots = 0;
- {
- SpinLockHolder h(&pageheap_lock);
- for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
- StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
- needed_slots += 3 + stack->depth;
- }
- needed_slots += 100; // Slop in case sample grows
- needed_slots += needed_slots/8; // An extra 12.5% slop
- }
-
- void** result = new void*[needed_slots];
- if (result == NULL) {
- MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
- needed_slots);
- return NULL;
- }
-
- SpinLockHolder h(&pageheap_lock);
- int used_slots = 0;
- for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
- ASSERT_WITH_SECURITY_IMPLICATION(used_slots < needed_slots); // Need to leave room for terminator
- StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
- if (used_slots + 3 + stack->depth >= needed_slots) {
- // No more room
- break;
- }
-
- result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
- result[used_slots+1] = reinterpret_cast<void*>(stack->size);
- result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
- for (int d = 0; d < stack->depth; d++) {
- result[used_slots+3+d] = stack->stack[d];
- }
- used_slots += 3 + stack->depth;
- }
- result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
- return result;
-}
-#endif
-
-#ifndef WTF_CHANGES
-
-// TCMalloc's support for extra malloc interfaces
-class TCMallocImplementation : public MallocExtension {
- public:
- virtual void GetStats(char* buffer, int buffer_length) {
- ASSERT(buffer_length > 0);
- TCMalloc_Printer printer(buffer, buffer_length);
-
- // Print level one stats unless lots of space is available
- if (buffer_length < 10000) {
- DumpStats(&printer, 1);
- } else {
- DumpStats(&printer, 2);
- }
- }
-
- virtual void** ReadStackTraces() {
- return DumpStackTraces();
- }
-
- virtual bool GetNumericProperty(const char* name, size_t* value) {
- ASSERT(name != NULL);
-
- if (strcmp(name, "generic.current_allocated_bytes") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.system_bytes
- - stats.thread_bytes
- - stats.central_bytes
- - stats.pageheap_bytes;
- return true;
- }
-
- if (strcmp(name, "generic.heap_size") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.system_bytes;
- return true;
- }
-
- if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
- // We assume that bytes in the page heap are not fragmented too
- // badly, and are therefore available for allocation.
- SpinLockHolder l(&pageheap_lock);
- *value = pageheap->FreeBytes();
- return true;
- }
-
- if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
- SpinLockHolder l(&pageheap_lock);
- *value = overall_thread_cache_size;
- return true;
- }
-
- if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.thread_bytes;
- return true;
- }
-
- return false;
- }
-
- virtual bool SetNumericProperty(const char* name, size_t value) {
- ASSERT(name != NULL);
-
- if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
- // Clip the value to a reasonable range
- if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
- if (value > (1<<30)) value = (1<<30); // Limit to 1GB
-
- SpinLockHolder l(&pageheap_lock);
- overall_thread_cache_size = static_cast<size_t>(value);
- TCMalloc_ThreadCache::RecomputeThreadCacheSize();
- return true;
- }
-
- return false;
- }
-
- virtual void MarkThreadIdle() {
- TCMalloc_ThreadCache::BecomeIdle();
- }
-
- virtual void ReleaseFreeMemory() {
- SpinLockHolder h(&pageheap_lock);
- pageheap->ReleaseFreePages();
- }
-};
-#endif
-
-// The constructor allocates an object to ensure that initialization
-// runs before main(), and therefore we do not have a chance to become
-// multi-threaded before initialization. We also create the TSD key
-// here. Presumably by the time this constructor runs, glibc is in
-// good enough shape to handle pthread_key_create().
-//
-// The constructor also takes the opportunity to tell STL to use
- // tcmalloc. We want to do this early, before constructor time, so
-// all user STL allocations go through tcmalloc (which works really
-// well for STL).
-//
-// The destructor prints stats when the program exits.
-class TCMallocGuard {
- public:
-
- TCMallocGuard() {
-#ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS
- // Check whether the kernel also supports TLS (needs to happen at runtime)
- CheckIfKernelSupportsTLS();
-#endif
-#ifndef WTF_CHANGES
-#ifdef WIN32 // patch the windows VirtualAlloc, etc.
- PatchWindowsFunctions(); // defined in windows/patch_functions.cc
-#endif
-#endif
- free(malloc(1));
- TCMalloc_ThreadCache::InitTSD();
- free(malloc(1));
-#ifndef WTF_CHANGES
- MallocExtension::Register(new TCMallocImplementation);
-#endif
- }
-
-#ifndef WTF_CHANGES
- ~TCMallocGuard() {
- const char* env = getenv("MALLOCSTATS");
- if (env != NULL) {
- int level = atoi(env);
- if (level < 1) level = 1;
- PrintStats(level);
- }
-#ifdef WIN32
- UnpatchWindowsFunctions();
-#endif
- }
-#endif
-};
-
-#ifndef WTF_CHANGES
-static TCMallocGuard module_enter_exit_hook;
-#endif
-
-
-//-------------------------------------------------------------------
-// Helpers for the exported routines below
-//-------------------------------------------------------------------
-
-#ifndef WTF_CHANGES
-
-static Span* DoSampledAllocation(size_t size) {
-
- // Grab the stack trace outside the heap lock
- StackTrace tmp;
- tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
- tmp.size = size;
-
- SpinLockHolder h(&pageheap_lock);
- // Allocate span
- Span *span = pageheap->New(pages(size == 0 ? 1 : size));
- if (span == NULL) {
- return NULL;
- }
-
- // Allocate stack trace
- StackTrace *stack = stacktrace_allocator.New();
- if (stack == NULL) {
- // Sampling failed because of lack of memory
- return span;
- }
-
- *stack = tmp;
- span->sample = 1;
- span->objects = stack;
- DLL_Prepend(&sampled_objects, span);
-
- return span;
-}
-#endif
-
-#if !ASSERT_DISABLED
-static inline bool CheckCachedSizeClass(void *ptr) {
- PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- size_t cached_value = pageheap->GetSizeClassIfCached(p);
- return cached_value == 0 ||
- cached_value == pageheap->GetDescriptor(p)->sizeclass;
-}
-#endif
-
-static inline void* CheckedMallocResult(void *result)
-{
- ASSERT(result == 0 || CheckCachedSizeClass(result));
- return result;
-}
-
-static inline void* SpanToMallocResult(Span *span) {
- ASSERT_SPAN_COMMITTED(span);
- pageheap->CacheSizeClass(span->start, 0);
- void* result = reinterpret_cast<void*>(span->start << kPageShift);
- POISON_ALLOCATION(result, span->length << kPageShift);
- return CheckedMallocResult(result);
-}
-
-#ifdef WTF_CHANGES
-template <bool crashOnFailure>
-#endif
-static ALWAYS_INLINE void* do_malloc(size_t size) {
- void* ret = NULL;
-
-#ifdef WTF_CHANGES
- ASSERT(!isForbidden());
-#endif
-
- // The following call forces module initialization
- TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
-#ifndef WTF_CHANGES
- if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
- Span* span = DoSampledAllocation(size);
- if (span != NULL) {
- ret = SpanToMallocResult(span);
- }
- } else
-#endif
- if (size > kMaxSize) {
- // Use page-level allocator
- SpinLockHolder h(&pageheap_lock);
- Span* span = pageheap->New(pages(size));
- if (span != NULL) {
- ret = SpanToMallocResult(span);
- }
- } else {
- // The common case, and also the simplest. This just pops the
- // size-appropriate freelist, after replenishing it if it's empty.
- ret = CheckedMallocResult(heap->Allocate(size));
- }
- if (!ret) {
-#ifdef WTF_CHANGES
- if (crashOnFailure) // This branch should be optimized out by the compiler.
- CRASH();
-#else
- errno = ENOMEM;
-#endif
- }
- return ret;
-}
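For example, a 100-byte request (well below kMaxSize) is rounded up to its size class and popped from the per-thread free list, while a multi-megabyte request skips the thread cache entirely and is carved out of page-heap spans under pageheap_lock.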
-
-static ALWAYS_INLINE void do_free(void* ptr) {
- if (ptr == NULL) return;
- ASSERT(pageheap != NULL); // Should not call free() before malloc()
- const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- Span* span = pageheap->GetDescriptor(p);
- RELEASE_ASSERT(span->isValid());
- size_t cl = span->sizeclass;
-
- if (cl) {
- size_t byteSizeForClass = ByteSizeForClass(cl);
- RELEASE_ASSERT(!((reinterpret_cast<char*>(ptr) - reinterpret_cast<char*>(span->start << kPageShift)) % byteSizeForClass));
- pageheap->CacheSizeClass(p, cl);
-
-#ifndef NO_TCMALLOC_SAMPLES
- ASSERT(!pageheap->GetDescriptor(p)->sample);
-#endif
- TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
- if (heap != NULL) {
- heap->Deallocate(HardenedSLL::create(ptr), cl);
- } else {
- // Delete directly into central cache
- POISON_DEALLOCATION(ptr, byteSizeForClass);
- SLL_SetNext(HardenedSLL::create(ptr), HardenedSLL::null(), central_cache[cl].entropy());
- central_cache[cl].InsertRange(HardenedSLL::create(ptr), HardenedSLL::create(ptr), 1);
- }
- } else {
- SpinLockHolder h(&pageheap_lock);
- ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
- ASSERT(span != NULL && span->start == p);
-#ifndef NO_TCMALLOC_SAMPLES
- if (span->sample) {
- DLL_Remove(span);
- stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
- span->objects = NULL;
- }
-#endif
- RELEASE_ASSERT(reinterpret_cast<void*>(span->start << kPageShift) == ptr);
- POISON_DEALLOCATION(ptr, span->length << kPageShift);
- pageheap->Delete(span);
- }
-}
-
-#ifndef WTF_CHANGES
-// For use by exported routines below that want specific alignments
-//
-// Note: this code can be slow, and can significantly fragment memory.
-// The expectation is that memalign/posix_memalign/valloc/pvalloc will
-// not be invoked very often. This requirement simplifies our
-// implementation and allows us to tune for expected allocation
-// patterns.
-static void* do_memalign(size_t align, size_t size) {
- ASSERT((align & (align - 1)) == 0);
- ASSERT(align > 0);
- if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
-
- // Allocate at least one byte to avoid boundary conditions below
- if (size == 0) size = 1;
-
- if (size <= kMaxSize && align < kPageSize) {
- // Search through acceptable size classes looking for one with
- // enough alignment. This depends on the fact that
- // InitSizeClasses() currently produces several size classes that
- // are aligned at powers of two. We will waste time and space if
- // we miss in the size class array, but that is deemed acceptable
- // since memalign() should be used rarely.
- size_t cl = SizeClass(size);
- while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
- cl++;
- }
- if (cl < kNumClasses) {
- TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
- return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
- }
- }
-
- // We will allocate directly from the page heap
- SpinLockHolder h(&pageheap_lock);
-
- if (align <= kPageSize) {
- // Any page-level allocation will be fine
- // TODO: We could put the rest of this page in the appropriate
- // TODO: cache but it does not seem worth it.
- Span* span = pageheap->New(pages(size));
- return span == NULL ? NULL : SpanToMallocResult(span);
- }
-
- // Allocate extra pages and carve off an aligned portion
- const Length alloc = pages(size + align);
- Span* span = pageheap->New(alloc);
- if (span == NULL) return NULL;
-
- // Skip starting portion so that we end up aligned
- Length skip = 0;
- while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
- skip++;
- }
- ASSERT_WITH_SECURITY_IMPLICATION(skip < alloc);
- if (skip > 0) {
- Span* rest = pageheap->Split(span, skip);
- pageheap->Delete(span);
- span = rest;
- }
-
- // Skip trailing portion that we do not need to return
- const Length needed = pages(size);
- ASSERT(span->length >= needed);
- if (span->length > needed) {
- Span* trailer = pageheap->Split(span, needed);
- pageheap->Delete(trailer);
- }
- return SpanToMallocResult(span);
-}
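As a sketch of the over-allocate-and-trim path above, assuming 4 KB pages: an 8 KB request aligned to 16 KB allocates pages(8 KB + 16 KB) = 6 pages, skips at most 3 leading pages until the start address is 16 KB-aligned (returning them via Split/Delete), and then trims any pages beyond the 2 actually needed.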
-#endif
-
-// Helpers for use by exported routines below:
-
-#ifndef WTF_CHANGES
-static inline void do_malloc_stats() {
- PrintStats(1);
-}
-
-static inline int do_mallopt(int, int) {
- return 1; // Indicates error
-}
-#endif
-
-#ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance
-static inline struct mallinfo do_mallinfo() {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
-
- // Just some of the fields are filled in.
- struct mallinfo info;
- memset(&info, 0, sizeof(info));
-
- // Unfortunately, the struct contains "int" field, so some of the
- // size values will be truncated.
- info.arena = static_cast<int>(stats.system_bytes);
- info.fsmblks = static_cast<int>(stats.thread_bytes
- + stats.central_bytes
- + stats.transfer_bytes);
- info.fordblks = static_cast<int>(stats.pageheap_bytes);
- info.uordblks = static_cast<int>(stats.system_bytes
- - stats.thread_bytes
- - stats.central_bytes
- - stats.transfer_bytes
- - stats.pageheap_bytes);
-
- return info;
-}
-#endif
-
-//-------------------------------------------------------------------
-// Exported routines
-//-------------------------------------------------------------------
-
-// CAVEAT: The code structure below ensures that MallocHook methods are always
-// called from the stack frame of the invoked allocation function.
-// heap-checker.cc depends on this to start a stack trace from
-// the call to the (de)allocation function.
-
-#ifndef WTF_CHANGES
-extern "C"
-#else
-#define do_malloc do_malloc<crashOnFailure>
-
-template <bool crashOnFailure>
-ALWAYS_INLINE void* malloc(size_t);
-
-void* fastMalloc(size_t size)
+void fastAlignedFree(void* p)
{
- void* result = malloc<true>(size);
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("fastMalloc allocating %lu bytes (fastMalloc): %p.\n", size, result);
-#endif
- return result;
+ bmalloc::api::free(p);
}
TryMallocReturnValue tryFastMalloc(size_t size)
{
- TryMallocReturnValue result = malloc<false>(size);
-#if ENABLE(ALLOCATION_LOGGING)
- void* pointer;
- (void)result.getValue(pointer);
- dataLogF("fastMalloc allocating %lu bytes (tryFastMalloc): %p.\n", size, pointer);
-#endif
- return result;
+ FAIL_IF_EXCEEDS_LIMIT(size);
+ return bmalloc::api::tryMalloc(size);
}
-
-template <bool crashOnFailure>
-ALWAYS_INLINE
-#endif
-void* malloc(size_t size) {
-#if ENABLE(WTF_MALLOC_VALIDATION)
- if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= size) // If overflow would occur...
- return 0;
- void* result = do_malloc(size + Internal::ValidationBufferSize);
- if (!result)
- return 0;
-
- Internal::ValidationHeader* header = static_cast<Internal::ValidationHeader*>(result);
- header->m_size = size;
- header->m_type = Internal::AllocTypeMalloc;
- header->m_prefix = static_cast<unsigned>(Internal::ValidationPrefix);
- result = header + 1;
- *Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
- fastMallocValidate(result);
-#else
- void* result = do_malloc(size);
-#endif
-
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-#ifndef WTF_CHANGES
-extern "C"
-#endif
-void free(void* ptr) {
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("fastFree freeing %p.\n", ptr);
-#endif
- MallocHook::InvokeDeleteHook(ptr);
-
-#if ENABLE(WTF_MALLOC_VALIDATION)
- if (!ptr)
- return;
-
- fastMallocValidate(ptr);
- Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(ptr);
- memset(ptr, 0xCC, header->m_size);
- do_free(header);
-#else
- do_free(ptr);
-#endif
-}
-
-#ifndef WTF_CHANGES
-extern "C"
-#else
-template <bool crashOnFailure>
-ALWAYS_INLINE void* calloc(size_t, size_t);
-
-void* fastCalloc(size_t n, size_t elem_size)
+TryMallocReturnValue tryFastCalloc(size_t numElements, size_t elementSize)
{
- void* result = calloc<true>(n, elem_size);
-#if ENABLE(WTF_MALLOC_VALIDATION)
- fastMallocValidate(result);
-#endif
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("fastMalloc contiguously allocating %lu * %lu bytes (fastCalloc): %p.\n", n, elem_size, result);
-#endif
- return result;
+ FAIL_IF_EXCEEDS_LIMIT(numElements * elementSize);
+ Checked<size_t, RecordOverflow> checkedSize = elementSize;
+ checkedSize *= numElements;
+ if (checkedSize.hasOverflowed())
+ return nullptr;
+ return tryFastZeroedMalloc(checkedSize.unsafeGet());
}
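A brief usage sketch of the try path above, relying on TryMallocReturnValue::getValue() as used elsewhere in this file (count and table are illustrative names):

void* pointer = nullptr;
TryMallocReturnValue rv = tryFastCalloc(count, sizeof(uint32_t));
if (!rv.getValue(pointer))
    return false; // allocation failed, or count * sizeof(uint32_t) overflowed
uint32_t* table = static_cast<uint32_t*>(pointer);
// table now holds count zeroed entries; release it later with fastFree(table).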
-
-TryMallocReturnValue tryFastCalloc(size_t n, size_t elem_size)
-{
- void* result = calloc<false>(n, elem_size);
-#if ENABLE(WTF_MALLOC_VALIDATION)
- fastMallocValidate(result);
-#endif
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("fastMalloc contiguously allocating %lu * %lu bytes (tryFastCalloc): %p.\n", n, elem_size, result);
-#endif
- return result;
-}
-
-template <bool crashOnFailure>
-ALWAYS_INLINE
-#endif
-void* calloc(size_t n, size_t elem_size) {
- size_t totalBytes = n * elem_size;
- // Protect against overflow
- if (n > 1 && elem_size && (totalBytes / elem_size) != n)
- return 0;
-
-#if ENABLE(WTF_MALLOC_VALIDATION)
- void* result = malloc<crashOnFailure>(totalBytes);
- if (!result)
- return 0;
-
- memset(result, 0, totalBytes);
- fastMallocValidate(result);
-#else
- void* result = do_malloc(totalBytes);
- if (result != NULL) {
- memset(result, 0, totalBytes);
- }
-#endif
-
- MallocHook::InvokeNewHook(result, totalBytes);
- return result;
-}
-
-// Since cfree isn't used anywhere, we don't compile it in.
-#ifndef WTF_CHANGES
-#ifndef WTF_CHANGES
-extern "C"
-#endif
-void cfree(void* ptr) {
-#ifndef WTF_CHANGES
- MallocHook::InvokeDeleteHook(ptr);
-#endif
- do_free(ptr);
-}
-#endif
-
-#ifndef WTF_CHANGES
-extern "C"
-#else
-template <bool crashOnFailure>
-ALWAYS_INLINE void* realloc(void*, size_t);
-
-void* fastRealloc(void* old_ptr, size_t new_size)
+void releaseFastMallocFreeMemoryForThisThread()
{
-#if ENABLE(WTF_MALLOC_VALIDATION)
- fastMallocValidate(old_ptr);
-#endif
- void* result = realloc<true>(old_ptr, new_size);
-#if ENABLE(WTF_MALLOC_VALIDATION)
- fastMallocValidate(result);
-#endif
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("fastMalloc reallocating %lu bytes (fastRealloc): %p -> %p.\n", new_size, old_ptr, result);
-#endif
- return result;
-}
-
-TryMallocReturnValue tryFastRealloc(void* old_ptr, size_t new_size)
-{
-#if ENABLE(WTF_MALLOC_VALIDATION)
- fastMallocValidate(old_ptr);
-#endif
- void* result = realloc<false>(old_ptr, new_size);
-#if ENABLE(WTF_MALLOC_VALIDATION)
- fastMallocValidate(result);
-#endif
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("fastMalloc reallocating %lu bytes (tryFastRealloc): %p -> %p.\n", new_size, old_ptr, result);
-#endif
- return result;
-}
-
-template <bool crashOnFailure>
-ALWAYS_INLINE
-#endif
-void* realloc(void* old_ptr, size_t new_size) {
- if (old_ptr == NULL) {
-#if ENABLE(WTF_MALLOC_VALIDATION)
- void* result = malloc<crashOnFailure>(new_size);
-#else
- void* result = do_malloc(new_size);
- MallocHook::InvokeNewHook(result, new_size);
-#endif
- return result;
- }
- if (new_size == 0) {
- MallocHook::InvokeDeleteHook(old_ptr);
- free(old_ptr);
- return NULL;
- }
-
-#if ENABLE(WTF_MALLOC_VALIDATION)
- if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= new_size) // If overflow would occur...
- return 0;
- Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(old_ptr);
- fastMallocValidate(old_ptr);
- old_ptr = header;
- header->m_size = new_size;
- new_size += Internal::ValidationBufferSize;
-#endif
-
- // Get the size of the old entry
- const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
- size_t cl = pageheap->GetSizeClassIfCached(p);
- Span *span = NULL;
- size_t old_size;
- if (cl == 0) {
- span = pageheap->GetDescriptor(p);
- cl = span->sizeclass;
- pageheap->CacheSizeClass(p, cl);
- }
- if (cl != 0) {
- old_size = ByteSizeForClass(cl);
- } else {
- ASSERT(span != NULL);
- old_size = span->length << kPageShift;
- }
-
- // Reallocate if the new size is larger than the old size,
- // or if the new size is significantly smaller than the old size.
- if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
- // Need to reallocate
- void* new_ptr = do_malloc(new_size);
- if (new_ptr == NULL) {
- return NULL;
- }
- MallocHook::InvokeNewHook(new_ptr, new_size);
- memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
- MallocHook::InvokeDeleteHook(old_ptr);
- // We could use a variant of do_free() that leverages the fact
- // that we already know the sizeclass of old_ptr. The benefit
- // would be small, so don't bother.
- do_free(old_ptr);
-#if ENABLE(WTF_MALLOC_VALIDATION)
- new_ptr = static_cast<Internal::ValidationHeader*>(new_ptr) + 1;
- *Internal::fastMallocValidationSuffix(new_ptr) = Internal::ValidationSuffix;
-#endif
- return new_ptr;
- } else {
-#if ENABLE(WTF_MALLOC_VALIDATION)
- old_ptr = static_cast<Internal::ValidationHeader*>(old_ptr) + 1; // Set old_ptr back to the user pointer.
- *Internal::fastMallocValidationSuffix(old_ptr) = Internal::ValidationSuffix;
-#endif
- return old_ptr;
- }
-}
-
-#ifdef WTF_CHANGES
-#undef do_malloc
-#else
-
-static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;
-
-static inline void* cpp_alloc(size_t size, bool nothrow) {
- for (;;) {
- void* p = do_malloc(size);
-#ifdef PREANSINEW
- return p;
-#else
- if (p == NULL) { // allocation failed
- // Get the current new handler. NB: this function is not
- // thread-safe. We make a feeble stab at making it so here, but
- // this lock only protects against tcmalloc interfering with
- // itself, not with other libraries calling set_new_handler.
- std::new_handler nh;
- {
- SpinLockHolder h(&set_new_handler_lock);
- nh = std::set_new_handler(0);
- (void) std::set_new_handler(nh);
- }
- // If no new_handler is established, the allocation failed.
- if (!nh) {
- if (nothrow) return 0;
- throw std::bad_alloc();
- }
- // Otherwise, try the new_handler. If it returns, retry the
- // allocation. If it throws std::bad_alloc, fail the allocation.
- // if it throws something else, don't interfere.
- try {
- (*nh)();
- } catch (const std::bad_alloc&) {
- if (!nothrow) throw;
- return p;
- }
- } else { // allocation success
- return p;
- }
-#endif
- }
-}
-
-#if ENABLE(GLOBAL_FASTMALLOC_NEW)
-
-void* operator new(size_t size) {
- void* p = cpp_alloc(size, false);
- // We keep this next instruction out of cpp_alloc for a reason: when
- // it's in, and new just calls cpp_alloc, the optimizer may fold the
- // new call into cpp_alloc, which messes up our whole section-based
- // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
- // isn't the last thing this fn calls, and prevents the folding.
- MallocHook::InvokeNewHook(p, size);
- return p;
-}
-
-void* operator new(size_t size, const std::nothrow_t&) __THROW {
- void* p = cpp_alloc(size, true);
- MallocHook::InvokeNewHook(p, size);
- return p;
-}
-
-void operator delete(void* p) __THROW {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
+ bmalloc::api::scavengeThisThread();
}
-void operator delete(void* p, const std::nothrow_t&) __THROW {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-void* operator new[](size_t size) {
- void* p = cpp_alloc(size, false);
- // We keep this next instruction out of cpp_alloc for a reason: when
- // it's in, and new just calls cpp_alloc, the optimizer may fold the
- // new call into cpp_alloc, which messes up our whole section-based
- // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
- // isn't the last thing this fn calls, and prevents the folding.
- MallocHook::InvokeNewHook(p, size);
- return p;
-}
-
-void* operator new[](size_t size, const std::nothrow_t&) __THROW {
- void* p = cpp_alloc(size, true);
- MallocHook::InvokeNewHook(p, size);
- return p;
-}
-
-void operator delete[](void* p) __THROW {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-void operator delete[](void* p, const std::nothrow_t&) __THROW {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-#endif
-
-extern "C" void* memalign(size_t align, size_t size) __THROW {
- void* result = do_memalign(align, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size)
- __THROW {
- if (((align % sizeof(void*)) != 0) ||
- ((align & (align - 1)) != 0) ||
- (align == 0)) {
- return EINVAL;
- }
-
- void* result = do_memalign(align, size);
- MallocHook::InvokeNewHook(result, size);
- if (result == NULL) {
- return ENOMEM;
- } else {
- *result_ptr = result;
- return 0;
- }
-}
-
-static size_t pagesize = 0;
-
-extern "C" void* valloc(size_t size) __THROW {
- // Allocate page-aligned object of length >= size bytes
- if (pagesize == 0) pagesize = getpagesize();
- void* result = do_memalign(pagesize, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" void* pvalloc(size_t size) __THROW {
- // Round up size to a multiple of pagesize
- if (pagesize == 0) pagesize = getpagesize();
- size = (size + pagesize - 1) & ~(pagesize - 1);
- void* result = do_memalign(pagesize, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" void malloc_stats(void) {
- do_malloc_stats();
-}
-
-extern "C" int mallopt(int cmd, int value) {
- return do_mallopt(cmd, value);
-}
-
-#ifdef HAVE_STRUCT_MALLINFO
-extern "C" struct mallinfo mallinfo(void) {
- return do_mallinfo();
-}
-#endif
-
-//-------------------------------------------------------------------
-// Some library routines on RedHat 9 allocate memory using malloc()
-// and free it using __libc_free() (or vice-versa). Since we provide
-// our own implementations of malloc/free, we need to make sure that
-// the __libc_XXX variants (defined as part of glibc) also point to
-// the same implementations.
-//-------------------------------------------------------------------
-
-#if defined(__GLIBC__)
-extern "C" {
-#if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
- // Potentially faster variants that use the gcc alias extension.
- // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
-# define ALIAS(x) __attribute__ ((weak, alias (x)))
- void* __libc_malloc(size_t size) ALIAS("malloc");
- void __libc_free(void* ptr) ALIAS("free");
- void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc");
- void* __libc_calloc(size_t n, size_t size) ALIAS("calloc");
- void __libc_cfree(void* ptr) ALIAS("cfree");
- void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
- void* __libc_valloc(size_t size) ALIAS("valloc");
- void* __libc_pvalloc(size_t size) ALIAS("pvalloc");
- int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
-# undef ALIAS
-# else /* not __GNUC__ */
- // Portable wrappers
- void* __libc_malloc(size_t size) { return malloc(size); }
- void __libc_free(void* ptr) { free(ptr); }
- void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
- void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); }
- void __libc_cfree(void* ptr) { cfree(ptr); }
- void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
- void* __libc_valloc(size_t size) { return valloc(size); }
- void* __libc_pvalloc(size_t size) { return pvalloc(size); }
- int __posix_memalign(void** r, size_t a, size_t s) {
- return posix_memalign(r, a, s);
- }
-# endif /* __GNUC__ */
-}
-#endif /* __GLIBC__ */
-
-// Override __libc_memalign in libc on linux boxes specially.
-// They have a bug in libc that causes them to (very rarely) allocate
-// with __libc_memalign() yet deallocate with free() and the
-// definitions above don't catch it.
-// This function is an exception to the rule of calling MallocHook method
-// from the stack frame of the allocation function;
-// heap-checker handles this special case explicitly.
-static void *MemalignOverride(size_t align, size_t size, const void *caller)
- __THROW {
- void* result = do_memalign(align, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
-
-#endif
-
-#ifdef WTF_CHANGES
void releaseFastMallocFreeMemory()
{
- // Flush free pages in the current thread cache back to the page heap.
- if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent())
- threadCache->Cleanup();
-
- SpinLockHolder h(&pageheap_lock);
- pageheap->ReleaseFreePages();
+ bmalloc::api::scavenge();
}
FastMallocStatistics fastMallocStatistics()
{
- FastMallocStatistics statistics;
-
- SpinLockHolder lockHolder(&pageheap_lock);
- statistics.reservedVMBytes = static_cast<size_t>(pageheap->SystemBytes());
- statistics.committedVMBytes = statistics.reservedVMBytes - pageheap->ReturnedBytes();
+ // FIXME: Can bmalloc itself report the stats instead of relying on the OS?
+ FastMallocStatistics statistics;
statistics.freeListBytes = 0;
- for (unsigned cl = 0; cl < kNumClasses; ++cl) {
- const int length = central_cache[cl].length();
- const int tc_length = central_cache[cl].tc_length();
-
- statistics.freeListBytes += ByteSizeForClass(cl) * (length + tc_length);
- }
- for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
- statistics.freeListBytes += threadCache->Size();
-
- return statistics;
-}
+ statistics.reservedVMBytes = 0;
-size_t fastMallocSize(const void* ptr)
-{
-#if ENABLE(WTF_MALLOC_VALIDATION)
- return Internal::fastMallocValidationHeader(const_cast<void*>(ptr))->m_size;
+#if OS(WINDOWS)
+ PROCESS_MEMORY_COUNTERS resourceUsage;
+ GetProcessMemoryInfo(GetCurrentProcess(), &resourceUsage, sizeof(resourceUsage));
+ statistics.committedVMBytes = resourceUsage.PeakWorkingSetSize;
#else
- const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- Span* span = pageheap->GetDescriptorEnsureSafe(p);
-
- if (!span || span->free)
- return 0;
-
- for (HardenedSLL free = span->objects; free; free = SLL_Next(free, HARDENING_ENTROPY)) {
- if (ptr == free.value())
- return 0;
- }
-
- if (size_t cl = span->sizeclass)
- return ByteSizeForClass(cl);
-
- return span->length << kPageShift;
-#endif
-}
+ struct rusage resourceUsage;
+ getrusage(RUSAGE_SELF, &resourceUsage);
#if OS(DARWIN)
-class RemoteMemoryReader {
- task_t m_task;
- memory_reader_t* m_reader;
-
-public:
- RemoteMemoryReader(task_t task, memory_reader_t* reader)
- : m_task(task)
- , m_reader(reader)
- { }
-
- void* operator()(vm_address_t address, size_t size) const
- {
- void* output;
- kern_return_t err = (*m_reader)(m_task, address, size, static_cast<void**>(&output));
- if (err)
- output = 0;
- return output;
- }
-
- template <typename T>
- T* operator()(T* address, size_t size = sizeof(T)) const
- {
- return static_cast<T*>((*this)(reinterpret_cast<vm_address_t>(address), size));
- }
-
- template <typename T>
- T* nextEntryInHardenedLinkedList(T** remoteAddress, uintptr_t entropy) const
- {
- T** localAddress = (*this)(remoteAddress);
- if (!localAddress)
- return 0;
- T* hardenedNext = *localAddress;
- if (!hardenedNext || hardenedNext == (void*)entropy)
- return 0;
- return XOR_MASK_PTR_WITH_KEY(hardenedNext, remoteAddress, entropy);
- }
-};
-
-template <typename T>
-template <typename Recorder>
-void PageHeapAllocator<T>::recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
-{
- for (HardenedSLL adminAllocation = allocated_regions_; adminAllocation; adminAllocation.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(adminAllocation.value()), entropy_)))
- recorder.recordRegion(reinterpret_cast<vm_address_t>(adminAllocation.value()), kAllocIncrement);
-}
-
-class FreeObjectFinder {
- const RemoteMemoryReader& m_reader;
- HashSet<void*> m_freeObjects;
-
-public:
- FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }
-
- void visit(void* ptr) { m_freeObjects.add(ptr); }
- bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
- bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); }
- size_t freeObjectCount() const { return m_freeObjects.size(); }
-
- void findFreeObjects(TCMalloc_ThreadCache* threadCache)
- {
- for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
- threadCache->enumerateFreeObjects(*this, m_reader);
- }
-
- void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList)
- {
- for (unsigned i = 0; i < numSizes; i++)
- centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i);
- }
-};
-
-class PageMapFreeObjectFinder {
- const RemoteMemoryReader& m_reader;
- FreeObjectFinder& m_freeObjectFinder;
- uintptr_t m_entropy;
-
-public:
- PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder, uintptr_t entropy)
- : m_reader(reader)
- , m_freeObjectFinder(freeObjectFinder)
- , m_entropy(entropy)
- {
-#if ENABLE(TCMALLOC_HARDENING)
- ASSERT(m_entropy);
-#endif
- }
-
- int visit(void* ptr) const
- {
- if (!ptr)
- return 1;
-
- Span* span = m_reader(reinterpret_cast<Span*>(ptr));
- if (!span)
- return 1;
-
- if (span->free) {
- void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
- m_freeObjectFinder.visit(ptr);
- } else if (span->sizeclass) {
- // Walk the free list of the small-object span, keeping track of each object seen
- for (HardenedSLL nextObject = span->objects; nextObject; nextObject.setValue(m_reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), m_entropy)))
- m_freeObjectFinder.visit(nextObject.value());
- }
- return span->length;
- }
-};
-
-class PageMapMemoryUsageRecorder {
- task_t m_task;
- void* m_context;
- unsigned m_typeMask;
- vm_range_recorder_t* m_recorder;
- const RemoteMemoryReader& m_reader;
- const FreeObjectFinder& m_freeObjectFinder;
-
- HashSet<void*> m_seenPointers;
- Vector<Span*> m_coalescedSpans;
-
-public:
- PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
- : m_task(task)
- , m_context(context)
- , m_typeMask(typeMask)
- , m_recorder(recorder)
- , m_reader(reader)
- , m_freeObjectFinder(freeObjectFinder)
- { }
-
- ~PageMapMemoryUsageRecorder()
- {
- ASSERT(!m_coalescedSpans.size());
- }
-
- void recordPendingRegions()
- {
- bool recordRegionsContainingPointers = m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE;
- bool recordAllocations = m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE;
-
- if (!recordRegionsContainingPointers && !recordAllocations) {
- m_coalescedSpans.clear();
- return;
- }
-
- Vector<vm_range_t, 256> pointerRegions;
- Vector<vm_range_t, 1024> allocatedPointers;
- for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
- Span *theSpan = m_coalescedSpans[i];
- vm_address_t spanStartAddress = theSpan->start << kPageShift;
- vm_size_t spanSizeInBytes = theSpan->length * kPageSize;
-
- if (recordRegionsContainingPointers)
- pointerRegions.append((vm_range_t){spanStartAddress, spanSizeInBytes});
-
- if (theSpan->free || !recordAllocations)
- continue;
-
- if (!theSpan->sizeclass) {
- // If it's an allocated large object span, mark it as in use
- if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
- allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
- } else {
- const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);
-
- // Mark each allocated small object within the span as in use
- const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
- for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
- if (!m_freeObjectFinder.isFreeObject(object))
- allocatedPointers.append((vm_range_t){object, objectSize});
- }
- }
- }
-
- if (recordRegionsContainingPointers)
- (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, pointerRegions.data(), pointerRegions.size());
-
- if (recordAllocations)
- (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());
-
- m_coalescedSpans.clear();
- }
-
- int visit(void* ptr)
- {
- if (!ptr)
- return 1;
-
- Span* span = m_reader(reinterpret_cast<Span*>(ptr));
- if (!span || !span->start)
- return 1;
-
- if (!m_seenPointers.add(ptr).isNewEntry)
- return span->length;
-
- if (!m_coalescedSpans.size()) {
- m_coalescedSpans.append(span);
- return span->length;
- }
-
- Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
- vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
- vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;
-
- // If the new span is adjacent to the previous span, do nothing for now.
- vm_address_t spanStartAddress = span->start << kPageShift;
- if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
- m_coalescedSpans.append(span);
- return span->length;
- }
-
- // New span is not adjacent to previous span, so record the spans coalesced so far.
- recordPendingRegions();
- m_coalescedSpans.append(span);
-
- return span->length;
- }
-};
-
-class AdminRegionRecorder {
- task_t m_task;
- void* m_context;
- unsigned m_typeMask;
- vm_range_recorder_t* m_recorder;
-
- Vector<vm_range_t, 1024> m_pendingRegions;
-
-public:
- AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder)
- : m_task(task)
- , m_context(context)
- , m_typeMask(typeMask)
- , m_recorder(recorder)
- { }
-
- void recordRegion(vm_address_t ptr, size_t size)
- {
- if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
- m_pendingRegions.append((vm_range_t){ ptr, size });
- }
-
- void visit(void *ptr, size_t size)
- {
- recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
- }
-
- void recordPendingRegions()
- {
- if (m_pendingRegions.size()) {
- (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
- m_pendingRegions.clear();
- }
- }
-
- ~AdminRegionRecorder()
- {
- ASSERT(!m_pendingRegions.size());
- }
-};
-
-kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
-{
- RemoteMemoryReader memoryReader(task, reader);
-
- InitSizeClasses();
-
- FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
- TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
- TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
- TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);
-
- TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);
-
- FreeObjectFinder finder(memoryReader);
- finder.findFreeObjects(threadHeaps);
- finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches);
-
- TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
- PageMapFreeObjectFinder pageMapFinder(memoryReader, finder, pageHeap->entropy_);
- pageMap->visitValues(pageMapFinder, memoryReader);
-
- PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
- pageMap->visitValues(usageRecorder, memoryReader);
- usageRecorder.recordPendingRegions();
-
- AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder);
- pageMap->visitAllocations(adminRegionRecorder, memoryReader);
-
- PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
- PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);
-
- spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
- pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
-
- adminRegionRecorder.recordPendingRegions();
-
- return 0;
-}
-
-size_t FastMallocZone::size(malloc_zone_t*, const void*)
-{
- return 0;
-}
-
-void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
-{
- return 0;
-}
-
-void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
-{
- return 0;
-}
-
-void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
-{
- // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
- // is not in this zone. When this happens, the pointer being freed was not allocated by any
- // zone so we need to print a useful error for the application developer.
- malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
-}
-
-void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
-{
- return 0;
-}
-
-
-#undef malloc
-#undef free
-#undef realloc
-#undef calloc
-
-extern "C" {
-malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
- &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
- , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
- , 0, 0, 0, 0 // These members will not be used unless the zone advertises itself as version seven or higher.
-
- };
-}
-
-FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
- : m_pageHeap(pageHeap)
- , m_threadHeaps(threadHeaps)
- , m_centralCaches(centralCaches)
- , m_spanAllocator(spanAllocator)
- , m_pageHeapAllocator(pageHeapAllocator)
-{
- memset(&m_zone, 0, sizeof(m_zone));
- m_zone.version = 4;
- m_zone.zone_name = "JavaScriptCore FastMalloc";
- m_zone.size = &FastMallocZone::size;
- m_zone.malloc = &FastMallocZone::zoneMalloc;
- m_zone.calloc = &FastMallocZone::zoneCalloc;
- m_zone.realloc = &FastMallocZone::zoneRealloc;
- m_zone.free = &FastMallocZone::zoneFree;
- m_zone.valloc = &FastMallocZone::zoneValloc;
- m_zone.destroy = &FastMallocZone::zoneDestroy;
- m_zone.introspect = &jscore_fastmalloc_introspection;
- malloc_zone_register(&m_zone);
-}
-
+ statistics.committedVMBytes = resourceUsage.ru_maxrss;
+#else
+ statistics.committedVMBytes = resourceUsage.ru_maxrss * 1024;
+#endif // OS(DARWIN)
-void FastMallocZone::init()
-{
- static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
+#endif // OS(WINDOWS)
+ return statistics;
}
-#endif // OS(DARWIN)
-
} // namespace WTF
-#endif // WTF_CHANGES
-#endif // FORCE_SYSTEM_MALLOC
+#endif // defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
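
Context note (not part of the patch): the bmalloc-backed tryFastCalloc above guards the numElements * elementSize product with Checked<size_t, RecordOverflow> before allocating. A minimal sketch of the same overflow-guard pattern, assuming only wtf/CheckedArithmetic.h and wtf/FastMalloc.h; the helper name is hypothetical:

    #include <wtf/CheckedArithmetic.h>
    #include <wtf/FastMalloc.h>

    // Hypothetical helper (illustration only): refuse to allocate when the
    // element count times the element size would overflow size_t.
    static void* allocateArraySketch(size_t numElements, size_t elementSize)
    {
        WTF::Checked<size_t, WTF::RecordOverflow> totalSize = elementSize;
        totalSize *= numElements;
        if (totalSize.hasOverflowed())
            return nullptr;
        return WTF::fastMalloc(totalSize.unsafeGet()); // crashes on failure rather than returning null
    }
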
diff --git a/Source/WTF/wtf/FastMalloc.h b/Source/WTF/wtf/FastMalloc.h
index f04dc97dc..525f9cbaf 100644
--- a/Source/WTF/wtf/FastMalloc.h
+++ b/Source/WTF/wtf/FastMalloc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2005-2009, 2015-2016 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -23,203 +23,94 @@
#include <new>
#include <stdlib.h>
-#include <wtf/Platform.h>
-#include <wtf/PossiblyNull.h>
#include <wtf/StdLibExtras.h>
namespace WTF {
- // These functions call CRASH() if an allocation fails.
- WTF_EXPORT_PRIVATE void* fastMalloc(size_t);
- WTF_EXPORT_PRIVATE void* fastZeroedMalloc(size_t);
- WTF_EXPORT_PRIVATE void* fastCalloc(size_t numElements, size_t elementSize);
- WTF_EXPORT_PRIVATE void* fastRealloc(void*, size_t);
- WTF_EXPORT_PRIVATE char* fastStrDup(const char*);
- WTF_EXPORT_PRIVATE size_t fastMallocSize(const void*);
- WTF_EXPORT_PRIVATE size_t fastMallocGoodSize(size_t);
-
- struct TryMallocReturnValue {
- TryMallocReturnValue(void* data)
- : m_data(data)
- {
- }
- TryMallocReturnValue(const TryMallocReturnValue& source)
- : m_data(source.m_data)
- {
- source.m_data = 0;
- }
- ~TryMallocReturnValue() { ASSERT(!m_data); }
- template <typename T> bool getValue(T& data) WARN_UNUSED_RETURN;
- template <typename T> operator PossiblyNull<T>()
- {
- T value;
- getValue(value);
- return PossiblyNull<T>(value);
- }
- private:
- mutable void* m_data;
- };
-
- template <typename T> bool TryMallocReturnValue::getValue(T& data)
- {
- union u { void* data; T target; } res;
- res.data = m_data;
- data = res.target;
- bool returnValue = !!m_data;
- m_data = 0;
- return returnValue;
- }
-
- WTF_EXPORT_PRIVATE TryMallocReturnValue tryFastMalloc(size_t n);
- TryMallocReturnValue tryFastZeroedMalloc(size_t n);
- WTF_EXPORT_PRIVATE TryMallocReturnValue tryFastCalloc(size_t n_elements, size_t element_size);
- WTF_EXPORT_PRIVATE TryMallocReturnValue tryFastRealloc(void* p, size_t n);
-
- WTF_EXPORT_PRIVATE void fastFree(void*);
-
-#ifndef NDEBUG
- WTF_EXPORT_PRIVATE void fastMallocForbid();
- WTF_EXPORT_PRIVATE void fastMallocAllow();
+#if !defined(NDEBUG)
+WTF_EXPORT_PRIVATE void fastSetMaxSingleAllocationSize(size_t);
#endif
- WTF_EXPORT_PRIVATE void releaseFastMallocFreeMemory();
-
- struct FastMallocStatistics {
- size_t reservedVMBytes;
- size_t committedVMBytes;
- size_t freeListBytes;
- };
- WTF_EXPORT_PRIVATE FastMallocStatistics fastMallocStatistics();
-
- // This defines a type which holds an unsigned integer and is the same
- // size as the minimally aligned memory allocation.
- typedef unsigned long long AllocAlignmentInteger;
-
- namespace Internal {
- enum AllocType { // Start with an unusual number instead of zero, because zero is common.
- AllocTypeMalloc = 0x375d6750, // Encompasses fastMalloc, fastZeroedMalloc, fastCalloc, fastRealloc.
- AllocTypeClassNew, // Encompasses class operator new from FastAllocBase.
- AllocTypeClassNewArray, // Encompasses class operator new[] from FastAllocBase.
- AllocTypeNew, // Encompasses global operator new.
- AllocTypeNewArray // Encompasses global operator new[].
- };
-
- enum {
- ValidationPrefix = 0xf00df00d,
- ValidationSuffix = 0x0badf00d
- };
-
- typedef unsigned ValidationTag;
-
- struct ValidationHeader {
- AllocType m_type;
- unsigned m_size;
- ValidationTag m_prefix;
- unsigned m_alignment;
- };
-
- static const int ValidationBufferSize = sizeof(ValidationHeader) + sizeof(ValidationTag);
- }
-
-#if ENABLE(WTF_MALLOC_VALIDATION)
-
- // Malloc validation is a scheme whereby a tag is attached to an
- // allocation which identifies how it was originally allocated.
- // This allows us to verify that the freeing operation matches the
- // allocation operation. If memory is allocated with operator new[]
- // but freed with free or delete, this system would detect that.
- // In the implementation here, the tag is an integer prepended to
- // the allocation memory which is assigned one of the AllocType
- // enumeration values. An alternative implementation of this
- // scheme could store the tag somewhere else or ignore it.
- // Users of FastMalloc don't need to know or care how this tagging
- // is implemented.
-
- namespace Internal {
-
- // Handle a detected alloc/free mismatch. By default this calls CRASH().
- void fastMallocMatchFailed(void* p);
-
- inline ValidationHeader* fastMallocValidationHeader(void* p)
- {
- return reinterpret_cast<ValidationHeader*>(static_cast<char*>(p) - sizeof(ValidationHeader));
- }
-
- inline ValidationTag* fastMallocValidationSuffix(void* p)
- {
- ValidationHeader* header = fastMallocValidationHeader(p);
- if (header->m_prefix != static_cast<unsigned>(ValidationPrefix))
- fastMallocMatchFailed(p);
-
- return reinterpret_cast<ValidationTag*>(static_cast<char*>(p) + header->m_size);
- }
-
- // Return the AllocType tag associated with the allocated block p.
- inline AllocType fastMallocMatchValidationType(void* p)
- {
- return fastMallocValidationHeader(p)->m_type;
- }
-
-    // Set the AllocType tag to be associated with the allocated block p.
- inline void setFastMallocMatchValidationType(void* p, AllocType allocType)
- {
- fastMallocValidationHeader(p)->m_type = allocType;
- }
-
- } // namespace Internal
-
- // This is a higher level function which is used by FastMalloc-using code.
- inline void fastMallocMatchValidateMalloc(void* p, Internal::AllocType allocType)
- {
- if (!p)
- return;
-
- Internal::setFastMallocMatchValidationType(p, allocType);
- }
-
- // This is a higher level function which is used by FastMalloc-using code.
- inline void fastMallocMatchValidateFree(void* p, Internal::AllocType)
- {
- if (!p)
- return;
-
- Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(p);
- if (header->m_prefix != static_cast<unsigned>(Internal::ValidationPrefix))
- Internal::fastMallocMatchFailed(p);
-
- if (*Internal::fastMallocValidationSuffix(p) != Internal::ValidationSuffix)
- Internal::fastMallocMatchFailed(p);
+class TryMallocReturnValue {
+public:
+ TryMallocReturnValue(void*);
+ TryMallocReturnValue(TryMallocReturnValue&&);
+ ~TryMallocReturnValue();
+ template<typename T> bool getValue(T*&) WARN_UNUSED_RETURN;
+private:
+ void operator=(TryMallocReturnValue&&) = delete;
+ mutable void* m_data;
+};
+
+WTF_EXPORT_PRIVATE bool isFastMallocEnabled();
+
+// These functions call CRASH() if an allocation fails.
+WTF_EXPORT_PRIVATE void* fastMalloc(size_t) RETURNS_NONNULL;
+WTF_EXPORT_PRIVATE void* fastZeroedMalloc(size_t) RETURNS_NONNULL;
+WTF_EXPORT_PRIVATE void* fastCalloc(size_t numElements, size_t elementSize) RETURNS_NONNULL;
+WTF_EXPORT_PRIVATE void* fastRealloc(void*, size_t) RETURNS_NONNULL;
+WTF_EXPORT_PRIVATE char* fastStrDup(const char*) RETURNS_NONNULL;
+
+WTF_EXPORT_PRIVATE TryMallocReturnValue tryFastMalloc(size_t);
+TryMallocReturnValue tryFastZeroedMalloc(size_t);
+WTF_EXPORT_PRIVATE TryMallocReturnValue tryFastCalloc(size_t numElements, size_t elementSize);
+
+WTF_EXPORT_PRIVATE void fastFree(void*);
+
+// Allocations from fastAlignedMalloc() must be freed using fastAlignedFree().
+WTF_EXPORT_PRIVATE void* fastAlignedMalloc(size_t alignment, size_t) RETURNS_NONNULL;
+WTF_EXPORT_PRIVATE void* tryFastAlignedMalloc(size_t alignment, size_t);
+WTF_EXPORT_PRIVATE void fastAlignedFree(void*);
+
+WTF_EXPORT_PRIVATE size_t fastMallocSize(const void*);
+
+// FIXME: This is non-helpful; fastMallocGoodSize will be removed soon.
+WTF_EXPORT_PRIVATE size_t fastMallocGoodSize(size_t);
+
+WTF_EXPORT_PRIVATE void releaseFastMallocFreeMemory();
+WTF_EXPORT_PRIVATE void releaseFastMallocFreeMemoryForThisThread();
+
+struct FastMallocStatistics {
+ size_t reservedVMBytes;
+ size_t committedVMBytes;
+ size_t freeListBytes;
+};
+WTF_EXPORT_PRIVATE FastMallocStatistics fastMallocStatistics();
+
+// This defines a type which holds an unsigned integer and is the same
+// size as the minimally aligned memory allocation.
+typedef unsigned long long AllocAlignmentInteger;
+
+inline TryMallocReturnValue::TryMallocReturnValue(void* data)
+ : m_data(data)
+{
+}
+
+inline TryMallocReturnValue::TryMallocReturnValue(TryMallocReturnValue&& source)
+ : m_data(source.m_data)
+{
+ source.m_data = nullptr;
+}
+
+inline TryMallocReturnValue::~TryMallocReturnValue()
+{
+ ASSERT(!m_data);
+}
+
+template<typename T> inline bool TryMallocReturnValue::getValue(T*& data)
+{
+ data = static_cast<T*>(m_data);
+ m_data = nullptr;
+ return data;
+}
- Internal::setFastMallocMatchValidationType(p, Internal::AllocTypeMalloc); // Set it to this so that fastFree thinks it's OK.
- }
-
- inline void fastMallocValidate(void* p)
- {
- if (!p)
- return;
-
- Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(p);
- if (header->m_prefix != static_cast<unsigned>(Internal::ValidationPrefix))
- Internal::fastMallocMatchFailed(p);
-
- if (*Internal::fastMallocValidationSuffix(p) != Internal::ValidationSuffix)
- Internal::fastMallocMatchFailed(p);
- }
-
-#else
-
- inline void fastMallocMatchValidateMalloc(void*, Internal::AllocType)
- {
- }
-
- inline void fastMallocMatchValidateFree(void*, Internal::AllocType)
- {
- }
+} // namespace WTF
+#if !defined(NDEBUG)
+using WTF::fastSetMaxSingleAllocationSize;
#endif
-} // namespace WTF
-
+using WTF::isFastMallocEnabled;
using WTF::fastCalloc;
using WTF::fastFree;
using WTF::fastMalloc;
@@ -228,19 +119,16 @@ using WTF::fastMallocSize;
using WTF::fastRealloc;
using WTF::fastStrDup;
using WTF::fastZeroedMalloc;
+using WTF::tryFastAlignedMalloc;
using WTF::tryFastCalloc;
using WTF::tryFastMalloc;
-using WTF::tryFastRealloc;
using WTF::tryFastZeroedMalloc;
+using WTF::fastAlignedMalloc;
+using WTF::fastAlignedFree;
-#ifndef NDEBUG
-using WTF::fastMallocForbid;
-using WTF::fastMallocAllow;
-#endif
-
-#if COMPILER(GCC) && OS(DARWIN)
+#if COMPILER(GCC_OR_CLANG) && OS(DARWIN)
#define WTF_PRIVATE_INLINE __private_extern__ inline __attribute__((always_inline))
-#elif COMPILER(GCC)
+#elif COMPILER(GCC_OR_CLANG)
#define WTF_PRIVATE_INLINE inline __attribute__((always_inline))
#elif COMPILER(MSVC)
#define WTF_PRIVATE_INLINE __forceinline
@@ -248,73 +136,28 @@ using WTF::fastMallocAllow;
#define WTF_PRIVATE_INLINE inline
#endif
-#if !defined(_CRTDBG_MAP_ALLOC) && !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC)
-
-// The nothrow functions here are actually not all that helpful, because fastMalloc will
-// call CRASH() rather than returning 0, and returning 0 is what nothrow is all about.
-// But since WebKit code never uses exceptions or nothrow at all, this is probably OK.
-// Long term we will adopt FastAllocBase.h everywhere, and replace this with
-// debug-only code to make sure we don't use the system malloc via the default operator
-// new by accident.
-
-#if ENABLE(GLOBAL_FASTMALLOC_NEW)
-
-#if COMPILER(MSVC)
-#pragma warning(push)
-#pragma warning(disable: 4290) // Disable the C++ exception specification ignored warning.
-#elif COMPILER(CLANG) && defined(__has_warning)
-#pragma clang diagnostic push
-#if __has_warning("-Winline-new-delete")
-// FIXME: The operator new, delete definitions cannot be inline per replacement.functions (17.6.4.6/3) of the C++
-// standard. As a workaround, disable warnings for such usage. See <https://bugs.webkit.org/show_bug.cgi?id=124186>.
-#pragma clang diagnostic ignored "-Winline-new-delete"
-#endif
-#endif
-WTF_PRIVATE_INLINE void* operator new(size_t size) throw (std::bad_alloc) { return fastMalloc(size); }
-WTF_PRIVATE_INLINE void* operator new(size_t size, const std::nothrow_t&) throw() { return fastMalloc(size); }
-WTF_PRIVATE_INLINE void operator delete(void* p) throw() { fastFree(p); }
-WTF_PRIVATE_INLINE void operator delete(void* p, const std::nothrow_t&) throw() { fastFree(p); }
-WTF_PRIVATE_INLINE void* operator new[](size_t size) throw (std::bad_alloc) { return fastMalloc(size); }
-WTF_PRIVATE_INLINE void* operator new[](size_t size, const std::nothrow_t&) throw() { return fastMalloc(size); }
-WTF_PRIVATE_INLINE void operator delete[](void* p) throw() { fastFree(p); }
-WTF_PRIVATE_INLINE void operator delete[](void* p, const std::nothrow_t&) throw() { fastFree(p); }
-#if COMPILER(MSVC)
-#pragma warning(pop)
-#elif COMPILER(CLANG) && defined(__has_warning)
-#pragma clang diagnostic pop
-#endif
-
-#endif // ENABLE(GLOBAL_FASTMALLOC_NEW)
-#endif // !defined(_CRTDBG_MAP_ALLOC) && !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC)
-
-#define WTF_FASTMALLOC_OPERATORS \
+#define WTF_MAKE_FAST_ALLOCATED \
public: \
void* operator new(size_t, void* p) { return p; } \
void* operator new[](size_t, void* p) { return p; } \
\
void* operator new(size_t size) \
{ \
- void* p = ::WTF::fastMalloc(size); \
- ::WTF::fastMallocMatchValidateMalloc(p, ::WTF::Internal::AllocTypeClassNew); \
- return p; \
+ return ::WTF::fastMalloc(size); \
} \
\
void operator delete(void* p) \
{ \
- ::WTF::fastMallocMatchValidateFree(p, ::WTF::Internal::AllocTypeClassNew); \
::WTF::fastFree(p); \
} \
\
void* operator new[](size_t size) \
{ \
- void* p = ::WTF::fastMalloc(size); \
- ::WTF::fastMallocMatchValidateMalloc(p, ::WTF::Internal::AllocTypeClassNewArray); \
- return p; \
+ return ::WTF::fastMalloc(size); \
} \
\
void operator delete[](void* p) \
{ \
- ::WTF::fastMallocMatchValidateFree(p, ::WTF::Internal::AllocTypeClassNewArray); \
::WTF::fastFree(p); \
} \
void* operator new(size_t, NotNullTag, void* location) \
@@ -325,11 +168,4 @@ public: \
private: \
typedef int __thisIsHereToForceASemicolonAfterThisMacro
-#if ENABLE(GLOBAL_FASTMALLOC_NEW)
-#define WTF_MAKE_FAST_ALLOCATED
-#else
-#define WTF_MAKE_FAST_ALLOCATED WTF_FASTMALLOC_OPERATORS
-#endif
-
-
#endif /* WTF_FastMalloc_h */
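
Usage note (not part of the patch): the reworked TryMallocReturnValue must be consumed through getValue(), since its destructor asserts that the stored pointer was either taken or null. A minimal caller sketch under that assumption; the function and its parameters are hypothetical:

    #include <string.h>
    #include <wtf/FastMalloc.h>

    // Illustrative caller: take ownership of the pointer via getValue() and
    // bail out cleanly when the allocation failed.
    static bool copyBuffer(const char* source, size_t length)
    {
        char* destination;
        if (!WTF::tryFastMalloc(length).getValue(destination))
            return false; // allocation failed; no crash, unlike fastMalloc()
        memcpy(destination, source, length);
        WTF::fastFree(destination);
        return true;
    }
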
diff --git a/Source/WTF/wtf/FeatureDefines.h b/Source/WTF/wtf/FeatureDefines.h
index b1fe23942..88a3bda88 100644
--- a/Source/WTF/wtf/FeatureDefines.h
+++ b/Source/WTF/wtf/FeatureDefines.h
@@ -13,10 +13,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -34,52 +34,42 @@
* - "1" enables the feature by default. The feature can still be disabled for a specific port or environment.
*
* The feature defaults in this file are only taken into account if the (port specific) build system
- * has not enabled or disabled a particular feature.
+ * has not enabled or disabled a particular feature.
*
 * Use this file to define ENABLE() macros only. Do not use this file to define USE() or other macros!
*
* Only define a macro if it was not defined before - always check for !defined first.
- *
+ *
* Keep the file sorted by the name of the defines. As an exception you can change the order
* to allow interdependencies between the default values.
- *
+ *
* Below are a few potential commands to take advantage of this file running from the Source/WTF directory
*
* Get the list of feature defines: grep -o "ENABLE_\(\w\+\)" wtf/FeatureDefines.h | sort | uniq
- * Get the list of features enabled by default for a PLATFORM(XXX): gcc -E -dM -I. -DWTF_PLATFORM_XXX "wtf/Platform.h" | grep "ENABLE_\w\+ 1" | cut -d' ' -f2 | sort
+ * Get the list of features enabled by default for a PLATFORM(XXX): gcc -E -dM -I. -DWTF_PLATFORM_XXX "wtf/Platform.h" | grep "ENABLE_\w\+ 1" | cut -d' ' -f2 | sort
*/
/* FIXME: Move out the PLATFORM specific rules into platform specific files. */
/* --------- Apple IOS (but not MAC) port --------- */
-/* PLATFORM(IOS) is a specialization of PLATFORM(MAC). */
-/* PLATFORM(MAC) is always enabled when PLATFORM(IOS) is enabled. */
#if PLATFORM(IOS)
#if !defined(ENABLE_ASYNC_SCROLLING)
#define ENABLE_ASYNC_SCROLLING 1
#endif
-#if !defined(ENABLE_8BIT_TEXTRUN)
-#define ENABLE_8BIT_TEXTRUN 1
+#if !defined(ENABLE_CONTENT_EXTENSIONS)
+#define ENABLE_CONTENT_EXTENSIONS 1
#endif
#if !defined(ENABLE_CONTEXT_MENUS)
#define ENABLE_CONTEXT_MENUS 0
#endif
-#if !defined(ENABLE_CSS_IMAGE_SET)
-#define ENABLE_CSS_IMAGE_SET 1
-#endif
-
#if !defined(ENABLE_CURSOR_SUPPORT)
#define ENABLE_CURSOR_SUPPORT 0
#endif
-#if !defined(ENABLE_DISK_IMAGE_CACHE)
-#define ENABLE_DISK_IMAGE_CACHE 1
-#endif
-
#if !defined(ENABLE_DRAG_SUPPORT)
#define ENABLE_DRAG_SUPPORT 0
#endif
@@ -92,6 +82,10 @@
#define ENABLE_ICONDATABASE 0
#endif
+#if !defined(ENABLE_INSPECTOR_ALTERNATE_DISPATCHERS)
+#define ENABLE_INSPECTOR_ALTERNATE_DISPATCHERS 1
+#endif
+
#if !defined(ENABLE_LETTERPRESS)
#define ENABLE_LETTERPRESS 1
#endif
@@ -100,15 +94,15 @@
#define ENABLE_IOS_AUTOCORRECT_AND_AUTOCAPITALIZE 1
#endif
-#if !defined(ENABLE_IOS_GESTURE_EVENTS)
+#if !defined(ENABLE_IOS_GESTURE_EVENTS) && USE(APPLE_INTERNAL_SDK)
#define ENABLE_IOS_GESTURE_EVENTS 1
#endif
-#if !defined(ENABLE_IOS_TEXT_AUTOSIZING)
-#define ENABLE_IOS_TEXT_AUTOSIZING 1
+#if !defined(ENABLE_TEXT_AUTOSIZING)
+#define ENABLE_TEXT_AUTOSIZING 1
#endif
-#if !defined(ENABLE_IOS_TOUCH_EVENTS)
+#if !defined(ENABLE_IOS_TOUCH_EVENTS) && USE(APPLE_INTERNAL_SDK)
#define ENABLE_IOS_TOUCH_EVENTS 1
#endif
@@ -132,10 +126,6 @@
#define ENABLE_REMOTE_INSPECTOR 1
#endif
-#if !defined(ENABLE_REPAINT_THROTTLING)
-#define ENABLE_REPAINT_THROTTLING 0
-#endif
-
#if !defined(ENABLE_RESPECT_EXIF_ORIENTATION)
#define ENABLE_RESPECT_EXIF_ORIENTATION 1
#endif
@@ -148,7 +138,9 @@
#define ENABLE_TEXT_SELECTION 0
#endif
-#if !defined(ENABLE_TOUCH_EVENTS)
+/* FIXME: Remove the USE(APPLE_INTERNAL_SDK) conjunct once we support touch events when building against
+the public iOS SDK. We will also need to update the FeatureDefines.xcconfig files. */
+#if !defined(ENABLE_TOUCH_EVENTS) && USE(APPLE_INTERNAL_SDK)
#define ENABLE_TOUCH_EVENTS 1
#endif
@@ -160,38 +152,37 @@
#define ENABLE_VIEW_MODE_CSS_MEDIA 0
#endif
+#if !defined(ENABLE_WEBASSEMBLY)
+#define ENABLE_WEBASSEMBLY (defined(ENABLE_B3_JIT) && ENABLE_B3_JIT)
+#endif
+
#if !defined(ENABLE_WEBGL)
#define ENABLE_WEBGL 1
#endif
+#if !defined(ENABLE_PRIMARY_SNAPSHOTTED_PLUGIN_HEURISTIC)
+#define ENABLE_PRIMARY_SNAPSHOTTED_PLUGIN_HEURISTIC 1
+#endif
+
+#if !defined(ENABLE_DOWNLOAD_ATTRIBUTE)
+#define ENABLE_DOWNLOAD_ATTRIBUTE 0
+#endif
+
#endif /* PLATFORM(IOS) */
/* --------- Apple MAC port (not IOS) --------- */
-#if PLATFORM(MAC) && !PLATFORM(IOS)
-
-#if !defined(ENABLE_8BIT_TEXTRUN)
-#define ENABLE_8BIT_TEXTRUN 1
-#endif
+#if PLATFORM(MAC)
-#if !defined(ENABLE_CSS_IMAGE_SET)
-#define ENABLE_CSS_IMAGE_SET 1
+#if !defined(ENABLE_CONTENT_EXTENSIONS)
+#define ENABLE_CONTENT_EXTENSIONS 1
#endif
#if !defined(ENABLE_DASHBOARD_SUPPORT)
#define ENABLE_DASHBOARD_SUPPORT 1
#endif
-#if !defined(ENABLE_DELETION_UI)
-#define ENABLE_DELETION_UI 1
-#endif
-
-#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090
-#if !defined(ENABLE_ENCRYPTED_MEDIA)
-#define ENABLE_ENCRYPTED_MEDIA 1
-#endif
-#if !defined(ENABLE_ENCRYPTED_MEDIA_V2)
-#define ENABLE_ENCRYPTED_MEDIA_V2 1
-#endif
+#if !defined(ENABLE_LEGACY_ENCRYPTED_MEDIA)
+#define ENABLE_LEGACY_ENCRYPTED_MEDIA 1
#endif
#if !defined(ENABLE_FULLSCREEN_API)
@@ -202,8 +193,8 @@
#define ENABLE_REMOTE_INSPECTOR 1
#endif
-#if !defined(ENABLE_RUBBER_BANDING)
-#define ENABLE_RUBBER_BANDING 1
+#if !defined(ENABLE_INSPECTOR_ALTERNATE_DISPATCHERS)
+#define ENABLE_INSPECTOR_ALTERNATE_DISPATCHERS 1
#endif
#if !defined(ENABLE_SMOOTH_SCROLLING)
@@ -224,6 +215,10 @@
#define ENABLE_VIEW_MODE_CSS_MEDIA 0
#endif
+#if !defined(ENABLE_WEBASSEMBLY)
+#define ENABLE_WEBASSEMBLY (defined(ENABLE_B3_JIT) && ENABLE_B3_JIT)
+#endif
+
#if !defined(ENABLE_WEB_ARCHIVE)
#define ENABLE_WEB_ARCHIVE 1
#endif
@@ -244,14 +239,47 @@
#define ENABLE_INPUT_TYPE_COLOR_POPOVER 1
#endif
-#if !defined(ENABLE_MEDIA_SOURCE)
-#define ENABLE_MEDIA_SOURCE 1
+#if !defined(ENABLE_FILE_REPLACEMENT)
+#define ENABLE_FILE_REPLACEMENT 1
+#endif
+
+#if !defined(ENABLE_PRIMARY_SNAPSHOTTED_PLUGIN_HEURISTIC)
+#define ENABLE_PRIMARY_SNAPSHOTTED_PLUGIN_HEURISTIC 1
+#endif
+
+#if !defined(ENABLE_MAC_GESTURE_EVENTS) && USE(APPLE_INTERNAL_SDK)
+#define ENABLE_MAC_GESTURE_EVENTS 1
+#endif
+
+#endif /* PLATFORM(MAC) */
+
+#if PLATFORM(COCOA)
+
+#if !defined(ENABLE_DATA_DETECTION)
+#define ENABLE_DATA_DETECTION 1
+#endif
+
+#if !defined(ENABLE_KEYBOARD_KEY_ATTRIBUTE)
+#define ENABLE_KEYBOARD_KEY_ATTRIBUTE 1
+#endif
+
+#if !defined(ENABLE_KEYBOARD_CODE_ATTRIBUTE)
+#define ENABLE_KEYBOARD_CODE_ATTRIBUTE 1
+#endif
+
+#endif /* PLATFORM(COCOA) */
+
+#if !PLATFORM(COCOA)
+
+#if !defined(JSC_OBJC_API_ENABLED)
+#define JSC_OBJC_API_ENABLED 0
#endif
-#endif /* PLATFORM(MAC) && !PLATFORM(IOS) */
+#endif /* !PLATFORM(COCOA) */
+
/* --------- Apple Windows port --------- */
-#if PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO)
+#if PLATFORM(WIN) && !PLATFORM(WIN_CAIRO)
#if !defined(ENABLE_FULLSCREEN_API)
#define ENABLE_FULLSCREEN_API 1
@@ -261,26 +289,15 @@
#define ENABLE_WEB_ARCHIVE 1
#endif
-#endif /* PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO) */
-
-/* --------- WinCE port --------- */
-/* WinCE port is a specialization of PLATFORM(WIN). */
-/* PLATFORM(WIN) is always enabled when building for the WinCE port. */
-#if PLATFORM(WIN) && OS(WINCE)
-
-#if !defined(ENABLE_DRAG_SUPPORT)
-#define ENABLE_DRAG_SUPPORT 0
-#endif
-
-#if !defined(ENABLE_FTPDIR)
-#define ENABLE_FTPDIR 0
+#if !defined(ENABLE_WEBGL)
+#define ENABLE_WEBGL 0
#endif
-#if !defined(ENABLE_INSPECTOR)
-#define ENABLE_INSPECTOR 0
+#if !defined(ENABLE_GEOLOCATION)
+#define ENABLE_GEOLOCATION 1
#endif
-#endif /* PLATFORM(WIN) && OS(WINCE) */
+#endif /* PLATFORM(WIN) && !PLATFORM(WIN_CAIRO) */
/* --------- Windows CAIRO port --------- */
/* PLATFORM(WIN_CAIRO) is a specialization of PLATFORM(WIN). */
@@ -299,53 +316,28 @@
#define ENABLE_WEBGL 1
#endif
-#endif /* PLATFORM(WIN_CAIRO) */
-
-/* --------- EFL port (Unix) --------- */
-#if PLATFORM(EFL)
-
-#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
-#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 1
-#endif
-
-#if !defined(ENABLE_SUBPIXEL_LAYOUT)
-#define ENABLE_SUBPIXEL_LAYOUT 1
-#endif
-
-#if !defined(ENABLE_8BIT_TEXTRUN)
-#define ENABLE_8BIT_TEXTRUN 1
+#if !defined(ENABLE_GEOLOCATION)
+#define ENABLE_GEOLOCATION 1
#endif
-#endif /* PLATFORM(EFL) */
+#endif /* PLATFORM(WIN_CAIRO) */
/* --------- Gtk port (Unix, Windows, Mac) --------- */
#if PLATFORM(GTK)
-
-#if OS(UNIX)
-#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
-#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 1
-#endif
+#if !defined(ENABLE_KEYBOARD_KEY_ATTRIBUTE)
+#define ENABLE_KEYBOARD_KEY_ATTRIBUTE 1
#endif
-#if !defined(ENABLE_SUBPIXEL_LAYOUT)
-#define ENABLE_SUBPIXEL_LAYOUT 1
+#if !defined(ENABLE_KEYBOARD_CODE_ATTRIBUTE)
+#define ENABLE_KEYBOARD_CODE_ATTRIBUTE 1
#endif
-
-#if !defined(ENABLE_8BIT_TEXTRUN)
-#define ENABLE_8BIT_TEXTRUN 1
-#endif
-
#endif /* PLATFORM(GTK) */
/* ENABLE macro defaults for WebCore */
/* Do not use PLATFORM() tests in this section ! */
-#if !defined(ENABLE_3D_RENDERING)
-#define ENABLE_3D_RENDERING 0
-#endif
-
-#if !defined(ENABLE_8BIT_TEXTRUN)
-#define ENABLE_8BIT_TEXTRUN 0
+#if !defined(ENABLE_3D_TRANSFORMS)
+#define ENABLE_3D_TRANSFORMS 0
#endif
#if !defined(ENABLE_ACCELERATED_2D_CANVAS)
@@ -356,12 +348,8 @@
#define ENABLE_ACCELERATED_OVERFLOW_SCROLLING 0
#endif
-#if !defined(ENABLE_BATTERY_STATUS)
-#define ENABLE_BATTERY_STATUS 0
-#endif
-
-#if !defined(ENABLE_BLOB)
-#define ENABLE_BLOB 0
+#if !defined(ENABLE_APNG)
+#define ENABLE_APNG 1
#endif
#if !defined(ENABLE_CANVAS_PATH)
@@ -376,16 +364,12 @@
#define ENABLE_CHANNEL_MESSAGING 1
#endif
-#if !defined(ENABLE_CONTEXT_MENUS)
-#define ENABLE_CONTEXT_MENUS 1
-#endif
-
-#if !defined(ENABLE_CSP_NEXT)
-#define ENABLE_CSP_NEXT 0
+#if !defined(ENABLE_CONTENT_EXTENSIONS)
+#define ENABLE_CONTENT_EXTENSIONS 0
#endif
-#if !defined(ENABLE_CSS3_CONDITIONAL_RULES)
-#define ENABLE_CSS3_CONDITIONAL_RULES 0
+#if !defined(ENABLE_CONTEXT_MENUS)
+#define ENABLE_CONTEXT_MENUS 1
#endif
#if !defined(ENABLE_CSS3_TEXT)
@@ -404,10 +388,6 @@
#define ENABLE_CSS_COMPOSITING 0
#endif
-#if !defined(ENABLE_CSS_FILTERS)
-#define ENABLE_CSS_FILTERS 0
-#endif
-
#if !defined(ENABLE_CSS_IMAGE_ORIENTATION)
#define ENABLE_CSS_IMAGE_ORIENTATION 0
#endif
@@ -416,18 +396,6 @@
#define ENABLE_CSS_IMAGE_RESOLUTION 0
#endif
-#if !defined(ENABLE_CSS_IMAGE_SET)
-#define ENABLE_CSS_IMAGE_SET 0
-#endif
-
-#if !defined(ENABLE_CSS_STICKY_POSITION)
-#define ENABLE_CSS_STICKY_POSITION 0
-#endif
-
-#if !defined(ENABLE_CSS_TRANSFORMS_ANIMATIONS_TRANSITIONS_UNPREFIXED)
-#define ENABLE_CSS_TRANSFORMS_ANIMATIONS_TRANSITIONS_UNPREFIXED 0
-#endif
-
#if !defined(ENABLE_CURSOR_SUPPORT)
#define ENABLE_CURSOR_SUPPORT 1
#endif
@@ -448,24 +416,12 @@
#define ENABLE_DATA_TRANSFER_ITEMS 0
#endif
-#if !defined(ENABLE_DELETION_UI)
-#define ENABLE_DELETION_UI 0
-#endif
-
-#if !defined(ENABLE_DETAILS_ELEMENT)
-#define ENABLE_DETAILS_ELEMENT 1
-#endif
-
#if !defined(ENABLE_DEVICE_ORIENTATION)
#define ENABLE_DEVICE_ORIENTATION 0
#endif
-#if !defined(ENABLE_DIRECTORY_UPLOAD)
-#define ENABLE_DIRECTORY_UPLOAD 0
-#endif
-
#if !defined(ENABLE_DOWNLOAD_ATTRIBUTE)
-#define ENABLE_DOWNLOAD_ATTRIBUTE 0
+#define ENABLE_DOWNLOAD_ATTRIBUTE 1
#endif
#if !defined(ENABLE_DRAG_SUPPORT)
@@ -476,16 +432,12 @@
#define ENABLE_ENCRYPTED_MEDIA 0
#endif
-#if !defined(ENABLE_ENCRYPTED_MEDIA_V2)
-#define ENABLE_ENCRYPTED_MEDIA_V2 0
-#endif
-
-#if !defined(ENABLE_FAST_MOBILE_SCROLLING)
-#define ENABLE_FAST_MOBILE_SCROLLING 0
+#if !defined(ENABLE_FETCH_API)
+#define ENABLE_FETCH_API 1
#endif
-#if !defined(ENABLE_FILTERS)
-#define ENABLE_FILTERS 0
+#if !defined(ENABLE_FILTERS_LEVEL_2)
+#define ENABLE_FILTERS_LEVEL_2 0
#endif
#if !defined(ENABLE_FONT_LOAD_EVENTS)
@@ -504,26 +456,18 @@
#define ENABLE_GAMEPAD 0
#endif
-#if !defined(ENABLE_GEOLOCATION)
-#define ENABLE_GEOLOCATION 0
-#endif
-
-#if !defined(ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING)
-#define ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING 0
+#if !defined(ENABLE_GAMEPAD_DEPRECATED)
+#define ENABLE_GAMEPAD_DEPRECATED 0
#endif
-#if !defined(ENABLE_HIGH_DPI_CANVAS)
-#define ENABLE_HIGH_DPI_CANVAS 0
+#if !defined(ENABLE_GEOLOCATION)
+#define ENABLE_GEOLOCATION 0
#endif
#if !defined(ENABLE_ICONDATABASE)
#define ENABLE_ICONDATABASE 1
#endif
-#if !defined(ENABLE_IFRAME_SEAMLESS)
-#define ENABLE_IFRAME_SEAMLESS 1
-#endif
-
#if !defined(ENABLE_IMAGE_DECODER_DOWN_SAMPLING)
#define ENABLE_IMAGE_DECODER_DOWN_SAMPLING 0
#endif
@@ -532,8 +476,8 @@
#define ENABLE_INDEXED_DATABASE 0
#endif
-#if !defined(ENABLE_INPUT_SPEECH)
-#define ENABLE_INPUT_SPEECH 0
+#if !defined(ENABLE_INDEXED_DATABASE_IN_WORKERS)
+#define ENABLE_INDEXED_DATABASE_IN_WORKERS 0
#endif
#if !defined(ENABLE_INPUT_TYPE_COLOR)
@@ -574,8 +518,12 @@
#endif
#endif
-#if !defined(ENABLE_INSPECTOR)
-#define ENABLE_INSPECTOR 1
+#if !defined(ENABLE_INSPECTOR_ALTERNATE_DISPATCHERS)
+#define ENABLE_INSPECTOR_ALTERNATE_DISPATCHERS 0
+#endif
+
+#if !defined(ENABLE_INTL)
+#define ENABLE_INTL 0
#endif
#if !defined(ENABLE_JAVASCRIPT_I18N_API)
@@ -586,6 +534,10 @@
#define ENABLE_LEGACY_CSS_VENDOR_PREFIXES 0
#endif
+#if !defined(ENABLE_LEGACY_ENCRYPTED_MEDIA)
+#define ENABLE_LEGACY_ENCRYPTED_MEDIA 0
+#endif
+
#if !defined(ENABLE_LEGACY_NOTIFICATIONS)
#define ENABLE_LEGACY_NOTIFICATIONS 0
#endif
@@ -594,10 +546,6 @@
#define ENABLE_LEGACY_VENDOR_PREFIXES 0
#endif
-#if !defined(ENABLE_LEGACY_VIEWPORT_ADAPTION)
-#define ENABLE_LEGACY_VIEWPORT_ADAPTION 0
-#endif
-
#if !defined(ENABLE_LETTERPRESS)
#define ENABLE_LETTERPRESS 0
#endif
@@ -642,10 +590,18 @@
#define ENABLE_MOUSE_CURSOR_SCALE 0
#endif
+#if !defined(ENABLE_MOUSE_FORCE_EVENTS)
+#define ENABLE_MOUSE_FORCE_EVENTS 1
+#endif
+
#if !defined(ENABLE_NAVIGATOR_CONTENT_UTILS)
#define ENABLE_NAVIGATOR_CONTENT_UTILS 0
#endif
+#if !defined(ENABLE_NAVIGATOR_HWCONCURRENCY)
+#define ENABLE_NAVIGATOR_HWCONCURRENCY 1
+#endif
+
#if !defined(ENABLE_NETSCAPE_PLUGIN_API)
#define ENABLE_NETSCAPE_PLUGIN_API 1
#endif
@@ -654,22 +610,10 @@
#define ENABLE_NETSCAPE_PLUGIN_METADATA_CACHE 0
#endif
-#if !defined(ENABLE_NETWORK_INFO)
-#define ENABLE_NETWORK_INFO 0
-#endif
-
#if !defined(ENABLE_NOTIFICATIONS)
#define ENABLE_NOTIFICATIONS 0
#endif
-#if !defined(ENABLE_OBJECT_MARK_LOGGING)
-#define ENABLE_OBJECT_MARK_LOGGING 0
-#endif
-
-#if !defined(ENABLE_OPENCL)
-#define ENABLE_OPENCL 0
-#endif
-
#if !defined(ENABLE_OPENTYPE_VERTICAL)
#define ENABLE_OPENTYPE_VERTICAL 0
#endif
@@ -678,34 +622,14 @@
#define ENABLE_ORIENTATION_EVENTS 0
#endif
-#if !defined(ENABLE_PAGE_VISIBILITY_API)
-#define ENABLE_PAGE_VISIBILITY_API 0
-#endif
-
#if OS(WINDOWS)
#if !defined(ENABLE_PAN_SCROLLING)
#define ENABLE_PAN_SCROLLING 1
#endif
#endif
-#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
-#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 0
-#endif
-
-#if !defined(ENABLE_PLUGIN_PROXY_FOR_VIDEO)
-#define ENABLE_PLUGIN_PROXY_FOR_VIDEO 0
-#endif
-
#if !defined(ENABLE_POINTER_LOCK)
-#define ENABLE_POINTER_LOCK 0
-#endif
-
-#if !defined(ENABLE_PROGRESS_ELEMENT)
-#define ENABLE_PROGRESS_ELEMENT 0
-#endif
-
-#if !defined(ENABLE_PROMISES)
-#define ENABLE_PROMISES 0
+#define ENABLE_POINTER_LOCK 1
#endif
#if !defined(ENABLE_PROXIMITY_EVENTS)
@@ -716,38 +640,18 @@
#define ENABLE_QUOTA 0
#endif
-#if !defined(ENABLE_REPAINT_THROTTLING)
-#define ENABLE_REPAINT_THROTTLING 0
-#endif
-
#if !defined(ENABLE_REMOTE_INSPECTOR)
#define ENABLE_REMOTE_INSPECTOR 0
#endif
-#if !defined(ENABLE_REQUEST_ANIMATION_FRAME)
-#define ENABLE_REQUEST_ANIMATION_FRAME 0
+#if !defined(ENABLE_REQUEST_AUTOCOMPLETE)
+#define ENABLE_REQUEST_AUTOCOMPLETE 0
#endif
#if !defined(ENABLE_RUBBER_BANDING)
#define ENABLE_RUBBER_BANDING 0
#endif
-#if !defined(ENABLE_SATURATED_LAYOUT_ARITHMETIC)
-#define ENABLE_SATURATED_LAYOUT_ARITHMETIC 0
-#endif
-
-#if !defined(ENABLE_SCRIPTED_SPEECH)
-#define ENABLE_SCRIPTED_SPEECH 0
-#endif
-
-#if !defined(ENABLE_SHADOW_DOM)
-#define ENABLE_SHADOW_DOM 0
-#endif
-
-#if !defined(ENABLE_SHARED_WORKERS)
-#define ENABLE_SHARED_WORKERS 0
-#endif
-
#if !defined(ENABLE_SMOOTH_SCROLLING)
#define ENABLE_SMOOTH_SCROLLING 0
#endif
@@ -760,30 +664,31 @@
#define ENABLE_SPELLCHECK 0
#endif
-#if !defined(ENABLE_SQL_DATABASE)
-#define ENABLE_SQL_DATABASE 1
-#endif
-
-#if !defined(ENABLE_SUBPIXEL_LAYOUT)
-#define ENABLE_SUBPIXEL_LAYOUT 0
+#if !defined(ENABLE_READABLE_STREAM_API)
+#if PLATFORM(WIN)
+#define ENABLE_READABLE_STREAM_API 0
+#else
+#define ENABLE_READABLE_STREAM_API 1
#endif
-
-#if !defined(ENABLE_SVG)
-#define ENABLE_SVG 1
+#if !defined(ENABLE_READABLE_BYTE_STREAM_API)
+#if PLATFORM(WIN)
+#define ENABLE_READABLE_BYTE_STREAM_API 0
+#else
+#define ENABLE_READABLE_BYTE_STREAM_API 1
#endif
-
-#if ENABLE(SVG)
-#if !defined(ENABLE_SVG_FONTS)
-#define ENABLE_SVG_FONTS 1
#endif
#endif
-#if !defined(ENABLE_TEMPLATE_ELEMENT)
-#define ENABLE_TEMPLATE_ELEMENT 1
+#if !defined(ENABLE_WRITABLE_STREAM_API)
+#if PLATFORM(WIN)
+#define ENABLE_WRITABLE_STREAM_API 0
+#else
+#define ENABLE_WRITABLE_STREAM_API 1
+#endif
#endif
-#if !defined(ENABLE_TEXT_AUTOSIZING)
-#define ENABLE_TEXT_AUTOSIZING 0
+#if !defined(ENABLE_SVG_FONTS)
+#define ENABLE_SVG_FONTS 1
#endif
#if !defined(ENABLE_TEXT_CARET)
@@ -818,8 +723,8 @@
#define ENABLE_VIDEO_TRACK 0
#endif
-#if !defined(ENABLE_VIEWPORT)
-#define ENABLE_VIEWPORT 0
+#if !defined(ENABLE_DATACUE_VALUE)
+#define ENABLE_DATACUE_VALUE 0
#endif
#if !defined(ENABLE_VIEW_MODE_CSS_MEDIA)
@@ -830,8 +735,12 @@
#define ENABLE_WEBGL 0
#endif
+#if !defined(ENABLE_GRAPHICS_CONTEXT_3D)
+#define ENABLE_GRAPHICS_CONTEXT_3D ENABLE_WEBGL
+#endif
+
#if !defined(ENABLE_WEB_ANIMATIONS)
-#define ENABLE_WEB_ANIMATIONS 0
+#define ENABLE_WEB_ANIMATIONS 1
#endif
#if !defined(ENABLE_WEB_ARCHIVE)
@@ -858,34 +767,42 @@
#define ENABLE_WILL_REVEAL_EDGE_EVENTS 1
#endif
-#if !defined(ENABLE_XHR_TIMEOUT)
-#define ENABLE_XHR_TIMEOUT 0
-#endif
-
#if !defined(ENABLE_XSLT)
#define ENABLE_XSLT 1
#endif
-/* Asserts, invariants for macro definitions */
+#if !defined(ENABLE_KEYBOARD_KEY_ATTRIBUTE)
+#define ENABLE_KEYBOARD_KEY_ATTRIBUTE 0
+#endif
-#if ENABLE(SATURATED_LAYOUT_ARITHMETIC) && !ENABLE(SUBPIXEL_LAYOUT)
-#error "ENABLE(SATURATED_LAYOUT_ARITHMETIC) requires ENABLE(SUBPIXEL_LAYOUT)"
+#if !defined(ENABLE_KEYBOARD_CODE_ATTRIBUTE)
+#define ENABLE_KEYBOARD_CODE_ATTRIBUTE 0
#endif
-#if ENABLE(SVG_FONTS) && !ENABLE(SVG)
-#error "ENABLE(SVG_FONTS) requires ENABLE(SVG)"
+#if !defined(ENABLE_DATA_INTERACTION)
+#define ENABLE_DATA_INTERACTION 0
#endif
+/* Asserts, invariants for macro definitions */
+
#if ENABLE(VIDEO_TRACK) && !ENABLE(VIDEO)
#error "ENABLE(VIDEO_TRACK) requires ENABLE(VIDEO)"
#endif
-#if ENABLE(REMOTE_INSPECTOR) && !ENABLE(INSPECTOR)
-#error "ENABLE(REMOTE_INSPECTOR) requires ENABLE(INSPECTOR)"
+#if ENABLE(MEDIA_CONTROLS_SCRIPT) && !ENABLE(VIDEO)
+#error "ENABLE(MEDIA_CONTROLS_SCRIPT) requires ENABLE(VIDEO)"
+#endif
+
+#if ENABLE(INSPECTOR_ALTERNATE_DISPATCHERS) && !ENABLE(REMOTE_INSPECTOR)
+#error "ENABLE(INSPECTOR_ALTERNATE_DISPATCHERS) requires ENABLE(REMOTE_INSPECTOR)"
#endif
#if ENABLE(IOS_TOUCH_EVENTS) && !ENABLE(TOUCH_EVENTS)
#error "ENABLE(IOS_TOUCH_EVENTS) requires ENABLE(TOUCH_EVENTS)"
#endif
+#if ENABLE(WEBGL) && !ENABLE(GRAPHICS_CONTEXT_3D)
+#error "ENABLE(WEBGL) requires ENABLE(GRAPHICS_CONTEXT_3D)"
+#endif
+
#endif /* WTF_FeatureDefines_h */
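As a rough consumer-side sketch of how these defaults are meant to be used (assuming the ENABLE() helper from wtf/Platform.h, which evaluates to true only when the ENABLE_X macro is both defined and non-zero):

// Illustrative only; the feature name mirrors one of the defaults above.
#include <wtf/Platform.h>

#if ENABLE(WEB_ANIMATIONS)
static const bool webAnimationsEnabled = true;   // compiled only when the feature is on
#else
static const bool webAnimationsEnabled = false;
#endif

Ports override a default simply by defining the ENABLE_X macro before this header is seen; the !defined() guards above then leave their value alone.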
diff --git a/Source/WTF/wtf/FilePrintStream.h b/Source/WTF/wtf/FilePrintStream.h
index f698a0270..cf8222172 100644
--- a/Source/WTF/wtf/FilePrintStream.h
+++ b/Source/WTF/wtf/FilePrintStream.h
@@ -28,7 +28,6 @@
#include <stdio.h>
#include <wtf/PrintStream.h>
-#include <wtf/OwnPtr.h>
namespace WTF {
@@ -46,8 +45,8 @@ public:
FILE* file() { return m_file; }
- virtual void vprintf(const char* format, va_list) override WTF_ATTRIBUTE_PRINTF(2, 0);
- virtual void flush() override;
+ void vprintf(const char* format, va_list) override WTF_ATTRIBUTE_PRINTF(2, 0);
+ void flush() override;
private:
FILE* m_file;
diff --git a/Source/WTF/wtf/FlipBytes.h b/Source/WTF/wtf/FlipBytes.h
index e66fabe62..5041a8468 100644
--- a/Source/WTF/wtf/FlipBytes.h
+++ b/Source/WTF/wtf/FlipBytes.h
@@ -26,8 +26,6 @@
#ifndef FlipBytes_h
#define FlipBytes_h
-#include <wtf/Platform.h>
-
namespace WTF {
inline bool needToFlipBytesIfLittleEndian(bool littleEndian)
diff --git a/Source/WTF/wtf/ForbidHeapAllocation.h b/Source/WTF/wtf/ForbidHeapAllocation.h
new file mode 100644
index 000000000..9547aaf68
--- /dev/null
+++ b/Source/WTF/wtf/ForbidHeapAllocation.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#define WTF_FORBID_HEAP_ALLOCATION \
+private: \
+ void* operator new(size_t, void*) = delete; \
+ void* operator new[](size_t, void*) = delete; \
+ void* operator new(size_t) = delete; \
+ void operator delete(void*) = delete; \
+ void* operator new[](size_t size) = delete; \
+ void operator delete[](void*) = delete; \
+ void* operator new(size_t, NotNullTag, void* location) = delete; \
+ typedef int __thisIsHereToForceASemicolonAfterThisForbidHeapAllocationMacro
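A minimal sketch of how a class might use the new macro (the class name is hypothetical; NotNullTag is assumed to come from elsewhere in WTF, e.g. FastMalloc.h):

#include <wtf/FastMalloc.h>
#include <wtf/ForbidHeapAllocation.h>

class ScopedThing {
    WTF_FORBID_HEAP_ALLOCATION;
public:
    void doWork() { }
};

// ScopedThing onStack;              // OK: stack allocation is unaffected.
// auto* leaked = new ScopedThing;   // Does not compile: operator new is deleted.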
diff --git a/Source/WTF/wtf/Forward.h b/Source/WTF/wtf/Forward.h
index 215247fa7..d68a947d4 100644
--- a/Source/WTF/wtf/Forward.h
+++ b/Source/WTF/wtf/Forward.h
@@ -18,37 +18,40 @@
*
*/
-#ifndef WTF_Forward_h
-#define WTF_Forward_h
+#pragma once
#include <stddef.h>
+namespace std {
+template<typename T> class optional;
+}
+
namespace WTF {
template<typename T> class Function;
+template<typename T> class LazyNeverDestroyed;
template<typename T> class NeverDestroyed;
-template<typename T> class OwnPtr;
-template<typename T> class PassOwnPtr;
-template<typename T> class PassRef;
+template<typename T> class OptionSet;
template<typename T> class PassRefPtr;
-template<typename T> class RefPtr;
template<typename T> class Ref;
+template<typename T> class RefPtr;
template<typename T> class StringBuffer;
-template<typename T, size_t inlineCapacity, typename OverflowHandler> class Vector;
+template<typename... T> class Variant;
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> class Vector;
class AtomicString;
class AtomicStringImpl;
class BinarySemaphore;
class CString;
-class Decoder;
-class Encoder;
class FunctionDispatcher;
+class OrdinalNumber;
class PrintStream;
class String;
class StringBuilder;
class StringImpl;
class StringView;
+class TextPosition;
}
@@ -56,14 +59,12 @@ using WTF::AtomicString;
using WTF::AtomicStringImpl;
using WTF::BinarySemaphore;
using WTF::CString;
-using WTF::Decoder;
-using WTF::Encoder;
using WTF::Function;
using WTF::FunctionDispatcher;
+using WTF::LazyNeverDestroyed;
using WTF::NeverDestroyed;
-using WTF::OwnPtr;
-using WTF::PassOwnPtr;
-using WTF::PassRef;
+using WTF::OptionSet;
+using WTF::OrdinalNumber;
using WTF::PassRefPtr;
using WTF::PrintStream;
using WTF::Ref;
@@ -73,6 +74,6 @@ using WTF::StringBuffer;
using WTF::StringBuilder;
using WTF::StringImpl;
using WTF::StringView;
+using WTF::TextPosition;
+using WTF::Variant;
using WTF::Vector;
-
-#endif // WTF_Forward_h
diff --git a/Source/WTF/wtf/Function.h b/Source/WTF/wtf/Function.h
new file mode 100644
index 000000000..1c9585623
--- /dev/null
+++ b/Source/WTF/wtf/Function.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <memory>
+#include <wtf/FastMalloc.h>
+
+namespace WTF {
+
+template<typename> class Function;
+
+template <typename Out, typename... In>
+class Function<Out(In...)> {
+public:
+ Function() = default;
+ Function(std::nullptr_t) { }
+
+ template<typename CallableType, class = typename std::enable_if<std::is_rvalue_reference<CallableType&&>::value>::type>
+ Function(CallableType&& callable)
+ : m_callableWrapper(std::make_unique<CallableWrapper<CallableType>>(WTFMove(callable)))
+ {
+ }
+
+ Out operator()(In... in) const
+ {
+ if (m_callableWrapper)
+ return m_callableWrapper->call(std::forward<In>(in)...);
+ return Out();
+ }
+
+ explicit operator bool() const { return !!m_callableWrapper; }
+
+ template<typename CallableType, class = typename std::enable_if<std::is_rvalue_reference<CallableType&&>::value>::type>
+ Function& operator=(CallableType&& callable)
+ {
+ m_callableWrapper = std::make_unique<CallableWrapper<CallableType>>(WTFMove(callable));
+ return *this;
+ }
+
+ Function& operator=(std::nullptr_t)
+ {
+ m_callableWrapper = nullptr;
+ return *this;
+ }
+
+private:
+ class CallableWrapperBase {
+ WTF_MAKE_FAST_ALLOCATED;
+ public:
+ virtual ~CallableWrapperBase() { }
+
+ virtual Out call(In...) = 0;
+ };
+
+ template<typename CallableType>
+ class CallableWrapper : public CallableWrapperBase {
+ public:
+ explicit CallableWrapper(CallableType&& callable)
+ : m_callable(WTFMove(callable))
+ {
+ }
+
+ CallableWrapper(const CallableWrapper&) = delete;
+ CallableWrapper& operator=(const CallableWrapper&) = delete;
+
+ Out call(In... in) final { return m_callable(std::forward<In>(in)...); }
+
+ private:
+ CallableType m_callable;
+ };
+
+ std::unique_ptr<CallableWrapperBase> m_callableWrapper;
+};
+
+} // namespace WTF
+
+using WTF::Function;
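A sketch of how the new move-only Function might be used (the function names here are illustrative, not WebKit API); note that the constructor is constrained to rvalue callables, so callers pass a temporary or a WTFMove()'d lambda:

#include <wtf/Function.h>

static void runLater(Function<void(int)>&& task)
{
    // A null Function is safe to call; operator() returns a
    // default-constructed result in that case.
    if (task)
        task(42);
}

// Usage sketch:
//     runLater([](int value) { /* ... */ });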
diff --git a/Source/WTF/wtf/FunctionDispatcher.h b/Source/WTF/wtf/FunctionDispatcher.h
index 4e202345d..344a16020 100644
--- a/Source/WTF/wtf/FunctionDispatcher.h
+++ b/Source/WTF/wtf/FunctionDispatcher.h
@@ -23,7 +23,11 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
+#ifndef FunctionDispatcher_h
+#define FunctionDispatcher_h
+
#include <functional>
+#include <wtf/Function.h>
#include <wtf/ThreadSafeRefCounted.h>
namespace WTF {
@@ -35,7 +39,7 @@ class FunctionDispatcher : public ThreadSafeRefCounted<FunctionDispatcher> {
public:
WTF_EXPORT_PRIVATE virtual ~FunctionDispatcher();
- virtual void dispatch(std::function<void ()>) = 0;
+ virtual void dispatch(Function<void ()>&&) = 0;
protected:
WTF_EXPORT_PRIVATE FunctionDispatcher();
@@ -44,3 +48,5 @@ protected:
} // namespace WTF
using WTF::FunctionDispatcher;
+
+#endif // FunctionDispatcher_h
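A hedged sketch of a dispatcher adopting the new rvalue-reference signature (the class is illustrative; a real dispatcher would hand the callable to another thread rather than run it inline):

#include <wtf/Function.h>
#include <wtf/FunctionDispatcher.h>

class InlineDispatcher final : public WTF::FunctionDispatcher {
public:
    // The callable is taken by rvalue reference, so the dispatcher owns it.
    void dispatch(WTF::Function<void()>&& function) override
    {
        function();
    }
};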
diff --git a/Source/WTF/wtf/Functional.h b/Source/WTF/wtf/Functional.h
deleted file mode 100644
index 45ca9411b..000000000
--- a/Source/WTF/wtf/Functional.h
+++ /dev/null
@@ -1,782 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef WTF_Functional_h
-#define WTF_Functional_h
-
-#include <wtf/Assertions.h>
-#include <wtf/PassRefPtr.h>
-#include <wtf/RefPtr.h>
-#include <wtf/ThreadSafeRefCounted.h>
-#include <wtf/WeakPtr.h>
-
-#if OS(DARWIN) && COMPILER_SUPPORTS(BLOCKS)
-#include <Block.h>
-#include <wtf/ObjcRuntimeExtras.h>
-#endif
-
-namespace WTF {
-
-// Functional.h provides a very simple way to bind a function pointer and arguments together into a function object
-// that can be stored, copied and invoked, similar to how boost::bind and std::bind work in C++11.
-
-// Helper class template to determine whether a given type has ref and deref member functions
-// with the right type signature.
-template<typename T>
-class HasRefAndDeref {
- typedef char YesType;
- struct NoType {
- char padding[8];
- };
-
- struct BaseMixin {
- void deref();
- void ref();
- };
-
- struct Base : public T, public BaseMixin { };
-
- template<typename U, U> struct
- TypeChecker { };
-
- template<typename U>
- static NoType refCheck(U*, TypeChecker<void (BaseMixin::*)(), &U::ref>* = 0);
- static YesType refCheck(...);
-
- template<typename U>
- static NoType derefCheck(U*, TypeChecker<void (BaseMixin::*)(), &U::deref>* = 0);
- static YesType derefCheck(...);
-
-public:
- static const bool value = sizeof(refCheck(static_cast<Base*>(0))) == sizeof(YesType) && sizeof(derefCheck(static_cast<Base*>(0))) == sizeof(YesType);
-};
-
-// A FunctionWrapper is a class template that can wrap a function pointer or a member function pointer and
-// provide a unified interface for calling that function.
-template<typename>
-class FunctionWrapper;
-
-// Bound static functions:
-
-template<typename R>
-class FunctionWrapper<R (*)()> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = false;
-
- explicit FunctionWrapper(R (*function)())
- : m_function(function)
- {
- }
-
- R operator()()
- {
- return m_function();
- }
-
-private:
- R (*m_function)();
-};
-
-template<typename R, typename P1>
-class FunctionWrapper<R (*)(P1)> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = false;
-
- explicit FunctionWrapper(R (*function)(P1))
- : m_function(function)
- {
- }
-
- R operator()(P1 p1)
- {
- return m_function(p1);
- }
-
-private:
- R (*m_function)(P1);
-};
-
-template<typename R, typename P1, typename P2>
-class FunctionWrapper<R (*)(P1, P2)> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = false;
-
- explicit FunctionWrapper(R (*function)(P1, P2))
- : m_function(function)
- {
- }
-
- R operator()(P1 p1, P2 p2)
- {
- return m_function(p1, p2);
- }
-
-private:
- R (*m_function)(P1, P2);
-};
-
-template<typename R, typename P1, typename P2, typename P3>
-class FunctionWrapper<R (*)(P1, P2, P3)> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = false;
-
- explicit FunctionWrapper(R (*function)(P1, P2, P3))
- : m_function(function)
- {
- }
-
- R operator()(P1 p1, P2 p2, P3 p3)
- {
- return m_function(p1, p2, p3);
- }
-
-private:
- R (*m_function)(P1, P2, P3);
-};
-
-template<typename R, typename P1, typename P2, typename P3, typename P4>
-class FunctionWrapper<R (*)(P1, P2, P3, P4)> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = false;
-
- explicit FunctionWrapper(R (*function)(P1, P2, P3, P4))
- : m_function(function)
- {
- }
-
- R operator()(P1 p1, P2 p2, P3 p3, P4 p4)
- {
- return m_function(p1, p2, p3, p4);
- }
-
-private:
- R (*m_function)(P1, P2, P3, P4);
-};
-
-template<typename R, typename P1, typename P2, typename P3, typename P4, typename P5>
-class FunctionWrapper<R (*)(P1, P2, P3, P4, P5)> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = false;
-
- explicit FunctionWrapper(R (*function)(P1, P2, P3, P4, P5))
- : m_function(function)
- {
- }
-
- R operator()(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5)
- {
- return m_function(p1, p2, p3, p4, p5);
- }
-
-private:
- R (*m_function)(P1, P2, P3, P4, P5);
-};
-
-// Bound member functions:
-
-template<typename R, typename C>
-class FunctionWrapper<R (C::*)()> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = HasRefAndDeref<C>::value;
-
- explicit FunctionWrapper(R (C::*function)())
- : m_function(function)
- {
- }
-
- R operator()(C* c)
- {
- return (c->*m_function)();
- }
-
- R operator()(const WeakPtr<C>& c)
- {
- C* obj = c.get();
- if (!obj)
- return R();
- return (obj->*m_function)();
- }
-
-private:
- R (C::*m_function)();
-};
-
-template<typename R, typename C, typename P1>
-class FunctionWrapper<R (C::*)(P1)> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = HasRefAndDeref<C>::value;
-
- explicit FunctionWrapper(R (C::*function)(P1))
- : m_function(function)
- {
- }
-
- R operator()(C* c, P1 p1)
- {
- return (c->*m_function)(p1);
- }
-
- R operator()(const WeakPtr<C>& c, P1 p1)
- {
- C* obj = c.get();
- if (!obj)
- return R();
- return (obj->*m_function)(p1);
- }
-
-private:
- R (C::*m_function)(P1);
-};
-
-template<typename R, typename C, typename P1, typename P2>
-class FunctionWrapper<R (C::*)(P1, P2)> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = HasRefAndDeref<C>::value;
-
- explicit FunctionWrapper(R (C::*function)(P1, P2))
- : m_function(function)
- {
- }
-
- R operator()(C* c, P1 p1, P2 p2)
- {
- return (c->*m_function)(p1, p2);
- }
-
- R operator()(const WeakPtr<C>& c, P1 p1, P2 p2)
- {
- C* obj = c.get();
- if (!obj)
- return R();
- return (obj->*m_function)(p1, p2);
- }
-
-private:
- R (C::*m_function)(P1, P2);
-};
-
-template<typename R, typename C, typename P1, typename P2, typename P3>
-class FunctionWrapper<R (C::*)(P1, P2, P3)> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = HasRefAndDeref<C>::value;
-
- explicit FunctionWrapper(R (C::*function)(P1, P2, P3))
- : m_function(function)
- {
- }
-
- R operator()(C* c, P1 p1, P2 p2, P3 p3)
- {
- return (c->*m_function)(p1, p2, p3);
- }
-
- R operator()(const WeakPtr<C>& c, P1 p1, P2 p2, P3 p3)
- {
- C* obj = c.get();
- if (!obj)
- return R();
- return (obj->*m_function)(p1, p2, p3);
- }
-
-private:
- R (C::*m_function)(P1, P2, P3);
-};
-
-template<typename R, typename C, typename P1, typename P2, typename P3, typename P4>
-class FunctionWrapper<R (C::*)(P1, P2, P3, P4)> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = HasRefAndDeref<C>::value;
-
- explicit FunctionWrapper(R (C::*function)(P1, P2, P3, P4))
- : m_function(function)
- {
- }
-
- R operator()(C* c, P1 p1, P2 p2, P3 p3, P4 p4)
- {
- return (c->*m_function)(p1, p2, p3, p4);
- }
-
- R operator()(const WeakPtr<C>& c, P1 p1, P2 p2, P3 p3, P4 p4)
- {
- C* obj = c.get();
- if (!obj)
- return R();
- return (obj->*m_function)(p1, p2, p3, p4);
- }
-
-private:
- R (C::*m_function)(P1, P2, P3, P4);
-};
-
-template<typename R, typename C, typename P1, typename P2, typename P3, typename P4, typename P5>
-class FunctionWrapper<R (C::*)(P1, P2, P3, P4, P5)> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = HasRefAndDeref<C>::value;
-
- explicit FunctionWrapper(R (C::*function)(P1, P2, P3, P4, P5))
- : m_function(function)
- {
- }
-
- R operator()(C* c, P1 p1, P2 p2, P3 p3, P4 p4, P5 p5)
- {
- return (c->*m_function)(p1, p2, p3, p4, p5);
- }
-
- R operator()(const WeakPtr<C>& c, P1 p1, P2 p2, P3 p3, P4 p4, P5 p5)
- {
- C* obj = c.get();
- if (!obj)
- return R();
- return (obj->*m_function)(p1, p2, p3, p4, p5);
- }
-
-private:
- R (C::*m_function)(P1, P2, P3, P4, P5);
-};
-
-#if OS(DARWIN) && COMPILER_SUPPORTS(BLOCKS)
-template<typename R>
-class FunctionWrapper<R (^)()> {
-public:
- typedef R ResultType;
- static const bool shouldRefFirstParameter = false;
-
- explicit FunctionWrapper(R (^block)())
- : m_block(Block_copy(block))
- {
- }
-
- FunctionWrapper(const FunctionWrapper& other)
- : m_block(Block_copy(other.m_block))
- {
- }
-
- ~FunctionWrapper()
- {
- Block_release(m_block);
- }
-
- R operator()()
- {
- return m_block();
- }
-
-private:
- R (^m_block)();
-};
-#endif
-
-template<typename T, bool shouldRefAndDeref> struct RefAndDeref {
- static void ref(T) { }
- static void deref(T) { }
-};
-
-template<typename T> struct RefAndDeref<T*, true> {
- static void ref(T* t) { t->ref(); }
- static void deref(T* t) { t->deref(); }
-};
-
-template<typename T> struct ParamStorageTraits {
- typedef T StorageType;
-
- static StorageType wrap(const T& value) { return value; }
- static const T& unwrap(const StorageType& value) { return value; }
-};
-
-template<typename T> struct ParamStorageTraits<PassRefPtr<T>> {
- typedef RefPtr<T> StorageType;
-
- static StorageType wrap(PassRefPtr<T> value) { return value; }
- static T* unwrap(const StorageType& value) { return value.get(); }
-};
-
-template<typename T> struct ParamStorageTraits<RefPtr<T>> {
- typedef RefPtr<T> StorageType;
-
- static StorageType wrap(RefPtr<T> value) { return value.release(); }
- static T* unwrap(const StorageType& value) { return value.get(); }
-};
-
-template<typename> class RetainPtr;
-
-template<typename T> struct ParamStorageTraits<RetainPtr<T>> {
- typedef RetainPtr<T> StorageType;
-
- static StorageType wrap(const RetainPtr<T>& value) { return value; }
- static typename RetainPtr<T>::PtrType unwrap(const StorageType& value) { return value.get(); }
-};
-
-class FunctionImplBase : public ThreadSafeRefCounted<FunctionImplBase> {
-public:
- virtual ~FunctionImplBase() { }
-};
-
-template<typename>
-class FunctionImpl;
-
-template<typename R>
-class FunctionImpl<R ()> : public FunctionImplBase {
-public:
- virtual R operator()() = 0;
-};
-
-template<typename FunctionWrapper, typename FunctionType>
-class BoundFunctionImpl;
-
-template<typename FunctionWrapper, typename R>
-class BoundFunctionImpl<FunctionWrapper, R ()> : public FunctionImpl<typename FunctionWrapper::ResultType ()> {
-public:
- explicit BoundFunctionImpl(FunctionWrapper functionWrapper)
- : m_functionWrapper(functionWrapper)
- {
- }
-
- virtual typename FunctionWrapper::ResultType operator()()
- {
- return m_functionWrapper();
- }
-
-private:
- FunctionWrapper m_functionWrapper;
-};
-
-template<typename FunctionWrapper, typename R, typename P1>
-class BoundFunctionImpl<FunctionWrapper, R (P1)> : public FunctionImpl<typename FunctionWrapper::ResultType ()> {
-public:
- BoundFunctionImpl(FunctionWrapper functionWrapper, const P1& p1)
- : m_functionWrapper(functionWrapper)
- , m_p1(ParamStorageTraits<P1>::wrap(p1))
- {
- RefAndDeref<P1, FunctionWrapper::shouldRefFirstParameter>::ref(m_p1);
- }
-
- ~BoundFunctionImpl()
- {
- RefAndDeref<P1, FunctionWrapper::shouldRefFirstParameter>::deref(m_p1);
- }
-
- virtual typename FunctionWrapper::ResultType operator()()
- {
- return m_functionWrapper(ParamStorageTraits<P1>::unwrap(m_p1));
- }
-
-private:
- FunctionWrapper m_functionWrapper;
- typename ParamStorageTraits<P1>::StorageType m_p1;
-};
-
-template<typename FunctionWrapper, typename R, typename P1, typename P2>
-class BoundFunctionImpl<FunctionWrapper, R (P1, P2)> : public FunctionImpl<typename FunctionWrapper::ResultType ()> {
-public:
- BoundFunctionImpl(FunctionWrapper functionWrapper, const P1& p1, const P2& p2)
- : m_functionWrapper(functionWrapper)
- , m_p1(ParamStorageTraits<P1>::wrap(p1))
- , m_p2(ParamStorageTraits<P2>::wrap(p2))
- {
- RefAndDeref<P1, FunctionWrapper::shouldRefFirstParameter>::ref(m_p1);
- }
-
- ~BoundFunctionImpl()
- {
- RefAndDeref<P1, FunctionWrapper::shouldRefFirstParameter>::deref(m_p1);
- }
-
- virtual typename FunctionWrapper::ResultType operator()()
- {
- return m_functionWrapper(ParamStorageTraits<P1>::unwrap(m_p1), ParamStorageTraits<P2>::unwrap(m_p2));
- }
-
-private:
- FunctionWrapper m_functionWrapper;
- typename ParamStorageTraits<P1>::StorageType m_p1;
- typename ParamStorageTraits<P2>::StorageType m_p2;
-};
-
-template<typename FunctionWrapper, typename R, typename P1, typename P2, typename P3>
-class BoundFunctionImpl<FunctionWrapper, R (P1, P2, P3)> : public FunctionImpl<typename FunctionWrapper::ResultType ()> {
-public:
- BoundFunctionImpl(FunctionWrapper functionWrapper, const P1& p1, const P2& p2, const P3& p3)
- : m_functionWrapper(functionWrapper)
- , m_p1(ParamStorageTraits<P1>::wrap(p1))
- , m_p2(ParamStorageTraits<P2>::wrap(p2))
- , m_p3(ParamStorageTraits<P3>::wrap(p3))
- {
- RefAndDeref<P1, FunctionWrapper::shouldRefFirstParameter>::ref(m_p1);
- }
-
- ~BoundFunctionImpl()
- {
- RefAndDeref<P1, FunctionWrapper::shouldRefFirstParameter>::deref(m_p1);
- }
-
- virtual typename FunctionWrapper::ResultType operator()()
- {
- return m_functionWrapper(ParamStorageTraits<P1>::unwrap(m_p1), ParamStorageTraits<P2>::unwrap(m_p2), ParamStorageTraits<P3>::unwrap(m_p3));
- }
-
-private:
- FunctionWrapper m_functionWrapper;
- typename ParamStorageTraits<P1>::StorageType m_p1;
- typename ParamStorageTraits<P2>::StorageType m_p2;
- typename ParamStorageTraits<P3>::StorageType m_p3;
-};
-
-template<typename FunctionWrapper, typename R, typename P1, typename P2, typename P3, typename P4>
-class BoundFunctionImpl<FunctionWrapper, R (P1, P2, P3, P4)> : public FunctionImpl<typename FunctionWrapper::ResultType ()> {
-public:
- BoundFunctionImpl(FunctionWrapper functionWrapper, const P1& p1, const P2& p2, const P3& p3, const P4& p4)
- : m_functionWrapper(functionWrapper)
- , m_p1(ParamStorageTraits<P1>::wrap(p1))
- , m_p2(ParamStorageTraits<P2>::wrap(p2))
- , m_p3(ParamStorageTraits<P3>::wrap(p3))
- , m_p4(ParamStorageTraits<P4>::wrap(p4))
- {
- RefAndDeref<P1, FunctionWrapper::shouldRefFirstParameter>::ref(m_p1);
- }
-
- ~BoundFunctionImpl()
- {
- RefAndDeref<P1, FunctionWrapper::shouldRefFirstParameter>::deref(m_p1);
- }
-
- virtual typename FunctionWrapper::ResultType operator()()
- {
- return m_functionWrapper(ParamStorageTraits<P1>::unwrap(m_p1), ParamStorageTraits<P2>::unwrap(m_p2), ParamStorageTraits<P3>::unwrap(m_p3), ParamStorageTraits<P4>::unwrap(m_p4));
- }
-
-private:
- FunctionWrapper m_functionWrapper;
- typename ParamStorageTraits<P1>::StorageType m_p1;
- typename ParamStorageTraits<P2>::StorageType m_p2;
- typename ParamStorageTraits<P3>::StorageType m_p3;
- typename ParamStorageTraits<P4>::StorageType m_p4;
-};
-
-template<typename FunctionWrapper, typename R, typename P1, typename P2, typename P3, typename P4, typename P5>
-class BoundFunctionImpl<FunctionWrapper, R (P1, P2, P3, P4, P5)> : public FunctionImpl<typename FunctionWrapper::ResultType ()> {
-public:
- BoundFunctionImpl(FunctionWrapper functionWrapper, const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5)
- : m_functionWrapper(functionWrapper)
- , m_p1(ParamStorageTraits<P1>::wrap(p1))
- , m_p2(ParamStorageTraits<P2>::wrap(p2))
- , m_p3(ParamStorageTraits<P3>::wrap(p3))
- , m_p4(ParamStorageTraits<P4>::wrap(p4))
- , m_p5(ParamStorageTraits<P5>::wrap(p5))
- {
- RefAndDeref<P1, FunctionWrapper::shouldRefFirstParameter>::ref(m_p1);
- }
-
- ~BoundFunctionImpl()
- {
- RefAndDeref<P1, FunctionWrapper::shouldRefFirstParameter>::deref(m_p1);
- }
-
- virtual typename FunctionWrapper::ResultType operator()()
- {
- return m_functionWrapper(ParamStorageTraits<P1>::unwrap(m_p1), ParamStorageTraits<P2>::unwrap(m_p2), ParamStorageTraits<P3>::unwrap(m_p3), ParamStorageTraits<P4>::unwrap(m_p4), ParamStorageTraits<P5>::unwrap(m_p5));
- }
-
-private:
- FunctionWrapper m_functionWrapper;
- typename ParamStorageTraits<P1>::StorageType m_p1;
- typename ParamStorageTraits<P2>::StorageType m_p2;
- typename ParamStorageTraits<P3>::StorageType m_p3;
- typename ParamStorageTraits<P4>::StorageType m_p4;
- typename ParamStorageTraits<P5>::StorageType m_p5;
-};
-
-template<typename FunctionWrapper, typename R, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6>
-class BoundFunctionImpl<FunctionWrapper, R (P1, P2, P3, P4, P5, P6)> : public FunctionImpl<typename FunctionWrapper::ResultType ()> {
-public:
- BoundFunctionImpl(FunctionWrapper functionWrapper, const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, const P6& p6)
- : m_functionWrapper(functionWrapper)
- , m_p1(ParamStorageTraits<P1>::wrap(p1))
- , m_p2(ParamStorageTraits<P2>::wrap(p2))
- , m_p3(ParamStorageTraits<P3>::wrap(p3))
- , m_p4(ParamStorageTraits<P4>::wrap(p4))
- , m_p5(ParamStorageTraits<P5>::wrap(p5))
- , m_p6(ParamStorageTraits<P6>::wrap(p6))
- {
- RefAndDeref<P1, FunctionWrapper::shouldRefFirstParameter>::ref(m_p1);
- }
-
- ~BoundFunctionImpl()
- {
- RefAndDeref<P1, FunctionWrapper::shouldRefFirstParameter>::deref(m_p1);
- }
-
- virtual typename FunctionWrapper::ResultType operator()()
- {
- return m_functionWrapper(ParamStorageTraits<P1>::unwrap(m_p1), ParamStorageTraits<P2>::unwrap(m_p2), ParamStorageTraits<P3>::unwrap(m_p3), ParamStorageTraits<P4>::unwrap(m_p4), ParamStorageTraits<P5>::unwrap(m_p5), ParamStorageTraits<P6>::unwrap(m_p6));
- }
-
-private:
- FunctionWrapper m_functionWrapper;
- typename ParamStorageTraits<P1>::StorageType m_p1;
- typename ParamStorageTraits<P2>::StorageType m_p2;
- typename ParamStorageTraits<P3>::StorageType m_p3;
- typename ParamStorageTraits<P4>::StorageType m_p4;
- typename ParamStorageTraits<P5>::StorageType m_p5;
- typename ParamStorageTraits<P6>::StorageType m_p6;
-};
-
-class FunctionBase {
-public:
- bool isNull() const
- {
- return !m_impl;
- }
-
-protected:
- FunctionBase()
- {
- }
-
- explicit FunctionBase(PassRefPtr<FunctionImplBase> impl)
- : m_impl(impl)
- {
- }
-
- template<typename FunctionType> FunctionImpl<FunctionType>* impl() const
- {
- return static_cast<FunctionImpl<FunctionType>*>(m_impl.get());
- }
-
-private:
- RefPtr<FunctionImplBase> m_impl;
-};
-
-template<typename>
-class Function;
-
-template<typename R>
-class Function<R ()> : public FunctionBase {
-public:
- Function()
- {
- }
-
- Function(PassRefPtr<FunctionImpl<R ()>> impl)
- : FunctionBase(impl)
- {
- }
-
- R operator()() const
- {
- ASSERT(!isNull());
-
- return impl<R ()>()->operator()();
- }
-
-#if OS(DARWIN) && COMPILER_SUPPORTS(BLOCKS)
- typedef void (^BlockType)();
- operator BlockType() const
- {
- // Declare a RefPtr here so we'll be sure that the underlying FunctionImpl object's
- // lifecycle is managed correctly.
- RefPtr<FunctionImpl<R ()>> functionImpl = impl<R ()>();
- BlockType block = ^{
- functionImpl->operator()();
- };
-
- // This is equivalent to:
- //
- // return [[block copy] autorelease];
- //
- // We're using manual objc_msgSend calls here because we don't want to make the entire
- // file Objective-C. It's useful to be able to implicitly convert a Function to
- // a block even in C++ code, since that allows us to do things like:
- //
- // dispatch_async(queue, bind(...));
- //
- id copiedBlock = wtfObjcMsgSend<id>((id)block, sel_registerName("copy"));
- id autoreleasedBlock = wtfObjcMsgSend<id>(copiedBlock, sel_registerName("autorelease"));
- return (BlockType)autoreleasedBlock;
- }
-#endif
-};
-
-template<typename FunctionType>
-Function<typename FunctionWrapper<FunctionType>::ResultType ()> bind(FunctionType function)
-{
- return Function<typename FunctionWrapper<FunctionType>::ResultType ()>(adoptRef(new BoundFunctionImpl<FunctionWrapper<FunctionType>, typename FunctionWrapper<FunctionType>::ResultType ()>(FunctionWrapper<FunctionType>(function))));
-}
-
-template<typename FunctionType, typename A1>
-Function<typename FunctionWrapper<FunctionType>::ResultType ()> bind(FunctionType function, const A1& a1)
-{
- return Function<typename FunctionWrapper<FunctionType>::ResultType ()>(adoptRef(new BoundFunctionImpl<FunctionWrapper<FunctionType>, typename FunctionWrapper<FunctionType>::ResultType (A1)>(FunctionWrapper<FunctionType>(function), a1)));
-}
-
-template<typename FunctionType, typename A1, typename A2>
-Function<typename FunctionWrapper<FunctionType>::ResultType ()> bind(FunctionType function, const A1& a1, const A2& a2)
-{
- return Function<typename FunctionWrapper<FunctionType>::ResultType ()>(adoptRef(new BoundFunctionImpl<FunctionWrapper<FunctionType>, typename FunctionWrapper<FunctionType>::ResultType (A1, A2)>(FunctionWrapper<FunctionType>(function), a1, a2)));
-}
-
-template<typename FunctionType, typename A1, typename A2, typename A3>
-Function<typename FunctionWrapper<FunctionType>::ResultType ()> bind(FunctionType function, const A1& a1, const A2& a2, const A3& a3)
-{
- return Function<typename FunctionWrapper<FunctionType>::ResultType ()>(adoptRef(new BoundFunctionImpl<FunctionWrapper<FunctionType>, typename FunctionWrapper<FunctionType>::ResultType (A1, A2, A3)>(FunctionWrapper<FunctionType>(function), a1, a2, a3)));
-}
-
-template<typename FunctionType, typename A1, typename A2, typename A3, typename A4>
-Function<typename FunctionWrapper<FunctionType>::ResultType ()> bind(FunctionType function, const A1& a1, const A2& a2, const A3& a3, const A4& a4)
-{
- return Function<typename FunctionWrapper<FunctionType>::ResultType ()>(adoptRef(new BoundFunctionImpl<FunctionWrapper<FunctionType>, typename FunctionWrapper<FunctionType>::ResultType (A1, A2, A3, A4)>(FunctionWrapper<FunctionType>(function), a1, a2, a3, a4)));
-}
-
-template<typename FunctionType, typename A1, typename A2, typename A3, typename A4, typename A5>
-Function<typename FunctionWrapper<FunctionType>::ResultType ()> bind(FunctionType function, const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5)
-{
- return Function<typename FunctionWrapper<FunctionType>::ResultType ()>(adoptRef(new BoundFunctionImpl<FunctionWrapper<FunctionType>, typename FunctionWrapper<FunctionType>::ResultType (A1, A2, A3, A4, A5)>(FunctionWrapper<FunctionType>(function), a1, a2, a3, a4, a5)));
-}
-
-template<typename FunctionType, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6>
-Function<typename FunctionWrapper<FunctionType>::ResultType ()> bind(FunctionType function, const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6)
-{
- return Function<typename FunctionWrapper<FunctionType>::ResultType ()>(adoptRef(new BoundFunctionImpl<FunctionWrapper<FunctionType>, typename FunctionWrapper<FunctionType>::ResultType (A1, A2, A3, A4, A5, A6)>(FunctionWrapper<FunctionType>(function), a1, a2, a3, a4, a5, a6)));
-}
-
-}
-
-using WTF::Function;
-using WTF::bind;
-
-#endif // WTF_Functional_h
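For context on this removal: call sites that used WTF::bind generally migrate to lambdas wrapped in WTF::Function. A rough before/after sketch under illustrative names (the old bind() would automatically ref()/deref() a refcounted receiver via shouldRefFirstParameter; with a lambda, ownership of the receiver is captured explicitly):

#include <wtf/Function.h>

struct Widget {
    void refresh() { }
};

// Old style (removed API):
//     Function<void()> task = bind(&Widget::refresh, widget);
//
// New style: the lambda captures what it needs; the caller keeps widget alive.
WTF::Function<void()> makeRefreshTask(Widget* widget)
{
    return [widget] { widget->refresh(); };
}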
diff --git a/Source/WTF/wtf/GetPtr.h b/Source/WTF/wtf/GetPtr.h
index 25a0e6d9b..78107cbe5 100644
--- a/Source/WTF/wtf/GetPtr.h
+++ b/Source/WTF/wtf/GetPtr.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Apple Computer, Inc.
+ * Copyright (C) 2006 Apple Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -21,12 +21,61 @@
#ifndef WTF_GetPtr_h
#define WTF_GetPtr_h
+#include <memory>
+
namespace WTF {
- template <typename T> inline T* getPtr(T* p)
- {
- return p;
- }
+enum HashTableDeletedValueType { HashTableDeletedValue };
+enum HashTableEmptyValueType { HashTableEmptyValue };
+
+template <typename T> inline T* getPtr(T* p) { return p; }
+
+template <typename T> struct IsSmartPtr {
+ static const bool value = false;
+};
+
+template <typename T, bool isSmartPtr>
+struct GetPtrHelperBase;
+
+template <typename T>
+struct GetPtrHelperBase<T, false /* isSmartPtr */> {
+ typedef T* PtrType;
+ static T* getPtr(T& p) { return std::addressof(p); }
+};
+
+template <typename T>
+struct GetPtrHelperBase<T, true /* isSmartPtr */> {
+ typedef typename T::PtrType PtrType;
+ static PtrType getPtr(const T& p) { return p.get(); }
+};
+
+template <typename T>
+struct GetPtrHelper : GetPtrHelperBase<T, IsSmartPtr<T>::value> {
+};
+
+template <typename T>
+inline typename GetPtrHelper<T>::PtrType getPtr(T& p)
+{
+ return GetPtrHelper<T>::getPtr(p);
+}
+
+template <typename T>
+inline typename GetPtrHelper<T>::PtrType getPtr(const T& p)
+{
+ return GetPtrHelper<T>::getPtr(p);
+}
+
+// Explicit specialization for C++ standard library types.
+
+template <typename T, typename Deleter> struct IsSmartPtr<std::unique_ptr<T, Deleter>> {
+ static const bool value = true;
+};
+
+template <typename T, typename Deleter>
+struct GetPtrHelper<std::unique_ptr<T, Deleter>> {
+ typedef T* PtrType;
+ static T* getPtr(const std::unique_ptr<T, Deleter>& p) { return p.get(); }
+};
} // namespace WTF
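A sketch of how a custom smart pointer could opt into the new getPtr() traits (MyPtr is hypothetical; it only needs a PtrType typedef and a const get()):

template<typename T>
class MyPtr {
public:
    typedef T* PtrType;
    explicit MyPtr(T* ptr) : m_ptr(ptr) { }
    T* get() const { return m_ptr; }
private:
    T* m_ptr;
};

namespace WTF {
// Opting in: IsSmartPtr makes GetPtrHelperBase pick the smart-pointer
// branch, which forwards getPtr() to MyPtr::get().
template<typename T> struct IsSmartPtr<MyPtr<T>> {
    static const bool value = true;
};
}

// After this, WTF::getPtr(somePtr) yields the raw T* for MyPtr values.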
diff --git a/Source/WTF/wtf/GraphNodeWorklist.h b/Source/WTF/wtf/GraphNodeWorklist.h
new file mode 100644
index 000000000..e2ab0781c
--- /dev/null
+++ b/Source/WTF/wtf/GraphNodeWorklist.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GraphNodeWorklist_h
+#define GraphNodeWorklist_h
+
+#include <wtf/HashSet.h>
+
+namespace WTF {
+
+template<typename Node, typename Set = HashSet<Node>>
+class GraphNodeWorklist {
+public:
+ GraphNodeWorklist() { }
+ ~GraphNodeWorklist() { }
+
+ // Returns true if we didn't know about the node before.
+ bool push(Node node)
+ {
+ if (!m_seen.add(node))
+ return false;
+ m_stack.append(node);
+ return true;
+ }
+
+ template<typename Iterable>
+ void pushAll(const Iterable& iterable)
+ {
+ for (Node node : iterable)
+ push(node);
+ }
+
+ bool isEmpty() const { return m_stack.isEmpty(); }
+ bool notEmpty() const { return !m_stack.isEmpty(); }
+
+ Node pop()
+ {
+ if (m_stack.isEmpty())
+ return Node();
+ return m_stack.takeLast();
+ }
+
+ bool saw(Node node) { return m_seen.contains(node); }
+
+ const Set& seen() const { return m_seen; }
+
+private:
+ Set m_seen;
+ Vector<Node, 16> m_stack;
+};
+
+template<typename Node, typename T>
+struct GraphNodeWith {
+ GraphNodeWith()
+ : node()
+ , data()
+ {
+ }
+
+ GraphNodeWith(Node node, const T& data)
+ : node(node)
+ , data(data)
+ {
+ }
+
+ explicit operator bool() const { return !!node; }
+
+ Node node;
+ T data;
+};
+
+template<typename Node, typename T, typename Set = HashSet<Node>>
+class ExtendedGraphNodeWorklist {
+public:
+ ExtendedGraphNodeWorklist() { }
+
+ void forcePush(const GraphNodeWith<Node, T>& entry)
+ {
+ m_stack.append(entry);
+ }
+
+ void forcePush(Node node, const T& data)
+ {
+ forcePush(GraphNodeWith<Node, T>(node, data));
+ }
+
+ bool push(const GraphNodeWith<Node, T>& entry)
+ {
+ if (!m_seen.add(entry.node))
+ return false;
+
+ forcePush(entry);
+ return true;
+ }
+
+ bool push(Node node, const T& data)
+ {
+ return push(GraphNodeWith<Node, T>(node, data));
+ }
+
+ bool notEmpty() const { return !m_stack.isEmpty(); }
+
+ GraphNodeWith<Node, T> pop()
+ {
+ if (m_stack.isEmpty())
+ return GraphNodeWith<Node, T>();
+
+ return m_stack.takeLast();
+ }
+
+private:
+ Set m_seen;
+ Vector<GraphNodeWith<Node, T>> m_stack;
+};
+
+enum class GraphVisitOrder : uint8_t {
+ Pre,
+ Post
+};
+
+template<typename Node>
+struct GraphNodeWithOrder {
+ GraphNodeWithOrder()
+ : node()
+ , order(GraphVisitOrder::Pre)
+ {
+ }
+
+ GraphNodeWithOrder(Node node, GraphVisitOrder order)
+ : node(node)
+ , order(order)
+ {
+ }
+
+ explicit operator bool() const { return node; }
+
+ Node node;
+ GraphVisitOrder order;
+};
+
+template<typename Node, typename Set = HashSet<Node>>
+class PostOrderGraphNodeWorklist {
+public:
+ PostOrderGraphNodeWorklist()
+ {
+ }
+
+ ~PostOrderGraphNodeWorklist()
+ {
+ }
+
+ bool pushPre(Node node)
+ {
+ return m_worklist.push(node, GraphVisitOrder::Pre);
+ }
+
+ void pushPost(Node node)
+ {
+ m_worklist.forcePush(node, GraphVisitOrder::Post);
+ }
+
+ bool push(Node node, GraphVisitOrder order = GraphVisitOrder::Pre)
+ {
+ switch (order) {
+ case GraphVisitOrder::Pre:
+ return pushPre(node);
+ case GraphVisitOrder::Post:
+ pushPost(node);
+ return true;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+ bool push(const GraphNodeWithOrder<Node>& data)
+ {
+ return push(data.node, data.order);
+ }
+
+ bool notEmpty() const { return m_worklist.notEmpty(); }
+
+ GraphNodeWithOrder<Node> pop()
+ {
+ GraphNodeWith<Node, GraphVisitOrder> result = m_worklist.pop();
+ return GraphNodeWithOrder<Node>(result.node, result.data);
+ }
+
+private:
+ ExtendedGraphNodeWorklist<Node, GraphVisitOrder, Set> m_worklist;
+};
+
+} // namespace WTF
+
+using WTF::GraphNodeWorklist;
+using WTF::GraphNodeWith;
+using WTF::ExtendedGraphNodeWorklist;
+using WTF::GraphVisitOrder;
+using WTF::GraphNodeWithOrder;
+using WTF::PostOrderGraphNodeWorklist;
+
+#endif // GraphNodeWorklist_h
+
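A minimal sketch of a reachability walk with the new worklist (the successors callback is assumed; integer node IDs should avoid the HashSet empty/deleted sentinel values, so real callers typically use non-zero IDs or pointers):

#include <wtf/GraphNodeWorklist.h>

// Visits every node reachable from `start` exactly once.
template<typename SuccessorFunctor>
void visitReachable(int start, const SuccessorFunctor& successors)
{
    GraphNodeWorklist<int> worklist;
    worklist.push(start);
    while (worklist.notEmpty()) {
        int node = worklist.pop();
        // push() returns false for nodes already seen, so cycles terminate.
        for (int successor : successors(node))
            worklist.push(successor);
    }
}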
diff --git a/Source/WTF/wtf/GregorianDateTime.cpp b/Source/WTF/wtf/GregorianDateTime.cpp
index 5560984e6..d6317b308 100644
--- a/Source/WTF/wtf/GregorianDateTime.cpp
+++ b/Source/WTF/wtf/GregorianDateTime.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Patrick Gansterer <paroga@paroga.com>
+ * Copyright (C) 2012, 2014 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -43,13 +43,16 @@ void GregorianDateTime::setToCurrentLocalTime()
TIME_ZONE_INFORMATION timeZoneInformation;
DWORD timeZoneId = GetTimeZoneInformation(&timeZoneInformation);
- LONG bias = timeZoneInformation.Bias;
- if (timeZoneId == TIME_ZONE_ID_DAYLIGHT)
- bias += timeZoneInformation.DaylightBias;
- else if (timeZoneId == TIME_ZONE_ID_STANDARD)
- bias += timeZoneInformation.StandardBias;
- else
- ASSERT(timeZoneId == TIME_ZONE_ID_UNKNOWN);
+ LONG bias = 0;
+ if (timeZoneId != TIME_ZONE_ID_INVALID) {
+ bias = timeZoneInformation.Bias;
+ if (timeZoneId == TIME_ZONE_ID_DAYLIGHT)
+ bias += timeZoneInformation.DaylightBias;
+ else if ((timeZoneId == TIME_ZONE_ID_STANDARD) || (timeZoneId == TIME_ZONE_ID_UNKNOWN))
+ bias += timeZoneInformation.StandardBias;
+ else
+ ASSERT(0);
+ }
m_year = systemTime.wYear;
m_month = systemTime.wMonth - 1;
@@ -64,7 +67,11 @@ void GregorianDateTime::setToCurrentLocalTime()
#else
tm localTM;
time_t localTime = time(0);
+#if HAVE(LOCALTIME_R)
localtime_r(&localTime, &localTM);
+#else
+ localtime_s(&localTime, &localTM);
+#endif
m_year = localTM.tm_year + 1900;
m_month = localTM.tm_mon;
diff --git a/Source/WTF/wtf/HashCountedSet.h b/Source/WTF/wtf/HashCountedSet.h
index 80cb6ff43..15ee14b47 100644
--- a/Source/WTF/wtf/HashCountedSet.h
+++ b/Source/WTF/wtf/HashCountedSet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005, 2006, 2008, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2005, 2006, 2008, 2013, 2016 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -21,6 +21,7 @@
#ifndef WTF_HashCountedSet_h
#define WTF_HashCountedSet_h
+#include <initializer_list>
#include <wtf/Assertions.h>
#include <wtf/HashMap.h>
#include <wtf/Vector.h>
@@ -28,7 +29,7 @@
namespace WTF {
template<typename Value, typename HashFunctions = typename DefaultHash<Value>::Hash, typename Traits = HashTraits<Value>>
- class HashCountedSet {
+ class HashCountedSet final {
WTF_MAKE_FAST_ALLOCATED;
private:
typedef HashMap<Value, unsigned, HashFunctions, Traits> ImplType;
@@ -37,11 +38,27 @@ namespace WTF {
typedef typename ImplType::iterator iterator;
typedef typename ImplType::const_iterator const_iterator;
typedef typename ImplType::AddResult AddResult;
+
+ HashCountedSet()
+ {
+ }
+
+ HashCountedSet(std::initializer_list<typename ImplType::KeyValuePairType> initializerList)
+ {
+ for (const auto& keyValuePair : initializerList)
+ add(keyValuePair.key, keyValuePair.value);
+ }
+
+ HashCountedSet(std::initializer_list<typename ImplType::KeyType> initializerList)
+ {
+ for (const auto& value : initializerList)
+ add(value);
+ }
void swap(HashCountedSet&);
- int size() const;
- int capacity() const;
+ unsigned size() const;
+ unsigned capacity() const;
bool isEmpty() const;
// Iterators iterate over pairs of values and counts.
@@ -59,7 +76,12 @@ namespace WTF {
// The return value includes both an iterator to the value's location,
// and an isNewEntry bool that indicates whether it is a new or existing entry.
AddResult add(const ValueType&);
-
+ AddResult add(ValueType&&);
+
+ // Increments the count of a value by the passed amount.
+ AddResult add(const ValueType&, unsigned);
+ AddResult add(ValueType&&, unsigned);
+
// Decrements the count of the value, and removes it if count goes down to zero.
// Returns true if the value is removed.
bool remove(const ValueType&);
@@ -73,10 +95,18 @@ namespace WTF {
// Clears the whole set.
void clear();
+ // Overloads for smart pointer keys that take the raw pointer type as the parameter.
+ template<typename V = ValueType> typename std::enable_if<IsSmartPtr<V>::value, iterator>::type find(typename GetPtrHelper<V>::PtrType);
+ template<typename V = ValueType> typename std::enable_if<IsSmartPtr<V>::value, const_iterator>::type find(typename GetPtrHelper<V>::PtrType) const;
+ template<typename V = ValueType> typename std::enable_if<IsSmartPtr<V>::value, bool>::type contains(typename GetPtrHelper<V>::PtrType) const;
+ template<typename V = ValueType> typename std::enable_if<IsSmartPtr<V>::value, unsigned>::type count(typename GetPtrHelper<V>::PtrType) const;
+ template<typename V = ValueType> typename std::enable_if<IsSmartPtr<V>::value, bool>::type remove(typename GetPtrHelper<V>::PtrType);
+
private:
ImplType m_impl;
};
+
template<typename Value, typename HashFunctions, typename Traits>
inline void HashCountedSet<Value, HashFunctions, Traits>::swap(HashCountedSet& other)
{
@@ -84,13 +114,13 @@ namespace WTF {
}
template<typename Value, typename HashFunctions, typename Traits>
- inline int HashCountedSet<Value, HashFunctions, Traits>::size() const
+ inline unsigned HashCountedSet<Value, HashFunctions, Traits>::size() const
{
return m_impl.size();
}
template<typename Value, typename HashFunctions, typename Traits>
- inline int HashCountedSet<Value, HashFunctions, Traits>::capacity() const
+ inline unsigned HashCountedSet<Value, HashFunctions, Traits>::capacity() const
{
return m_impl.capacity();
}
@@ -156,6 +186,30 @@ namespace WTF {
++result.iterator->value;
return result;
}
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline typename HashCountedSet<Value, HashFunctions, Traits>::AddResult HashCountedSet<Value, HashFunctions, Traits>::add(ValueType&& value)
+ {
+ AddResult result = m_impl.add(std::forward<Value>(value), 0);
+ ++result.iterator->value;
+ return result;
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline typename HashCountedSet<Value, HashFunctions, Traits>::AddResult HashCountedSet<Value, HashFunctions, Traits>::add(const ValueType& value, unsigned count)
+ {
+ AddResult result = m_impl.add(value, 0);
+ result.iterator->value += count;
+ return result;
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline typename HashCountedSet<Value, HashFunctions, Traits>::AddResult HashCountedSet<Value, HashFunctions, Traits>::add(ValueType&& value, unsigned count)
+ {
+ AddResult result = m_impl.add(std::forward<Value>(value), 0);
+ result.iterator->value += count;
+ return result;
+ }
template<typename Value, typename HashFunctions, typename Traits>
inline bool HashCountedSet<Value, HashFunctions, Traits>::remove(const ValueType& value)
@@ -229,8 +283,42 @@ namespace WTF {
vector[i] = (*it).key;
}
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename V>
+ inline auto HashCountedSet<Value, HashFunctions, Traits>::find(typename GetPtrHelper<V>::PtrType value) -> typename std::enable_if<IsSmartPtr<V>::value, iterator>::type
+ {
+ return m_impl.find(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename V>
+ inline auto HashCountedSet<Value, HashFunctions, Traits>::find(typename GetPtrHelper<V>::PtrType value) const -> typename std::enable_if<IsSmartPtr<V>::value, const_iterator>::type
+ {
+ return m_impl.find(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename V>
+ inline auto HashCountedSet<Value, HashFunctions, Traits>::contains(typename GetPtrHelper<V>::PtrType value) const -> typename std::enable_if<IsSmartPtr<V>::value, bool>::type
+ {
+ return m_impl.contains(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename V>
+ inline auto HashCountedSet<Value, HashFunctions, Traits>::count(typename GetPtrHelper<V>::PtrType value) const -> typename std::enable_if<IsSmartPtr<V>::value, unsigned>::type
+ {
+ return m_impl.get(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename V>
+ inline auto HashCountedSet<Value, HashFunctions, Traits>::remove(typename GetPtrHelper<V>::PtrType value) -> typename std::enable_if<IsSmartPtr<V>::value, bool>::type
+ {
+ return remove(find(value));
+ }
-} // namespace khtml
+} // namespace WTF
using WTF::HashCountedSet;
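A sketch exercising the new initializer-list constructor and the count-aware add() overload (values are illustrative):

#include <wtf/HashCountedSet.h>
#include <wtf/text/StringHash.h>
#include <wtf/text/WTFString.h>

void example()
{
    // Initializer-list construction: each listed value is add()ed once,
    // so repeated values accumulate ("div" -> 2, "span" -> 1).
    HashCountedSet<String> tags { "div", "span", "div" };

    // The new overload bumps a count by an arbitrary amount in one call.
    tags.add("div", 3);   // "div" -> 5

    // remove() decrements and erases the entry once the count hits zero.
    tags.remove("span");
}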
diff --git a/Source/WTF/wtf/HashFunctions.h b/Source/WTF/wtf/HashFunctions.h
index 471a8f375..462f19a81 100644
--- a/Source/WTF/wtf/HashFunctions.h
+++ b/Source/WTF/wtf/HashFunctions.h
@@ -21,8 +21,9 @@
#ifndef WTF_HashFunctions_h
#define WTF_HashFunctions_h
-#include <wtf/RefPtr.h>
#include <stdint.h>
+#include <wtf/GetPtr.h>
+#include <wtf/RefPtr.h>
namespace WTF {
@@ -119,21 +120,37 @@ namespace WTF {
// pointer identity hash function
- template<typename T> struct PtrHash {
- static unsigned hash(T key)
- {
- return IntHash<uintptr_t>::hash(reinterpret_cast<uintptr_t>(key));
- }
- static bool equal(T a, T b) { return a == b; }
+ template<typename T, bool isSmartPointer>
+ struct PtrHashBase;
+
+ template <typename T>
+ struct PtrHashBase<T, false /* isSmartPtr */> {
+ typedef T PtrType;
+
+ static unsigned hash(PtrType key) { return IntHash<uintptr_t>::hash(reinterpret_cast<uintptr_t>(key)); }
+ static bool equal(PtrType a, PtrType b) { return a == b; }
static const bool safeToCompareToEmptyOrDeleted = true;
};
- template<typename P> struct PtrHash<RefPtr<P>> : PtrHash<P*> {
- using PtrHash<P*>::hash;
- static unsigned hash(const RefPtr<P>& key) { return hash(key.get()); }
- using PtrHash<P*>::equal;
- static bool equal(const RefPtr<P>& a, const RefPtr<P>& b) { return a == b; }
- static bool equal(P* a, const RefPtr<P>& b) { return a == b; }
- static bool equal(const RefPtr<P>& a, P* b) { return a == b; }
+
+ template <typename T>
+ struct PtrHashBase<T, true /* isSmartPtr */> {
+ typedef typename GetPtrHelper<T>::PtrType PtrType;
+
+ static unsigned hash(PtrType key) { return IntHash<uintptr_t>::hash(reinterpret_cast<uintptr_t>(key)); }
+ static bool equal(PtrType a, PtrType b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+
+ static unsigned hash(const T& key) { return hash(getPtr(key)); }
+ static bool equal(const T& a, const T& b) { return getPtr(a) == getPtr(b); }
+ static bool equal(PtrType a, const T& b) { return a == getPtr(b); }
+ static bool equal(const T& a, PtrType b) { return getPtr(a) == b; }
+ };
+
+ template<typename T> struct PtrHash : PtrHashBase<T, IsSmartPtr<T>::value> {
+ };
+
+ template<typename P> struct PtrHash<Ref<P>> : PtrHashBase<Ref<P>, IsSmartPtr<Ref<P>>::value> {
+ static const bool safeToCompareToEmptyOrDeleted = false;
};
// default hash function for each type
@@ -149,8 +166,7 @@ namespace WTF {
{
return DefaultHash<T>::Hash::equal(a.first, b.first) && DefaultHash<U>::Hash::equal(a.second, b.second);
}
- static const bool safeToCompareToEmptyOrDeleted = DefaultHash<T>::Hash::safeToCompareToEmptyOrDeleted
- && DefaultHash<U>::Hash::safeToCompareToEmptyOrDeleted;
+ static const bool safeToCompareToEmptyOrDeleted = DefaultHash<T>::Hash::safeToCompareToEmptyOrDeleted && DefaultHash<U>::Hash::safeToCompareToEmptyOrDeleted;
};
template<typename T, typename U> struct IntPairHash {
@@ -161,6 +177,7 @@ namespace WTF {
// make IntHash the default hash function for many integer types
+ template<> struct DefaultHash<bool> { typedef IntHash<uint8_t> Hash; };
template<> struct DefaultHash<short> { typedef IntHash<unsigned> Hash; };
template<> struct DefaultHash<unsigned short> { typedef IntHash<unsigned> Hash; };
template<> struct DefaultHash<int> { typedef IntHash<unsigned> Hash; };
@@ -181,6 +198,9 @@ namespace WTF {
template<typename P> struct DefaultHash<P*> { typedef PtrHash<P*> Hash; };
template<typename P> struct DefaultHash<RefPtr<P>> { typedef PtrHash<RefPtr<P>> Hash; };
+ template<typename P> struct DefaultHash<Ref<P>> { typedef PtrHash<Ref<P>> Hash; };
+
+ template<typename P, typename Deleter> struct DefaultHash<std::unique_ptr<P, Deleter>> { typedef PtrHash<std::unique_ptr<P, Deleter>> Hash; };
// make IntPairHash the default hash function for pairs of (at most) 32-bit integers.
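With PtrHash now routed through IsSmartPtr, smart-pointer keys hash by the pointer they hold, so a std::unique_ptr key needs no custom hash functor. A small sketch (the element type is illustrative; HashTraits for std::unique_ptr are assumed to come from HashTraits.h):

#include <memory>
#include <wtf/HashSet.h>

struct Node { int id { 0 }; };

void example()
{
    // DefaultHash<std::unique_ptr<Node>> hashes the owned Node*.
    HashSet<std::unique_ptr<Node>> nodes;
    nodes.add(std::make_unique<Node>());
}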
diff --git a/Source/WTF/wtf/HashIterators.h b/Source/WTF/wtf/HashIterators.h
index b498b92c6..318a8496c 100644
--- a/Source/WTF/wtf/HashIterators.h
+++ b/Source/WTF/wtf/HashIterators.h
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -23,8 +23,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef WTF_HashIterators_h
-#define WTF_HashIterators_h
+#pragma once
+
+#include <iterator>
namespace WTF {
@@ -33,7 +34,7 @@ namespace WTF {
template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableKeysIterator;
template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableValuesIterator;
- template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstIteratorAdapter<HashTableType, KeyValuePair<KeyType, MappedType>> {
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstIteratorAdapter<HashTableType, KeyValuePair<KeyType, MappedType>> : public std::iterator<std::forward_iterator_tag, KeyValuePair<KeyType, MappedType>, std::ptrdiff_t, const KeyValuePair<KeyType, MappedType>*, const KeyValuePair<KeyType, MappedType>&> {
private:
typedef KeyValuePair<KeyType, MappedType> ValueType;
public:
@@ -56,7 +57,7 @@ namespace WTF {
typename HashTableType::const_iterator m_impl;
};
- template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableIteratorAdapter<HashTableType, KeyValuePair<KeyType, MappedType>> {
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableIteratorAdapter<HashTableType, KeyValuePair<KeyType, MappedType>> : public std::iterator<std::forward_iterator_tag, KeyValuePair<KeyType, MappedType>, std::ptrdiff_t, KeyValuePair<KeyType, MappedType>*, KeyValuePair<KeyType, MappedType>&> {
private:
typedef KeyValuePair<KeyType, MappedType> ValueType;
public:
@@ -84,7 +85,7 @@ namespace WTF {
typename HashTableType::iterator m_impl;
};
- template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstKeysIterator {
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstKeysIterator : public std::iterator<std::forward_iterator_tag, KeyType, std::ptrdiff_t, const KeyType*, const KeyType&> {
private:
typedef HashTableConstIteratorAdapter<HashTableType, KeyValuePair<KeyType, MappedType>> ConstIterator;
@@ -101,7 +102,7 @@ namespace WTF {
ConstIterator m_impl;
};
- template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstValuesIterator {
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstValuesIterator : public std::iterator<std::forward_iterator_tag, MappedType, std::ptrdiff_t, const MappedType*, const MappedType&> {
private:
typedef HashTableConstIteratorAdapter<HashTableType, KeyValuePair<KeyType, MappedType>> ConstIterator;
@@ -118,7 +119,7 @@ namespace WTF {
ConstIterator m_impl;
};
- template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableKeysIterator {
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableKeysIterator : public std::iterator<std::forward_iterator_tag, KeyType, std::ptrdiff_t, KeyType*, KeyType&> {
private:
typedef HashTableIteratorAdapter<HashTableType, KeyValuePair<KeyType, MappedType>> Iterator;
typedef HashTableConstIteratorAdapter<HashTableType, KeyValuePair<KeyType, MappedType>> ConstIterator;
@@ -141,7 +142,7 @@ namespace WTF {
Iterator m_impl;
};
- template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableValuesIterator {
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableValuesIterator : public std::iterator<std::forward_iterator_tag, MappedType, std::ptrdiff_t, MappedType*, MappedType&> {
private:
typedef HashTableIteratorAdapter<HashTableType, KeyValuePair<KeyType, MappedType>> Iterator;
typedef HashTableConstIteratorAdapter<HashTableType, KeyValuePair<KeyType, MappedType>> ConstIterator;
@@ -149,7 +150,7 @@ namespace WTF {
public:
HashTableValuesIterator(const Iterator& impl) : m_impl(impl) {}
- MappedType* get() const { return &(m_impl.get()->value); }
+ MappedType* get() const { return std::addressof(m_impl.get()->value); }
MappedType& operator*() const { return *get(); }
MappedType* operator->() const { return get(); }
@@ -212,7 +213,4 @@ namespace WTF {
return a.m_impl != b.m_impl;
}
-
} // namespace WTF
-
-#endif // WTF_HashIterators_h
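Deriving the adapters from std::iterator (the pre-C++17 way to publish iterator_traits) makes the keys/values views usable with <algorithm>. A minimal sketch, assuming a word-count map and the values() range that HashMap exposes:

#include <algorithm>
#include <wtf/HashMap.h>
#include <wtf/text/WTFString.h>

// Illustrative: count how many entries meet a threshold with std::count_if,
// which needs the iterator_traits that the std::iterator base class now provides.
static size_t frequentWordCount(const HashMap<String, unsigned>& wordCounts, unsigned threshold)
{
    auto values = wordCounts.values();
    return std::count_if(values.begin(), values.end(),
        [threshold](unsigned count) { return count >= threshold; });
}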
diff --git a/Source/WTF/wtf/HashMap.h b/Source/WTF/wtf/HashMap.h
index be3001403..53c40f303 100644
--- a/Source/WTF/wtf/HashMap.h
+++ b/Source/WTF/wtf/HashMap.h
@@ -33,7 +33,7 @@ template<typename T> struct KeyValuePairKeyExtractor {
template<typename KeyArg, typename MappedArg, typename HashArg = typename DefaultHash<KeyArg>::Hash,
typename KeyTraitsArg = HashTraits<KeyArg>, typename MappedTraitsArg = HashTraits<MappedArg>>
-class HashMap {
+class HashMap final {
WTF_MAKE_FAST_ALLOCATED;
private:
typedef KeyTraitsArg KeyTraits;
@@ -54,6 +54,7 @@ public:
private:
typedef typename MappedTraits::PeekType MappedPeekType;
+ typedef typename MappedTraits::TakeType MappedTakeType;
typedef HashArg HashFunctions;
@@ -81,8 +82,8 @@ public:
void swap(HashMap&);
- int size() const;
- int capacity() const;
+ unsigned size() const;
+ unsigned capacity() const;
bool isEmpty() const;
// iterators iterate over pairs of keys and values
@@ -102,6 +103,9 @@ public:
bool contains(const KeyType&) const;
MappedPeekType get(const KeyType&) const;
+ // Same as get(), but aggressively inlined.
+ MappedPeekType fastGet(const KeyType&) const;
+
// Replaces the value but not the key if the key is already present.
// Return value includes both an iterator to the key location,
// and an isNewEntry boolean that's true if a new entry was added.
@@ -114,11 +118,20 @@ public:
template<typename V> AddResult add(const KeyType&, V&&);
template<typename V> AddResult add(KeyType&&, V&&);
+ // Same as add(), but aggressively inlined.
+ template<typename V> AddResult fastAdd(const KeyType&, V&&);
+ template<typename V> AddResult fastAdd(KeyType&&, V&&);
+
+ template<typename Functor> AddResult ensure(const KeyType&, Functor&&);
+ template<typename Functor> AddResult ensure(KeyType&&, Functor&&);
+
bool remove(const KeyType&);
bool remove(iterator);
+ template<typename Functor>
+ void removeIf(Functor&&);
void clear();
- MappedType take(const KeyType&); // efficient combination of get with remove
+ MappedTakeType take(const KeyType&); // efficient combination of get with remove
// An alternate version of find() that finds the object by hashing and comparing
// with some other type, to avoid the cost of type conversion. HashTranslator
@@ -137,6 +150,15 @@ public:
// static translate(ValueType&, const T&, unsigned hashCode);
template<typename HashTranslator, typename K, typename V> AddResult add(K&&, V&&);
+ // Overloads for smart pointer keys that take the raw pointer type as the parameter.
+ template<typename K = KeyType> typename std::enable_if<IsSmartPtr<K>::value, iterator>::type find(typename GetPtrHelper<K>::PtrType);
+ template<typename K = KeyType> typename std::enable_if<IsSmartPtr<K>::value, const_iterator>::type find(typename GetPtrHelper<K>::PtrType) const;
+ template<typename K = KeyType> typename std::enable_if<IsSmartPtr<K>::value, bool>::type contains(typename GetPtrHelper<K>::PtrType) const;
+ template<typename K = KeyType> typename std::enable_if<IsSmartPtr<K>::value, MappedPeekType>::type inlineGet(typename GetPtrHelper<K>::PtrType) const;
+ template<typename K = KeyType> typename std::enable_if<IsSmartPtr<K>::value, MappedPeekType>::type get(typename GetPtrHelper<K>::PtrType) const;
+ template<typename K = KeyType> typename std::enable_if<IsSmartPtr<K>::value, bool>::type remove(typename GetPtrHelper<K>::PtrType);
+ template<typename K = KeyType> typename std::enable_if<IsSmartPtr<K>::value, MappedTakeType>::type take(typename GetPtrHelper<K>::PtrType);
+
void checkConsistency() const;
static bool isValidKey(const KeyType&);
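These raw-pointer overloads let a map keyed by a smart pointer be queried with the plain pointer, avoiding construction of a temporary RefPtr (and the ref/deref churn) just to perform a lookup. A minimal sketch, assuming a hypothetical ref-counted Node class:

#include <wtf/HashMap.h>
#include <wtf/RefCounted.h>
#include <wtf/RefPtr.h>

// Hypothetical ref-counted class, used only for illustration.
class Node : public RefCounted<Node> { };

static int lookupDepth(const HashMap<RefPtr<Node>, int>& depths, Node* node)
{
    // Overload resolution picks the GetPtrHelper<K>::PtrType overload, so the
    // lookup hashes the raw pointer directly instead of building a RefPtr<Node>.
    auto it = depths.find(node);
    return it == depths.end() ? -1 : it->value;
}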
@@ -148,6 +170,9 @@ private:
template<typename K, typename V>
AddResult inlineAdd(K&&, V&&);
+ template<typename K, typename F>
+ AddResult inlineEnsure(K&&, F&&);
+
HashTableType m_impl;
};
@@ -157,8 +182,19 @@ struct HashMapTranslator {
template<typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); }
template<typename T, typename U, typename V> static void translate(T& location, U&& key, V&& mapped)
{
- location.key = std::forward<U>(key);
- location.value = std::forward<V>(mapped);
+ ValueTraits::KeyTraits::assignToEmpty(location.key, std::forward<U>(key));
+ ValueTraits::ValueTraits::assignToEmpty(location.value, std::forward<V>(mapped));
+ }
+};
+
+template<typename ValueTraits, typename HashFunctions>
+struct HashMapEnsureTranslator {
+ template<typename T> static unsigned hash(const T& key) { return HashFunctions::hash(key); }
+ template<typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); }
+ template<typename T, typename U, typename Functor> static void translate(T& location, U&& key, Functor&& functor)
+ {
+ ValueTraits::KeyTraits::assignToEmpty(location.key, std::forward<U>(key));
+ ValueTraits::ValueTraits::assignToEmpty(location.value, functor());
}
};
@@ -180,13 +216,13 @@ inline void HashMap<T, U, V, W, X>::swap(HashMap& other)
}
template<typename T, typename U, typename V, typename W, typename X>
-inline int HashMap<T, U, V, W, X>::size() const
+inline unsigned HashMap<T, U, V, W, X>::size() const
{
return m_impl.size();
}
template<typename T, typename U, typename V, typename W, typename X>
-inline int HashMap<T, U, V, W, X>::capacity() const
+inline unsigned HashMap<T, U, V, W, X>::capacity() const
{
return m_impl.capacity();
}
@@ -276,12 +312,19 @@ auto HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::inlineS
template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
template<typename K, typename V>
-auto HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::inlineAdd(K&& key, V&& value) -> AddResult
+ALWAYS_INLINE auto HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::inlineAdd(K&& key, V&& value) -> AddResult
{
return m_impl.template add<HashMapTranslator<KeyValuePairTraits, HashFunctions>>(std::forward<K>(key), std::forward<V>(value));
}
template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
+template<typename K, typename F>
+ALWAYS_INLINE auto HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::inlineEnsure(K&& key, F&& functor) -> AddResult
+{
+ return m_impl.template add<HashMapEnsureTranslator<KeyValuePairTraits, HashFunctions>>(std::forward<K>(key), std::forward<F>(functor));
+}
+
+template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
template<typename T>
auto HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::set(const KeyType& key, T&& mapped) -> AddResult
{
@@ -292,7 +335,7 @@ template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTrai
template<typename T>
auto HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::set(KeyType&& key, T&& mapped) -> AddResult
{
- return inlineSet(std::move(key), std::forward<T>(mapped));
+ return inlineSet(WTFMove(key), std::forward<T>(mapped));
}
template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
@@ -313,9 +356,37 @@ template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTrai
template<typename T>
auto HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::add(KeyType&& key, T&& mapped) -> AddResult
{
- return inlineAdd(std::move(key), std::forward<T>(mapped));
+ return inlineAdd(WTFMove(key), std::forward<T>(mapped));
+}
+
+template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
+template<typename T>
+ALWAYS_INLINE auto HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::fastAdd(const KeyType& key, T&& mapped) -> AddResult
+{
+ return inlineAdd(key, std::forward<T>(mapped));
+}
+
+template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
+template<typename T>
+ALWAYS_INLINE auto HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::fastAdd(KeyType&& key, T&& mapped) -> AddResult
+{
+ return inlineAdd(WTFMove(key), std::forward<T>(mapped));
+}
+
+template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
+template<typename Functor>
+auto HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::ensure(const KeyType& key, Functor&& functor) -> AddResult
+{
+ return inlineEnsure(key, std::forward<Functor>(functor));
}
+template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
+template<typename Functor>
+auto HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::ensure(KeyType&& key, Functor&& functor) -> AddResult
+{
+ return inlineEnsure(std::forward<KeyType>(key), std::forward<Functor>(functor));
+}
+
template<typename T, typename U, typename V, typename W, typename MappedTraits>
auto HashMap<T, U, V, W, MappedTraits>::get(const KeyType& key) const -> MappedPeekType
{
@@ -325,6 +396,15 @@ auto HashMap<T, U, V, W, MappedTraits>::get(const KeyType& key) const -> MappedP
return MappedTraits::peek(entry->value);
}
+template<typename T, typename U, typename V, typename W, typename MappedTraits>
+ALWAYS_INLINE auto HashMap<T, U, V, W, MappedTraits>::fastGet(const KeyType& key) const -> MappedPeekType
+{
+ KeyValuePairType* entry = const_cast<HashTableType&>(m_impl).template inlineLookup<typename HashTableType::IdentityTranslatorType>(key);
+ if (!entry)
+ return MappedTraits::peek(MappedTraits::emptyValue());
+ return MappedTraits::peek(entry->value);
+}
+
template<typename T, typename U, typename V, typename W, typename X>
inline bool HashMap<T, U, V, W, X>::remove(iterator it)
{
@@ -336,6 +416,13 @@ inline bool HashMap<T, U, V, W, X>::remove(iterator it)
}
template<typename T, typename U, typename V, typename W, typename X>
+template<typename Functor>
+inline void HashMap<T, U, V, W, X>::removeIf(Functor&& functor)
+{
+ m_impl.removeIf(std::forward<Functor>(functor));
+}
+
+template<typename T, typename U, typename V, typename W, typename X>
inline bool HashMap<T, U, V, W, X>::remove(const KeyType& key)
{
return remove(find(key));
@@ -348,12 +435,69 @@ inline void HashMap<T, U, V, W, X>::clear()
}
template<typename T, typename U, typename V, typename W, typename MappedTraits>
-auto HashMap<T, U, V, W, MappedTraits>::take(const KeyType& key) -> MappedType
+auto HashMap<T, U, V, W, MappedTraits>::take(const KeyType& key) -> MappedTakeType
+{
+ iterator it = find(key);
+ if (it == end())
+ return MappedTraits::take(MappedTraits::emptyValue());
+ auto value = MappedTraits::take(WTFMove(it->value));
+ remove(it);
+ return value;
+}
+
+template<typename T, typename U, typename V, typename W, typename X>
+template<typename K>
+inline auto HashMap<T, U, V, W, X>::find(typename GetPtrHelper<K>::PtrType key) -> typename std::enable_if<IsSmartPtr<K>::value, iterator>::type
+{
+ return m_impl.template find<HashMapTranslator<KeyValuePairTraits, HashFunctions>>(key);
+}
+
+template<typename T, typename U, typename V, typename W, typename X>
+template<typename K>
+inline auto HashMap<T, U, V, W, X>::find(typename GetPtrHelper<K>::PtrType key) const -> typename std::enable_if<IsSmartPtr<K>::value, const_iterator>::type
+{
+ return m_impl.template find<HashMapTranslator<KeyValuePairTraits, HashFunctions>>(key);
+}
+
+template<typename T, typename U, typename V, typename W, typename X>
+template<typename K>
+inline auto HashMap<T, U, V, W, X>::contains(typename GetPtrHelper<K>::PtrType key) const -> typename std::enable_if<IsSmartPtr<K>::value, bool>::type
+{
+ return m_impl.template contains<HashMapTranslator<KeyValuePairTraits, HashFunctions>>(key);
+}
+
+template<typename T, typename U, typename V, typename W, typename X>
+template<typename K>
+inline auto HashMap<T, U, V, W, X>::inlineGet(typename GetPtrHelper<K>::PtrType key) const -> typename std::enable_if<IsSmartPtr<K>::value, MappedPeekType>::type
+{
+ KeyValuePairType* entry = const_cast<HashTableType&>(m_impl).template inlineLookup<HashMapTranslator<KeyValuePairTraits, HashFunctions>>(key);
+ if (!entry)
+ return MappedTraits::peek(MappedTraits::emptyValue());
+ return MappedTraits::peek(entry->value);
+}
+
+template<typename T, typename U, typename V, typename W, typename X>
+template<typename K>
+auto HashMap<T, U, V, W, X>::get(typename GetPtrHelper<K>::PtrType key) const -> typename std::enable_if<IsSmartPtr<K>::value, MappedPeekType>::type
+{
+ return inlineGet(key);
+}
+
+template<typename T, typename U, typename V, typename W, typename X>
+template<typename K>
+inline auto HashMap<T, U, V, W, X>::remove(typename GetPtrHelper<K>::PtrType key) -> typename std::enable_if<IsSmartPtr<K>::value, bool>::type
+{
+ return remove(find(key));
+}
+
+template<typename T, typename U, typename V, typename W, typename X>
+template<typename K>
+inline auto HashMap<T, U, V, W, X>::take(typename GetPtrHelper<K>::PtrType key) -> typename std::enable_if<IsSmartPtr<K>::value, MappedTakeType>::type
{
iterator it = find(key);
if (it == end())
- return MappedTraits::emptyValue();
- MappedType value = std::move(it->value);
+ return MappedTraits::take(MappedTraits::emptyValue());
+ auto value = MappedTraits::take(WTFMove(it->value));
remove(it);
return value;
}
@@ -407,6 +551,19 @@ inline bool operator!=(const HashMap<T, U, V, W, X>& a, const HashMap<T, U, V, W
}
template<typename T, typename U, typename V, typename W, typename X, typename Y>
+inline void copyToVector(const HashMap<T, U, V, W, X>& collection, Y& vector)
+{
+ typedef typename HashMap<T, U, V, W, X>::const_iterator iterator;
+
+ vector.resize(collection.size());
+
+ iterator it = collection.begin();
+ iterator end = collection.end();
+ for (unsigned i = 0; it != end; ++it, ++i)
+ vector[i] = { (*it).key, (*it).value };
+}
+
+template<typename T, typename U, typename V, typename W, typename X, typename Y>
inline void copyKeysToVector(const HashMap<T, U, V, W, X>& collection, Y& vector)
{
typedef typename HashMap<T, U, V, W, X>::const_iterator::Keys iterator;
@@ -436,6 +593,4 @@ inline void copyValuesToVector(const HashMap<T, U, V, W, X>& collection, Y& vect
using WTF::HashMap;
-#include <wtf/RefPtrHashMap.h>
-
#endif /* WTF_HashMap_h */
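ensure() is the piece that makes build-on-first-use maps cheap: the functor runs only when the key is absent, so no default-constructed value is created just to be overwritten. A minimal sketch of the idiom, with illustrative names:

#include <wtf/HashMap.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

// Illustrative helper: group items under a key, creating each bucket lazily.
static void addToGroup(HashMap<String, Vector<int>>& groups, const String& key, int item)
{
    // ensure() adds { key, functor() } only if the key is missing; either way the
    // returned AddResult's iterator points at the entry for this key.
    auto result = groups.ensure(key, [] { return Vector<int>(); });
    result.iterator->value.append(item);
}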
diff --git a/Source/WTF/wtf/Encoder.h b/Source/WTF/wtf/HashMethod.h
index 109b0db8d..8d70f9002 100644
--- a/Source/WTF/wtf/Encoder.h
+++ b/Source/WTF/wtf/HashMethod.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,35 +23,23 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef Encoder_h
-#define Encoder_h
+#ifndef HashMethod_h
+#define HashMethod_h
-#include <stdint.h>
+#include <wtf/StdLibExtras.h>
namespace WTF {
-class String;
-
-class Encoder {
-protected:
- Encoder() { }
- virtual ~Encoder() { }
-
-public:
- virtual void encodeBytes(const uint8_t*, size_t) = 0;
-
- virtual void encodeBool(bool) = 0;
- virtual void encodeUInt32(uint32_t) = 0;
- virtual void encodeUInt64(uint64_t) = 0;
- virtual void encodeInt32(int32_t) = 0;
- virtual void encodeInt64(int64_t) = 0;
- virtual void encodeFloat(float) = 0;
- virtual void encodeDouble(double) = 0;
- virtual void encodeString(const String&) = 0;
+template<typename T>
+struct HashMethod {
+ size_t operator()(const T& value) const
+ {
+ return value.hash();
+ }
};
} // namespace WTF
-using WTF::Encoder;
+using WTF::HashMethod;
-#endif // Encoder_h
+#endif // HashMethod_h
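HashMethod is a thin adaptor that forwards to a member hash() function and has the call-operator shape the standard containers expect from a hasher, so a type with a hash() method can be dropped into std::unordered_set or std::unordered_map. A minimal sketch, with a hypothetical Ticket type:

#include <unordered_set>
#include <wtf/HashMethod.h>

// Hypothetical value type exposing hash(), as many WTF/WebCore types do.
struct Ticket {
    unsigned id;
    size_t hash() const { return id * 2654435761u; } // multiplicative hash, illustration only
    bool operator==(const Ticket& other) const { return id == other.id; }
};

static std::unordered_set<Ticket, HashMethod<Ticket>> seenTickets;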
diff --git a/Source/WTF/wtf/HashSet.h b/Source/WTF/wtf/HashSet.h
index 46c3fe5d1..cf1f94214 100644
--- a/Source/WTF/wtf/HashSet.h
+++ b/Source/WTF/wtf/HashSet.h
@@ -23,6 +23,7 @@
#include <initializer_list>
#include <wtf/FastMalloc.h>
+#include <wtf/GetPtr.h>
#include <wtf/HashTable.h>
namespace WTF {
@@ -32,11 +33,12 @@ namespace WTF {
template<typename Value, typename HashFunctions, typename Traits> class HashSet;
template<typename ValueArg, typename HashArg = typename DefaultHash<ValueArg>::Hash,
- typename TraitsArg = HashTraits<ValueArg>> class HashSet {
+ typename TraitsArg = HashTraits<ValueArg>> class HashSet final {
WTF_MAKE_FAST_ALLOCATED;
private:
typedef HashArg HashFunctions;
typedef TraitsArg ValueTraits;
+ typedef typename ValueTraits::TakeType TakeType;
public:
typedef typename ValueTraits::TraitType ValueType;
@@ -62,8 +64,8 @@ namespace WTF {
void swap(HashSet&);
- int size() const;
- int capacity() const;
+ unsigned size() const;
+ unsigned capacity() const;
bool isEmpty() const;
iterator begin() const;
@@ -100,13 +102,24 @@ namespace WTF {
bool remove(const ValueType&);
bool remove(iterator);
+ template<typename Functor>
+ void removeIf(const Functor&);
void clear();
- ValueType take(const ValueType&);
+ TakeType take(const ValueType&);
+ TakeType take(iterator);
+ TakeType takeAny();
+
+ // Overloads for smart pointer values that take the raw pointer type as the parameter.
+ template<typename V = ValueType> typename std::enable_if<IsSmartPtr<V>::value, iterator>::type find(typename GetPtrHelper<V>::PtrType) const;
+ template<typename V = ValueType> typename std::enable_if<IsSmartPtr<V>::value, bool>::type contains(typename GetPtrHelper<V>::PtrType) const;
+ template<typename V = ValueType> typename std::enable_if<IsSmartPtr<V>::value, bool>::type remove(typename GetPtrHelper<V>::PtrType);
+ template<typename V = ValueType> typename std::enable_if<IsSmartPtr<V>::value, TakeType>::type take(typename GetPtrHelper<V>::PtrType);
static bool isValidValue(const ValueType&);
-
- bool operator==(const HashSet&) const;
+
+ template<typename OtherCollection>
+ bool operator==(const OtherCollection&) const;
private:
HashTableType m_impl;
@@ -116,6 +129,16 @@ namespace WTF {
template<typename T> static const T& extract(const T& t) { return t; }
};
+ template<typename ValueTraits, typename HashFunctions>
+ struct HashSetTranslator {
+ template<typename T> static unsigned hash(const T& key) { return HashFunctions::hash(key); }
+ template<typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); }
+ template<typename T, typename U, typename V> static void translate(T& location, U&&, V&& value)
+ {
+ ValueTraits::assignToEmpty(location, std::forward<V>(value));
+ }
+ };
+
template<typename Translator>
struct HashSetTranslatorAdapter {
template<typename T> static unsigned hash(const T& key) { return Translator::hash(key); }
@@ -133,13 +156,13 @@ namespace WTF {
}
template<typename T, typename U, typename V>
- inline int HashSet<T, U, V>::size() const
+ inline unsigned HashSet<T, U, V>::size() const
{
return m_impl.size();
}
template<typename T, typename U, typename V>
- inline int HashSet<T, U, V>::capacity() const
+ inline unsigned HashSet<T, U, V>::capacity() const
{
return m_impl.capacity();
}
@@ -197,7 +220,7 @@ namespace WTF {
template<typename T, typename U, typename V>
inline auto HashSet<T, U, V>::add(ValueType&& value) -> AddResult
{
- return m_impl.add(std::move(value));
+ return m_impl.add(WTFMove(value));
}
template<typename Value, typename HashFunctions, typename Traits>
@@ -234,24 +257,70 @@ namespace WTF {
}
template<typename T, typename U, typename V>
+ template<typename Functor>
+ inline void HashSet<T, U, V>::removeIf(const Functor& functor)
+ {
+ m_impl.removeIf(functor);
+ }
+
+ template<typename T, typename U, typename V>
inline void HashSet<T, U, V>::clear()
{
m_impl.clear();
}
template<typename T, typename U, typename V>
- auto HashSet<T, U, V>::take(const ValueType& value) -> ValueType
+ inline auto HashSet<T, U, V>::take(iterator it) -> TakeType
{
- auto it = find(value);
if (it == end())
- return ValueTraits::emptyValue();
+ return ValueTraits::take(ValueTraits::emptyValue());
- ValueType result = std::move(const_cast<ValueType&>(*it));
+ auto result = ValueTraits::take(WTFMove(const_cast<ValueType&>(*it)));
remove(it);
return result;
}
template<typename T, typename U, typename V>
+ inline auto HashSet<T, U, V>::take(const ValueType& value) -> TakeType
+ {
+ return take(find(value));
+ }
+
+ template<typename T, typename U, typename V>
+ inline auto HashSet<T, U, V>::takeAny() -> TakeType
+ {
+ return take(begin());
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename V>
+ inline auto HashSet<Value, HashFunctions, Traits>::find(typename GetPtrHelper<V>::PtrType value) const -> typename std::enable_if<IsSmartPtr<V>::value, iterator>::type
+ {
+ return m_impl.template find<HashSetTranslator<Traits, HashFunctions>>(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename V>
+ inline auto HashSet<Value, HashFunctions, Traits>::contains(typename GetPtrHelper<V>::PtrType value) const -> typename std::enable_if<IsSmartPtr<V>::value, bool>::type
+ {
+ return m_impl.template contains<HashSetTranslator<Traits, HashFunctions>>(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename V>
+ inline auto HashSet<Value, HashFunctions, Traits>::remove(typename GetPtrHelper<V>::PtrType value) -> typename std::enable_if<IsSmartPtr<V>::value, bool>::type
+ {
+ return remove(find(value));
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename V>
+ inline auto HashSet<Value, HashFunctions, Traits>::take(typename GetPtrHelper<V>::PtrType value) -> typename std::enable_if<IsSmartPtr<V>::value, TakeType>::type
+ {
+ return take(find(value));
+ }
+
+ template<typename T, typename U, typename V>
inline bool HashSet<T, U, V>::isValidValue(const ValueType& value)
{
if (ValueTraits::isDeletedValue(value))
@@ -282,12 +351,13 @@ namespace WTF {
}
template<typename T, typename U, typename V>
- inline bool HashSet<T, U, V>::operator==(const HashSet& other) const
+ template<typename OtherCollection>
+ inline bool HashSet<T, U, V>::operator==(const OtherCollection& otherCollection) const
{
- if (size() != other.size())
+ if (size() != otherCollection.size())
return false;
- for (const_iterator iter = begin(); iter != end(); ++iter) {
- if (!other.contains(*iter))
+ for (const auto& other : otherCollection) {
+ if (!contains(other))
return false;
}
return true;
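takeAny() together with the TakeType plumbing turns a HashSet into a simple work pool: pull an arbitrary element and remove it in one step. A minimal sketch with illustrative names:

#include <cstdint>
#include <wtf/HashSet.h>

void process(uint64_t); // hypothetical handler, defined elsewhere

// Illustrative: drain a set of pending ids, handling each exactly once.
static void drainPending(HashSet<uint64_t>& pending)
{
    while (!pending.isEmpty())
        process(pending.takeAny()); // removes and returns an arbitrary element

    // removeIf() is the bulk alternative when a predicate decides what goes:
    // pending.removeIf([](uint64_t id) { return id < 1000; });
}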
diff --git a/Source/WTF/wtf/HashTable.cpp b/Source/WTF/wtf/HashTable.cpp
index 458dd531f..b06eaf6c2 100644
--- a/Source/WTF/wtf/HashTable.cpp
+++ b/Source/WTF/wtf/HashTable.cpp
@@ -36,20 +36,11 @@ unsigned HashTableStats::numCollisions;
unsigned HashTableStats::collisionGraph[4096];
unsigned HashTableStats::maxCollisions;
-static std::mutex& hashTableStatsMutex()
-{
- static std::once_flag onceFlag;
- static std::mutex* mutex;
- std::call_once(onceFlag, []{
- mutex = std::make_unique<std::mutex>().release();
- });
-
- return *mutex;
-}
+static StaticLock hashTableStatsMutex;
void HashTableStats::recordCollisionAtCount(unsigned count)
{
- std::lock_guard<std::mutex> lock(hashTableStatsMutex());
+ std::lock_guard<StaticLock> lock(hashTableStatsMutex);
if (count > maxCollisions)
maxCollisions = count;
@@ -59,7 +50,7 @@ void HashTableStats::recordCollisionAtCount(unsigned count)
void HashTableStats::dumpStats()
{
- std::lock_guard<std::mutex> lock(hashTableStatsMutex());
+ std::lock_guard<StaticLock> lock(hashTableStatsMutex);
dataLogF("\nWTF::HashTable statistics\n\n");
dataLogF("%u accesses\n", numAccesses.load());
diff --git a/Source/WTF/wtf/HashTable.h b/Source/WTF/wtf/HashTable.h
index 8519ae364..a576e62ce 100644
--- a/Source/WTF/wtf/HashTable.h
+++ b/Source/WTF/wtf/HashTable.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012, 2015 Apple Inc. All rights reserved.
* Copyright (C) 2008 David Levin <levin@chromium.org>
*
* This library is free software; you can redistribute it and/or
@@ -19,10 +19,10 @@
*
*/
-#ifndef WTF_HashTable_h
-#define WTF_HashTable_h
+#pragma once
#include <atomic>
+#include <iterator>
#include <mutex>
#include <string.h>
#include <type_traits>
@@ -30,6 +30,8 @@
#include <wtf/Assertions.h>
#include <wtf/FastMalloc.h>
#include <wtf/HashTraits.h>
+#include <wtf/Lock.h>
+#include <wtf/MathExtras.h>
#include <wtf/StdLibExtras.h>
#include <wtf/ValueCheck.h>
@@ -101,7 +103,7 @@ namespace WTF {
typedef enum { HashItemKnownGood } HashItemKnownGoodTag;
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
- class HashTableConstIterator {
+ class HashTableConstIterator : public std::iterator<std::forward_iterator_tag, Value, std::ptrdiff_t, const Value*, const Value&> {
private:
typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> HashTableType;
typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> iterator;
@@ -237,7 +239,7 @@ namespace WTF {
};
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
- class HashTableIterator {
+ class HashTableIterator : public std::iterator<std::forward_iterator_tag, Value, std::ptrdiff_t, Value*, Value&> {
private:
typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> HashTableType;
typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> iterator;
@@ -276,17 +278,23 @@ namespace WTF {
const_iterator m_iterator;
};
- template<typename HashFunctions> class IdentityHashTranslator {
+ template<typename ValueTraits, typename HashFunctions> class IdentityHashTranslator {
public:
template<typename T> static unsigned hash(const T& key) { return HashFunctions::hash(key); }
template<typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); }
- template<typename T, typename U, typename V> static void translate(T& location, const U&, V&& value) { location = std::forward<V>(value); }
+ template<typename T, typename U, typename V> static void translate(T& location, const U&, V&& value)
+ {
+ ValueTraits::assignToEmpty(location, std::forward<V>(value));
+ }
};
template<typename IteratorType> struct HashTableAddResult {
+ HashTableAddResult() : isNewEntry(false) { }
HashTableAddResult(IteratorType iter, bool isNewEntry) : iterator(iter), isNewEntry(isNewEntry) { }
IteratorType iterator;
bool isNewEntry;
+
+ explicit operator bool() const { return isNewEntry; }
};
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
@@ -297,7 +305,7 @@ namespace WTF {
typedef Traits ValueTraits;
typedef Key KeyType;
typedef Value ValueType;
- typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType;
+ typedef IdentityHashTranslator<ValueTraits, HashFunctions> IdentityTranslatorType;
typedef HashTableAddResult<iterator> AddResult;
#if DUMP_HASHTABLE_STATS_PER_TABLE
@@ -313,16 +321,16 @@ namespace WTF {
{
}
- int numAccesses;
- int numRehashes;
- int numRemoves;
- int numReinserts;
+ unsigned numAccesses;
+ unsigned numRehashes;
+ unsigned numRemoves;
+ unsigned numReinserts;
- int maxCollisions;
- int numCollisions;
- int collisionGraph[4096];
+ unsigned maxCollisions;
+ unsigned numCollisions;
+ unsigned collisionGraph[4096];
- void recordCollisionAtCount(int count)
+ void recordCollisionAtCount(unsigned count)
{
if (count > maxCollisions)
maxCollisions = count;
@@ -336,7 +344,7 @@ namespace WTF {
dataLogF("%d accesses\n", numAccesses);
dataLogF("%d total collisions, average %.2f probes per access\n", numCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses);
dataLogF("longest collision chain: %d\n", maxCollisions);
- for (int i = 1; i <= maxCollisions; i++) {
+ for (unsigned i = 1; i <= maxCollisions; i++) {
dataLogF(" %d lookups with exactly %d collisions (%.2f%% , %.2f%% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collisionGraph[i+1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses);
}
dataLogF("%d rehashes\n", numRehashes);
@@ -360,6 +368,9 @@ namespace WTF {
void swap(HashTable&);
HashTable& operator=(const HashTable&);
+ HashTable(HashTable&&);
+ HashTable& operator=(HashTable&&);
+
// When the hash table is empty, just return the same iterator for end as for begin.
// This is more efficient because we don't have to skip all the empty and deleted
// buckets, and iterating an empty table is a common case that's worth optimizing.
@@ -368,12 +379,12 @@ namespace WTF {
const_iterator begin() const { return isEmpty() ? end() : makeConstIterator(m_table); }
const_iterator end() const { return makeKnownGoodConstIterator(m_table + m_tableSize); }
- int size() const { return m_keyCount; }
- int capacity() const { return m_tableSize; }
+ unsigned size() const { return m_keyCount; }
+ unsigned capacity() const { return m_tableSize; }
bool isEmpty() const { return !m_keyCount; }
AddResult add(const ValueType& value) { return add<IdentityTranslatorType>(Extractor::extract(value), value); }
- AddResult add(ValueType&& value) { return add<IdentityTranslatorType>(Extractor::extract(value), std::move(value)); }
+ AddResult add(ValueType&& value) { return add<IdentityTranslatorType>(Extractor::extract(value), WTFMove(value)); }
// A special version of add() that finds the object by hashing and comparing
// with some other type, to avoid the cost of type conversion if the object is already
@@ -393,6 +404,8 @@ namespace WTF {
void remove(iterator);
void removeWithoutEntryConsistencyCheck(iterator);
void removeWithoutEntryConsistencyCheck(const_iterator);
+ template<typename Functor>
+ void removeIf(const Functor&);
void clear();
static bool isEmptyBucket(const ValueType& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); }
@@ -401,6 +414,7 @@ namespace WTF {
ValueType* lookup(const Key& key) { return lookup<IdentityTranslatorType>(key); }
template<typename HashTranslator, typename T> ValueType* lookup(const T&);
+ template<typename HashTranslator, typename T> ValueType* inlineLookup(const T&);
#if !ASSERT_DISABLED
void checkTableConsistency() const;
@@ -416,8 +430,8 @@ namespace WTF {
#endif
private:
- static ValueType* allocateTable(int size);
- static void deallocateTable(ValueType* table, int size);
+ static ValueType* allocateTable(unsigned size);
+ static void deallocateTable(ValueType* table, unsigned size);
typedef std::pair<ValueType*, bool> LookupType;
typedef std::pair<LookupType, unsigned> FullLookupType;
@@ -426,6 +440,8 @@ namespace WTF {
template<typename HashTranslator, typename T> FullLookupType fullLookupForWriting(const T&);
template<typename HashTranslator, typename T> LookupType lookupForWriting(const T&);
+ template<typename HashTranslator, typename T, typename Extra> void addUniqueForInitialization(T&& key, Extra&&);
+
template<typename HashTranslator, typename T> void checkKey(const T&);
void removeAndInvalidateWithoutEntryConsistencyCheck(ValueType*);
@@ -438,11 +454,11 @@ namespace WTF {
ValueType* expand(ValueType* entry = nullptr);
void shrink() { rehash(m_tableSize / 2, nullptr); }
- ValueType* rehash(int newTableSize, ValueType* entry);
+ ValueType* rehash(unsigned newTableSize, ValueType* entry);
ValueType* reinsert(ValueType&&);
static void initializeBucket(ValueType& bucket);
- static void deleteBucket(ValueType& bucket) { bucket.~ValueType(); Traits::constructDeletedValue(bucket); }
+ static void deleteBucket(ValueType& bucket) { hashTraitsDeleteBucket<Traits>(bucket); }
FullLookupType makeLookupResult(ValueType* position, bool found, unsigned hash)
{ return FullLookupType(LookupType(position, found), hash); }
@@ -464,21 +480,21 @@ namespace WTF {
static void invalidateIterators() { }
#endif
- static const int m_maxLoad = 2;
- static const int m_minLoad = 6;
+ static const unsigned m_maxLoad = 2;
+ static const unsigned m_minLoad = 6;
ValueType* m_table;
- int m_tableSize;
- int m_tableSizeMask;
- int m_keyCount;
- int m_deletedCount;
+ unsigned m_tableSize;
+ unsigned m_tableSizeMask;
+ unsigned m_keyCount;
+ unsigned m_deletedCount;
#if CHECK_HASHTABLE_ITERATORS
public:
// All access to m_iterators should be guarded with m_mutex.
mutable const_iterator* m_iterators;
- // Use OwnPtr so HashTable can still be memmove'd or memcpy'ed.
- mutable std::unique_ptr<std::mutex> m_mutex;
+ // Use std::unique_ptr so HashTable can still be memmove'd or memcpy'ed.
+ mutable std::unique_ptr<Lock> m_mutex;
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
@@ -521,7 +537,7 @@ namespace WTF {
struct HashTableCapacityForSize {
static const unsigned value = HashTableCapacityForSizeSplitter<size, !(size & (size - 1))>::value;
COMPILE_ASSERT(size > 0, HashTableNonZeroMinimumCapacity);
- COMPILE_ASSERT(!static_cast<int>(value >> 31), HashTableNoCapacityOverflow);
+ COMPILE_ASSERT(!static_cast<unsigned>(value >> 31), HashTableNoCapacityOverflow);
COMPILE_ASSERT(value > (2 * size), HashTableCapacityHoldsContentSize);
};
@@ -534,7 +550,7 @@ namespace WTF {
, m_deletedCount(0)
#if CHECK_HASHTABLE_ITERATORS
, m_iterators(0)
- , m_mutex(std::make_unique<std::mutex>())
+ , m_mutex(std::make_unique<Lock>())
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
, m_stats(std::make_unique<Stats>())
@@ -582,13 +598,20 @@ namespace WTF {
template<typename HashTranslator, typename T>
inline auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::lookup(const T& key) -> ValueType*
{
+ return inlineLookup<HashTranslator>(key);
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ template<typename HashTranslator, typename T>
+ ALWAYS_INLINE auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::inlineLookup(const T& key) -> ValueType*
+ {
checkKey<HashTranslator>(key);
- int k = 0;
- int sizeMask = m_tableSizeMask;
+ unsigned k = 0;
+ unsigned sizeMask = m_tableSizeMask;
ValueType* table = m_table;
unsigned h = HashTranslator::hash(key);
- int i = h & sizeMask;
+ unsigned i = h & sizeMask;
if (!table)
return 0;
@@ -641,15 +664,15 @@ namespace WTF {
ASSERT(m_table);
checkKey<HashTranslator>(key);
- int k = 0;
+ unsigned k = 0;
ValueType* table = m_table;
- int sizeMask = m_tableSizeMask;
+ unsigned sizeMask = m_tableSizeMask;
unsigned h = HashTranslator::hash(key);
- int i = h & sizeMask;
+ unsigned i = h & sizeMask;
#if DUMP_HASHTABLE_STATS
++HashTableStats::numAccesses;
- int probeCount = 0;
+ unsigned probeCount = 0;
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
@@ -702,11 +725,11 @@ namespace WTF {
ASSERT(m_table);
checkKey<HashTranslator>(key);
- int k = 0;
+ unsigned k = 0;
ValueType* table = m_table;
- int sizeMask = m_tableSizeMask;
+ unsigned sizeMask = m_tableSizeMask;
unsigned h = HashTranslator::hash(key);
- int i = h & sizeMask;
+ unsigned i = h & sizeMask;
#if DUMP_HASHTABLE_STATS
++HashTableStats::numAccesses;
@@ -756,12 +779,65 @@ namespace WTF {
}
}
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ template<typename HashTranslator, typename T, typename Extra>
+ ALWAYS_INLINE void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::addUniqueForInitialization(T&& key, Extra&& extra)
+ {
+ ASSERT(m_table);
+
+ checkKey<HashTranslator>(key);
+
+ invalidateIterators();
+
+ internalCheckTableConsistency();
+
+ unsigned k = 0;
+ ValueType* table = m_table;
+ unsigned sizeMask = m_tableSizeMask;
+ unsigned h = HashTranslator::hash(key);
+ unsigned i = h & sizeMask;
+
+#if DUMP_HASHTABLE_STATS
+ ++HashTableStats::numAccesses;
+ unsigned probeCount = 0;
+#endif
+
+#if DUMP_HASHTABLE_STATS_PER_TABLE
+ ++m_stats->numAccesses;
+#endif
+
+ ValueType* entry;
+ while (1) {
+ entry = table + i;
+
+ if (isEmptyBucket(*entry))
+ break;
+
+#if DUMP_HASHTABLE_STATS
+ ++probeCount;
+ HashTableStats::recordCollisionAtCount(probeCount);
+#endif
+
+#if DUMP_HASHTABLE_STATS_PER_TABLE
+ m_stats->recordCollisionAtCount(probeCount);
+#endif
+
+ if (k == 0)
+ k = 1 | doubleHash(h);
+ i = (i + k) & sizeMask;
+ }
+
+ HashTranslator::translate(*entry, std::forward<T>(key), std::forward<Extra>(extra));
+
+ internalCheckTableConsistency();
+ }
+
template<bool emptyValueIsZero> struct HashTableBucketInitializer;
template<> struct HashTableBucketInitializer<false> {
template<typename Traits, typename Value> static void initialize(Value& bucket)
{
- new (NotNull, &bucket) Value(Traits::emptyValue());
+ new (NotNull, std::addressof(bucket)) Value(Traits::emptyValue());
}
};
@@ -771,7 +847,7 @@ namespace WTF {
// This initializes the bucket without copying the empty value.
// That makes it possible to use this with types that don't support copying.
// The memset to 0 looks like a slow operation but is optimized by the compilers.
- memset(&bucket, 0, sizeof(bucket));
+ memset(std::addressof(bucket), 0, sizeof(bucket));
}
};
@@ -783,7 +859,7 @@ namespace WTF {
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
template<typename HashTranslator, typename T, typename Extra>
- inline auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::add(T&& key, Extra&& extra) -> AddResult
+ ALWAYS_INLINE auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::add(T&& key, Extra&& extra) -> AddResult
{
checkKey<HashTranslator>(key);
@@ -796,11 +872,11 @@ namespace WTF {
ASSERT(m_table);
- int k = 0;
+ unsigned k = 0;
ValueType* table = m_table;
- int sizeMask = m_tableSizeMask;
+ unsigned sizeMask = m_tableSizeMask;
unsigned h = HashTranslator::hash(key);
- int i = h & sizeMask;
+ unsigned i = h & sizeMask;
#if DUMP_HASHTABLE_STATS
++HashTableStats::numAccesses;
@@ -919,7 +995,7 @@ namespace WTF {
Value* newEntry = lookupForWriting(Extractor::extract(entry)).first;
newEntry->~Value();
- new (NotNull, newEntry) ValueType(std::move(entry));
+ new (NotNull, newEntry) ValueType(WTFMove(entry));
return newEntry;
}
@@ -1031,22 +1107,52 @@ namespace WTF {
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
- auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::allocateTable(int size) -> ValueType*
+ template<typename Functor>
+ inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::removeIf(const Functor& functor)
+ {
+ // We must use local copies in case "functor" or "deleteBucket"
+ // makes a function call, which would prevent the compiler from keeping
+ // the values in registers.
+ unsigned removedBucketCount = 0;
+ ValueType* table = m_table;
+
+ for (unsigned i = m_tableSize; i--;) {
+ ValueType& bucket = table[i];
+ if (isEmptyOrDeletedBucket(bucket))
+ continue;
+
+ if (!functor(bucket))
+ continue;
+
+ deleteBucket(bucket);
+ ++removedBucketCount;
+ }
+ m_deletedCount += removedBucketCount;
+ m_keyCount -= removedBucketCount;
+
+ if (shouldShrink())
+ shrink();
+
+ internalCheckTableConsistency();
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::allocateTable(unsigned size) -> ValueType*
{
// would use a template member function with explicit specializations here, but
// gcc doesn't appear to support that
if (Traits::emptyValueIsZero)
return static_cast<ValueType*>(fastZeroedMalloc(size * sizeof(ValueType)));
ValueType* result = static_cast<ValueType*>(fastMalloc(size * sizeof(ValueType)));
- for (int i = 0; i < size; i++)
+ for (unsigned i = 0; i < size; i++)
initializeBucket(result[i]);
return result;
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
- void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::deallocateTable(ValueType* table, int size)
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::deallocateTable(ValueType* table, unsigned size)
{
- for (int i = 0; i < size; ++i) {
+ for (unsigned i = 0; i < size; ++i) {
if (!isDeletedBucket(table[i]))
table[i].~ValueType();
}
@@ -1056,7 +1162,7 @@ namespace WTF {
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::expand(ValueType* entry) -> ValueType*
{
- int newSize;
+ unsigned newSize;
if (m_tableSize == 0)
newSize = KeyTraits::minimumTableSize;
else if (mustRehashInPlace())
@@ -1068,11 +1174,11 @@ namespace WTF {
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
- auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::rehash(int newTableSize, ValueType* entry) -> ValueType*
+ auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::rehash(unsigned newTableSize, ValueType* entry) -> ValueType*
{
internalCheckTableConsistencyExceptSize();
- int oldTableSize = m_tableSize;
+ unsigned oldTableSize = m_tableSize;
ValueType* oldTable = m_table;
#if DUMP_HASHTABLE_STATS
@@ -1090,14 +1196,21 @@ namespace WTF {
m_table = allocateTable(newTableSize);
Value* newEntry = nullptr;
- for (int i = 0; i != oldTableSize; ++i) {
- if (isEmptyOrDeletedBucket(oldTable[i])) {
- ASSERT(&oldTable[i] != entry);
+ for (unsigned i = 0; i != oldTableSize; ++i) {
+ if (isDeletedBucket(oldTable[i])) {
+ ASSERT(std::addressof(oldTable[i]) != entry);
continue;
}
- Value* reinsertedEntry = reinsert(std::move(oldTable[i]));
- if (&oldTable[i] == entry) {
+ if (isEmptyBucket(oldTable[i])) {
+ ASSERT(std::addressof(oldTable[i]) != entry);
+ oldTable[i].~ValueType();
+ continue;
+ }
+
+ Value* reinsertedEntry = reinsert(WTFMove(oldTable[i]));
+ oldTable[i].~ValueType();
+ if (std::addressof(oldTable[i]) == entry) {
ASSERT(!newEntry);
newEntry = reinsertedEntry;
}
@@ -1105,7 +1218,7 @@ namespace WTF {
m_deletedCount = 0;
- deallocateTable(oldTable, oldTableSize);
+ fastFree(oldTable);
internalCheckTableConsistency();
return newEntry;
@@ -1123,30 +1236,45 @@ namespace WTF {
m_tableSize = 0;
m_tableSizeMask = 0;
m_keyCount = 0;
+ m_deletedCount = 0;
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::HashTable(const HashTable& other)
- : m_table(0)
+ : m_table(nullptr)
, m_tableSize(0)
, m_tableSizeMask(0)
, m_keyCount(0)
, m_deletedCount(0)
#if CHECK_HASHTABLE_ITERATORS
- , m_iterators(0)
- , m_mutex(std::make_unique<std::mutex>())
+ , m_iterators(nullptr)
+ , m_mutex(std::make_unique<Lock>())
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
, m_stats(std::make_unique<Stats>(*other.m_stats))
#endif
{
- // Copy the hash table the dumb way, by adding each element to the new table.
- // It might be more efficient to copy the table slots, but it's not clear that efficiency is needed.
- // FIXME: It's likely that this can be improved, for static analyses that use
- // HashSets. https://bugs.webkit.org/show_bug.cgi?id=118455
- const_iterator end = other.end();
- for (const_iterator it = other.begin(); it != end; ++it)
- add(*it);
+ unsigned otherKeyCount = other.size();
+ if (!otherKeyCount)
+ return;
+
+ unsigned bestTableSize = WTF::roundUpToPowerOfTwo(otherKeyCount) * 2;
+
+ // With maxLoad at 1/2 and minLoad at 1/6, our average load is 2/6.
+ // If we are getting halfway between 2/6 and 1/2 (past 5/12), we double the size to avoid being too close to
+ // maxLoad and bring the ratio close to 2/6. This gives us a load in the range [3/12, 5/12).
+ bool aboveThreeQuarterLoad = otherKeyCount * 12 >= bestTableSize * 5;
+ if (aboveThreeQuarterLoad)
+ bestTableSize *= 2;
+
+ unsigned minimumTableSize = KeyTraits::minimumTableSize;
+ m_tableSize = std::max<unsigned>(bestTableSize, minimumTableSize);
+ m_tableSizeMask = m_tableSize - 1;
+ m_keyCount = otherKeyCount;
+ m_table = allocateTable(m_tableSize);
+
+ for (const auto& otherValue : other)
+ addUniqueForInitialization<IdentityTranslatorType>(Extractor::extract(otherValue), otherValue);
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
@@ -1155,38 +1283,57 @@ namespace WTF {
invalidateIterators();
other.invalidateIterators();
- ValueType* tmp_table = m_table;
- m_table = other.m_table;
- other.m_table = tmp_table;
+ std::swap(m_table, other.m_table);
+ std::swap(m_tableSize, other.m_tableSize);
+ std::swap(m_tableSizeMask, other.m_tableSizeMask);
+ std::swap(m_keyCount, other.m_keyCount);
+ std::swap(m_deletedCount, other.m_deletedCount);
- int tmp_tableSize = m_tableSize;
- m_tableSize = other.m_tableSize;
- other.m_tableSize = tmp_tableSize;
+#if DUMP_HASHTABLE_STATS_PER_TABLE
+ m_stats.swap(other.m_stats);
+#endif
+ }
- int tmp_tableSizeMask = m_tableSizeMask;
- m_tableSizeMask = other.m_tableSizeMask;
- other.m_tableSizeMask = tmp_tableSizeMask;
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::operator=(const HashTable& other) -> HashTable&
+ {
+ HashTable tmp(other);
+ swap(tmp);
+ return *this;
+ }
- int tmp_keyCount = m_keyCount;
- m_keyCount = other.m_keyCount;
- other.m_keyCount = tmp_keyCount;
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::HashTable(HashTable&& other)
+#if CHECK_HASHTABLE_ITERATORS
+ : m_iterators(nullptr)
+ , m_mutex(std::make_unique<Lock>())
+#endif
+ {
+ other.invalidateIterators();
- int tmp_deletedCount = m_deletedCount;
+ m_table = other.m_table;
+ m_tableSize = other.m_tableSize;
+ m_tableSizeMask = other.m_tableSizeMask;
+ m_keyCount = other.m_keyCount;
m_deletedCount = other.m_deletedCount;
- other.m_deletedCount = tmp_deletedCount;
+
+ other.m_table = nullptr;
+ other.m_tableSize = 0;
+ other.m_tableSizeMask = 0;
+ other.m_keyCount = 0;
+ other.m_deletedCount = 0;
#if DUMP_HASHTABLE_STATS_PER_TABLE
- m_stats.swap(other.m_stats);
+ m_stats = WTFMove(other.m_stats);
+ other.m_stats = nullptr;
#endif
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
- auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::operator=(const HashTable& other) -> HashTable&
+ inline auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::operator=(HashTable&& other) -> HashTable&
{
- // FIXME: It's likely that this can be improved, for static analyses that use
- // HashSets. https://bugs.webkit.org/show_bug.cgi?id=118455
- HashTable tmp(other);
- swap(tmp);
+ HashTable temp = WTFMove(other);
+ swap(temp);
return *this;
}
@@ -1206,9 +1353,9 @@ namespace WTF {
if (!m_table)
return;
- int count = 0;
- int deletedCount = 0;
- for (int j = 0; j < m_tableSize; ++j) {
+ unsigned count = 0;
+ unsigned deletedCount = 0;
+ for (unsigned j = 0; j < m_tableSize; ++j) {
ValueType* entry = m_table + j;
if (isEmptyBucket(*entry))
continue;
@@ -1239,7 +1386,7 @@ namespace WTF {
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::invalidateIterators()
{
- std::lock_guard<std::mutex> lock(*m_mutex);
+ std::lock_guard<Lock> lock(*m_mutex);
const_iterator* next;
for (const_iterator* p = m_iterators; p; p = next) {
next = p->m_next;
@@ -1261,7 +1408,7 @@ namespace WTF {
if (!table) {
it->m_next = 0;
} else {
- std::lock_guard<std::mutex> lock(*table->m_mutex);
+ std::lock_guard<Lock> lock(*table->m_mutex);
ASSERT(table->m_iterators != it);
it->m_next = table->m_iterators;
table->m_iterators = it;
@@ -1275,15 +1422,12 @@ namespace WTF {
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
void removeIterator(HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>* it)
{
- typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> HashTableType;
- typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> const_iterator;
-
// Delete iterator from doubly-linked list of iterators.
if (!it->m_table) {
ASSERT(!it->m_next);
ASSERT(!it->m_previous);
} else {
- std::lock_guard<std::mutex> lock(*it->m_table->m_mutex);
+ std::lock_guard<Lock> lock(*it->m_table->m_mutex);
if (it->m_next) {
ASSERT(it->m_next->m_previous == it);
it->m_next->m_previous = it->m_previous;
@@ -1307,7 +1451,7 @@ namespace WTF {
// iterator adapters
- template<typename HashTableType, typename ValueType> struct HashTableConstIteratorAdapter {
+ template<typename HashTableType, typename ValueType> struct HashTableConstIteratorAdapter : public std::iterator<std::forward_iterator_tag, ValueType, std::ptrdiff_t, const ValueType*, const ValueType&> {
HashTableConstIteratorAdapter() {}
HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {}
@@ -1321,7 +1465,7 @@ namespace WTF {
typename HashTableType::const_iterator m_impl;
};
- template<typename HashTableType, typename ValueType> struct HashTableIteratorAdapter {
+ template<typename HashTableType, typename ValueType> struct HashTableIteratorAdapter : public std::iterator<std::forward_iterator_tag, ValueType, std::ptrdiff_t, ValueType*, ValueType&> {
HashTableIteratorAdapter() {}
HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {}
@@ -1392,5 +1536,3 @@ namespace WTF {
} // namespace WTF
#include <wtf/HashIterators.h>
-
-#endif // WTF_HashTable_h
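HashTableAddResult gains a default constructor and an explicit operator bool that mirrors isNewEntry, so call sites can test "did this insert actually add something" directly on the return value. A minimal sketch at the HashSet level, with illustrative names:

#include <wtf/HashSet.h>
#include <wtf/text/WTFString.h>

// Illustrative: register a name, reporting whether it was seen for the first time.
static bool registerName(HashSet<String>& names, const String& name)
{
    if (names.add(name)) // contextual conversion to bool: true only for a new entry
        return true;     // first time we have seen this name
    return false;        // duplicate; the set is unchanged
}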
diff --git a/Source/WTF/wtf/HashTraits.h b/Source/WTF/wtf/HashTraits.h
index f14810021..542e7ad2c 100644
--- a/Source/WTF/wtf/HashTraits.h
+++ b/Source/WTF/wtf/HashTraits.h
@@ -21,18 +21,16 @@
#ifndef WTF_HashTraits_h
#define WTF_HashTraits_h
+#include <limits>
+#include <utility>
#include <wtf/HashFunctions.h>
+#include <wtf/Optional.h>
#include <wtf/StdLibExtras.h>
-#include <utility>
-#include <limits>
namespace WTF {
class String;
-template<typename T> class OwnPtr;
-template<typename T> class PassOwnPtr;
-
template<typename T> struct HashTraits;
template<bool isInteger, typename T> struct GenericHashTraitsBase;
@@ -48,7 +46,7 @@ template<typename T> struct GenericHashTraitsBase<false, T> {
// The starting table size. Can be overridden when we know beforehand that
// a hash table will have at least N entries.
- static const int minimumTableSize = 8;
+ static const unsigned minimumTableSize = 8;
};
// Default integer traits disallow both 0 and -1 as keys (max value instead of -1 for unsigned).
@@ -64,10 +62,18 @@ template<typename T> struct GenericHashTraits : GenericHashTraitsBase<std::is_in
static T emptyValue() { return T(); }
+ template<typename U, typename V>
+ static void assignToEmpty(U& emptyValue, V&& value)
+ {
+ emptyValue = std::forward<V>(value);
+ }
+
// Type for return value of functions that do not transfer ownership, such as get.
typedef T PeekType;
- static PeekType peek(const T& value) { return value; }
- static T& peek(T& value) { return value; } // Overloaded to avoid copying of non-temporary values.
+ template<typename U> static U&& peek(U&& value) { return std::forward<U>(value); }
+
+ typedef T TakeType;
+ template<typename U> static TakeType take(U&& value) { return std::forward<U>(value); }
};
template<typename T> struct HashTraits : GenericHashTraits<T> { };
@@ -89,6 +95,22 @@ template<typename T> struct UnsignedWithZeroKeyHashTraits : GenericHashTraits<T>
static bool isDeletedValue(T value) { return value == std::numeric_limits<T>::max() - 1; }
};
+template<typename T> struct SignedWithZeroKeyHashTraits : GenericHashTraits<T> {
+ static const bool emptyValueIsZero = false;
+ static T emptyValue() { return std::numeric_limits<T>::min(); }
+ static void constructDeletedValue(T& slot) { slot = std::numeric_limits<T>::max(); }
+ static bool isDeletedValue(T value) { return value == std::numeric_limits<T>::max(); }
+};
+
+// Can be used with strong enums, allows zero as key.
+template<typename T> struct StrongEnumHashTraits : GenericHashTraits<T> {
+ using UnderlyingType = typename std::underlying_type<T>::type;
+ static const bool emptyValueIsZero = false;
+ static T emptyValue() { return static_cast<T>(std::numeric_limits<UnderlyingType>::max()); }
+ static void constructDeletedValue(T& slot) { slot = static_cast<T>(std::numeric_limits<UnderlyingType>::max() - 1); }
+ static bool isDeletedValue(T value) { return value == static_cast<T>(std::numeric_limits<UnderlyingType>::max() - 1); }
+};
+
template<typename P> struct HashTraits<P*> : GenericHashTraits<P*> {
static const bool emptyValueIsZero = true;
static void constructDeletedValue(P*& slot) { slot = reinterpret_cast<P*>(-1); }
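StrongEnumHashTraits is for enum-class keys whose zero enumerator is meaningful: instead of stealing 0 as the empty value, it parks the empty and deleted sentinels at the top of the underlying type's range. A minimal sketch, with an illustrative enum and an explicitly supplied hasher:

#include <cstdint>
#include <wtf/HashMap.h>
#include <wtf/HashTraits.h>

enum class Phase : uint8_t { Idle, Loading, Ready }; // Idle == 0 must remain usable as a key

// Hash the underlying integer; the empty (255) and deleted (254) sentinels compare safely.
struct PhaseHash {
    static unsigned hash(Phase phase) { return WTF::IntHash<uint8_t>::hash(static_cast<uint8_t>(phase)); }
    static bool equal(Phase a, Phase b) { return a == b; }
    static const bool safeToCompareToEmptyOrDeleted = true;
};

static HashMap<Phase, unsigned, PhaseHash, WTF::StrongEnumHashTraits<Phase>> phaseCounts;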
@@ -97,7 +119,7 @@ template<typename P> struct HashTraits<P*> : GenericHashTraits<P*> {
template<typename T> struct SimpleClassHashTraits : GenericHashTraits<T> {
static const bool emptyValueIsZero = true;
- static void constructDeletedValue(T& slot) { new (NotNull, &slot) T(HashTableDeletedValue); }
+ static void constructDeletedValue(T& slot) { new (NotNull, std::addressof(slot)) T(HashTableDeletedValue); }
static bool isDeletedValue(const T& value) { return value.isHashTableDeletedValue(); }
};
@@ -105,31 +127,76 @@ template<typename T, typename Deleter> struct HashTraits<std::unique_ptr<T, Dele
typedef std::nullptr_t EmptyValueType;
static EmptyValueType emptyValue() { return nullptr; }
+ static void constructDeletedValue(std::unique_ptr<T, Deleter>& slot) { new (NotNull, std::addressof(slot)) std::unique_ptr<T, Deleter> { reinterpret_cast<T*>(-1) }; }
+ static bool isDeletedValue(const std::unique_ptr<T, Deleter>& value) { return value.get() == reinterpret_cast<T*>(-1); }
+
typedef T* PeekType;
static T* peek(const std::unique_ptr<T, Deleter>& value) { return value.get(); }
static T* peek(std::nullptr_t) { return nullptr; }
-};
-
-template<typename T> struct HashTraits<OwnPtr<T>> : SimpleClassHashTraits<OwnPtr<T>> {
- typedef std::nullptr_t EmptyValueType;
- static EmptyValueType emptyValue() { return nullptr; }
- typedef T* PeekType;
- static T* peek(const OwnPtr<T>& value) { return value.get(); }
- static T* peek(std::nullptr_t) { return nullptr; }
+ static void customDeleteBucket(std::unique_ptr<T, Deleter>& value)
+ {
+ // The custom delete function exists to avoid a dead store before the value is destructed.
+ // The normal destruction sequence of a bucket would be:
+ // 1) Call the destructor of unique_ptr.
+ // 2) unique_ptr stores a zero for its internal pointer.
+ // 3) unique_ptr destroys its value.
+ // 4) Call constructDeletedValue() to set the bucket as destructed.
+ //
+ // The problem is that the call in (3) prevents the compiler from eliminating the dead store in (2)
+ // because a side effect of free() could be observing the value.
+ //
+ // This version of deleteBucket() ensures the two dead stores changing "value"
+ // are on the same side of the function call.
+ ASSERT(!isDeletedValue(value));
+ T* pointer = value.release();
+ constructDeletedValue(value);
+
+ // The null case happens if a caller uses std::move() to remove the pointer before calling remove()
+ // with an iterator. This is very uncommon.
+ if (LIKELY(pointer))
+ Deleter()(pointer);
+ }
};
template<typename P> struct HashTraits<RefPtr<P>> : SimpleClassHashTraits<RefPtr<P>> {
- static P* emptyValue() { return 0; }
+ static P* emptyValue() { return nullptr; }
typedef P* PeekType;
static PeekType peek(const RefPtr<P>& value) { return value.get(); }
static PeekType peek(P* value) { return value; }
+
+ static void customDeleteBucket(RefPtr<P>& value)
+ {
+ // See unique_ptr's customDeleteBucket() for an explanation.
+ ASSERT(!SimpleClassHashTraits<RefPtr<P>>::isDeletedValue(value));
+ auto valueToBeDestroyed = WTFMove(value);
+ SimpleClassHashTraits<RefPtr<P>>::constructDeletedValue(value);
+ }
+};
+
+template<typename P> struct HashTraits<Ref<P>> : SimpleClassHashTraits<Ref<P>> {
+ static const bool emptyValueIsZero = true;
+ static Ref<P> emptyValue() { return HashTableEmptyValue; }
+
+ static const bool hasIsEmptyValueFunction = true;
+ static bool isEmptyValue(const Ref<P>& value) { return value.isHashTableEmptyValue(); }
+
+ static void assignToEmpty(Ref<P>& emptyValue, Ref<P>&& newValue) { ASSERT(isEmptyValue(emptyValue)); emptyValue.assignToHashTableEmptyValue(WTFMove(newValue)); }
+
+ typedef P* PeekType;
+ static PeekType peek(const Ref<P>& value) { return const_cast<PeekType>(value.ptrAllowingHashTableEmptyValue()); }
+ static PeekType peek(P* value) { return value; }
+
+ typedef std::optional<Ref<P>> TakeType;
+ static TakeType take(Ref<P>&& value) { return isEmptyValue(value) ? std::nullopt : std::optional<Ref<P>>(WTFMove(value)); }
};
template<> struct HashTraits<String> : SimpleClassHashTraits<String> {
static const bool hasIsEmptyValueFunction = true;
static bool isEmptyValue(const String&);
+
+ static void customDeleteBucket(String&);
};
// This struct template is an implementation detail of the isHashTraitsEmptyValue function,
@@ -146,6 +213,30 @@ template<typename Traits, typename T> inline bool isHashTraitsEmptyValue(const T
return HashTraitsEmptyValueChecker<Traits, Traits::hasIsEmptyValueFunction>::isEmptyValue(value);
}
+template<typename Traits, typename T>
+struct HashTraitHasCustomDelete {
+ static T& bucketArg;
+ template<typename X> static std::true_type TestHasCustomDelete(X*, decltype(X::customDeleteBucket(bucketArg))* = nullptr);
+ static std::false_type TestHasCustomDelete(...);
+ typedef decltype(TestHasCustomDelete(static_cast<Traits*>(nullptr))) ResultType;
+ static const bool value = ResultType::value;
+};
+
+template<typename Traits, typename T>
+typename std::enable_if<HashTraitHasCustomDelete<Traits, T>::value>::type
+hashTraitsDeleteBucket(T& value)
+{
+ Traits::customDeleteBucket(value);
+}
+
+template<typename Traits, typename T>
+typename std::enable_if<!HashTraitHasCustomDelete<Traits, T>::value>::type
+hashTraitsDeleteBucket(T& value)
+{
+ value.~T();
+ Traits::constructDeletedValue(value);
+}
+
template<typename FirstTraitsArg, typename SecondTraitsArg>
struct PairHashTraits : GenericHashTraits<std::pair<typename FirstTraitsArg::TraitType, typename SecondTraitsArg::TraitType>> {
typedef FirstTraitsArg FirstTraits;
@@ -156,7 +247,7 @@ struct PairHashTraits : GenericHashTraits<std::pair<typename FirstTraitsArg::Tra
static const bool emptyValueIsZero = FirstTraits::emptyValueIsZero && SecondTraits::emptyValueIsZero;
static EmptyValueType emptyValue() { return std::make_pair(FirstTraits::emptyValue(), SecondTraits::emptyValue()); }
- static const int minimumTableSize = FirstTraits::minimumTableSize;
+ static const unsigned minimumTableSize = FirstTraits::minimumTableSize;
static void constructDeletedValue(TraitType& slot) { FirstTraits::constructDeletedValue(slot.first); }
static bool isDeletedValue(const TraitType& value) { return FirstTraits::isDeletedValue(value.first); }
@@ -197,14 +288,24 @@ struct KeyValuePairHashTraits : GenericHashTraits<KeyValuePair<typename KeyTrait
typedef ValueTraitsArg ValueTraits;
typedef KeyValuePair<typename KeyTraits::TraitType, typename ValueTraits::TraitType> TraitType;
typedef KeyValuePair<typename KeyTraits::EmptyValueType, typename ValueTraits::EmptyValueType> EmptyValueType;
+ typedef typename ValueTraitsArg::TraitType ValueType;
static const bool emptyValueIsZero = KeyTraits::emptyValueIsZero && ValueTraits::emptyValueIsZero;
static EmptyValueType emptyValue() { return KeyValuePair<typename KeyTraits::EmptyValueType, typename ValueTraits::EmptyValueType>(KeyTraits::emptyValue(), ValueTraits::emptyValue()); }
- static const int minimumTableSize = KeyTraits::minimumTableSize;
+ static const unsigned minimumTableSize = KeyTraits::minimumTableSize;
static void constructDeletedValue(TraitType& slot) { KeyTraits::constructDeletedValue(slot.key); }
static bool isDeletedValue(const TraitType& value) { return KeyTraits::isDeletedValue(value.key); }
+
+ static void customDeleteBucket(TraitType& value)
+ {
+ static_assert(std::is_trivially_destructible<KeyValuePair<int, int>>::value,
+ "The wrapper itself has to be trivially destructible for customDeleteBucket() to make sense, since we do not destruct the wrapper itself.");
+
+ hashTraitsDeleteBucket<KeyTraits>(value.key);
+ value.value.~ValueType();
+ }
};
template<typename Key, typename Value>
@@ -225,7 +326,7 @@ struct CustomHashTraits : public GenericHashTraits<T> {
static void constructDeletedValue(T& slot)
{
- new (NotNull, &slot) T(T::DeletedValue);
+ new (NotNull, std::addressof(slot)) T(T::DeletedValue);
}
static bool isDeletedValue(const T& value)
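An illustrative sketch (not from the patch) of how the new customDeleteBucket() detection is meant to be consumed; removeBucket is a hypothetical helper standing in for the hash table's remove path.

#include <wtf/HashTraits.h>

template<typename Traits, typename Bucket>
void removeBucket(Bucket& bucket)
{
    // Dispatches to Traits::customDeleteBucket() when the trait defines one
    // (unique_ptr, RefPtr, String, KeyValuePair); otherwise destroys the value
    // and marks the slot deleted via constructDeletedValue().
    WTF::hashTraitsDeleteBucket<Traits>(bucket);
}

// SignedWithZeroKeyHashTraits reserves numeric_limits<int>::min()/max() for the
// empty and deleted slots, so 0 becomes a usable key, e.g.:
//     HashMap<int, String, DefaultHash<int>::Hash, SignedWithZeroKeyHashTraits<int>> mapAllowingZeroKeys;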
diff --git a/Source/WTF/wtf/StringHasher.h b/Source/WTF/wtf/Hasher.h
index 6431f5b8a..c0a2e8cf2 100644
--- a/Source/WTF/wtf/StringHasher.h
+++ b/Source/WTF/wtf/Hasher.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005, 2006, 2008, 2010, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2005-2006, 2008, 2010, 2013, 2016 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* This library is free software; you can redistribute it and/or
@@ -19,10 +19,11 @@
*
*/
-#ifndef WTF_StringHasher_h
-#define WTF_StringHasher_h
+#ifndef WTF_Hasher_h
+#define WTF_Hasher_h
-#include <wtf/unicode/Unicode.h>
+#include <unicode/utypes.h>
+#include <wtf/text/LChar.h>
namespace WTF {
@@ -35,11 +36,12 @@ namespace WTF {
// JavaScriptCore and the CodeGeneratorJS.pm script in WebCore.
// Golden ratio. Arbitrary start value to avoid mapping all zeros to a hash value of zero.
-static const unsigned stringHashingStartValue = 0x9E3779B9U;
+static constexpr const unsigned stringHashingStartValue = 0x9E3779B9U;
class StringHasher {
public:
- static const unsigned flagCount = 8; // Save 8 bits for StringImpl to use as flags.
+ static constexpr const unsigned flagCount = 8; // Save 8 bits for StringImpl to use as flags.
+ static constexpr const unsigned maskHash = (1U << (sizeof(unsigned) * 8 - flagCount)) - 1;
StringHasher()
: m_hash(stringHashingStartValue)
@@ -130,7 +132,9 @@ public:
template<typename T, UChar Converter(T)> void addCharacters(const T* data, unsigned length)
{
- if (m_hasPendingCharacter && length) {
+ if (!length)
+ return;
+ if (m_hasPendingCharacter) {
m_hasPendingCharacter = false;
addCharactersAssumingAligned(m_pendingCharacter, Converter(*data++));
--length;
@@ -159,34 +163,12 @@ public:
unsigned hashWithTop8BitsMasked() const
{
- unsigned result = avalancheBits();
-
- // Reserving space from the high bits for flags preserves most of the hash's
- // value, since hash lookup typically masks out the high bits anyway.
- result &= (1U << (sizeof(result) * 8 - flagCount)) - 1;
-
- // This avoids ever returning a hash code of 0, since that is used to
- // signal "hash not computed yet". Setting the high bit maintains
- // reasonable fidelity to a hash code of 0 because it is likely to yield
- // exactly 0 when hash lookup masks out the high bits.
- if (!result)
- result = 0x80000000 >> flagCount;
-
- return result;
+ return finalizeAndMaskTop8Bits(processPendingCharacter());
}
unsigned hash() const
{
- unsigned result = avalancheBits();
-
- // This avoids ever returning a hash code of 0, since that is used to
- // signal "hash not computed yet". Setting the high bit maintains
- // reasonable fidelity to a hash code of 0 because it is likely to yield
- // exactly 0 when hash lookup masks out the high bits.
- if (!result)
- result = 0x80000000;
-
- return result;
+ return finalize(processPendingCharacter());
}
template<typename T, UChar Converter(T)> static unsigned computeHashAndMaskTop8Bits(const T* data, unsigned length)
@@ -239,19 +221,45 @@ public:
static unsigned hashMemory(const void* data, unsigned length)
{
- // FIXME: Why does this function use the version of the hash that drops the top 8 bits?
- // We want that for all string hashing so we can use those bits in StringImpl and hash
- // strings consistently, but I don't see why we'd want that for general memory hashing.
- ASSERT(!(length % 2));
- return computeHashAndMaskTop8Bits<UChar>(static_cast<const UChar*>(data), length / sizeof(UChar));
+ size_t lengthInUChar = length / sizeof(UChar);
+ StringHasher hasher;
+ hasher.addCharactersAssumingAligned(static_cast<const UChar*>(data), lengthInUChar);
+
+ for (size_t i = 0; i < length % sizeof(UChar); ++i)
+ hasher.addCharacter(static_cast<const char*>(data)[lengthInUChar * sizeof(UChar) + i]);
+
+ return hasher.hash();
}
template<size_t length> static unsigned hashMemory(const void* data)
{
- static_assert(!(length % 2), "length must be a multiple of two!");
return hashMemory(data, length);
}
+ static constexpr unsigned finalize(unsigned hash)
+ {
+ return avoidZero(avalancheBits(hash));
+ }
+
+ static constexpr unsigned finalizeAndMaskTop8Bits(unsigned hash)
+ {
+ // Reserving space from the high bits for flags preserves most of the hash's
+ // value, since hash lookup typically masks out the high bits anyway.
+ return avoidZero(avalancheBits(hash) & StringHasher::maskHash);
+ }
+
+ template<typename T, unsigned charactersCount>
+ static constexpr unsigned computeLiteralHash(const T (&characters)[charactersCount])
+ {
+ return StringHasher::finalize(computeLiteralHashImpl(stringHashingStartValue, 0, characters, charactersCount - 1));
+ }
+
+ template<typename T, unsigned charactersCount>
+ static constexpr unsigned computeLiteralHashAndMaskTop8Bits(const T (&characters)[charactersCount])
+ {
+ return StringHasher::finalizeAndMaskTop8Bits(computeLiteralHashImpl(stringHashingStartValue, 0, characters, charactersCount - 1));
+ }
+
private:
static UChar defaultConverter(UChar character)
{
@@ -263,7 +271,41 @@ private:
return character;
}
- unsigned avalancheBits() const
+ ALWAYS_INLINE static constexpr unsigned avalancheBits3(unsigned hash)
+ {
+ return hash ^ (hash << 10);
+ }
+
+ ALWAYS_INLINE static constexpr unsigned avalancheBits2(unsigned hash)
+ {
+ return avalancheBits3(hash + (hash >> 15));
+ }
+
+ ALWAYS_INLINE static constexpr unsigned avalancheBits1(unsigned hash)
+ {
+ return avalancheBits2(hash ^ (hash << 2));
+ }
+
+ ALWAYS_INLINE static constexpr unsigned avalancheBits0(unsigned hash)
+ {
+ return avalancheBits1(hash + (hash >> 5));
+ }
+
+ ALWAYS_INLINE static constexpr unsigned avalancheBits(unsigned hash)
+ {
+ return avalancheBits0(hash ^ (hash << 3));
+ }
+
+ // This avoids ever returning a hash code of 0, since that is used to
+ // signal "hash not computed yet". Setting the high bit maintains
+ // reasonable fidelity to a hash code of 0 because it is likely to yield
+ // exactly 0 when hash lookup masks out the high bits.
+ ALWAYS_INLINE static constexpr unsigned avoidZero(unsigned hash)
+ {
+ return hash ? hash : (0x80000000 >> StringHasher::flagCount);
+ }
+
+ unsigned processPendingCharacter() const
{
unsigned result = m_hash;
@@ -273,15 +315,49 @@ private:
result ^= result << 11;
result += result >> 17;
}
+ return result;
+ }
- // Force "avalanching" of final 31 bits.
- result ^= result << 3;
- result += result >> 5;
- result ^= result << 2;
- result += result >> 15;
- result ^= result << 10;
- return result;
+ // FIXME: This code limits itself to the older, more limited C++11 constexpr capabilities, using
+ // recursion instead of looping, for example. Would be nice to rewrite this in a simpler way
+ // once we no longer need to support compilers like GCC 4.9 that do not yet support it.
+ static constexpr unsigned calculateWithRemainingLastCharacter1(unsigned hash)
+ {
+ return hash + (hash >> 17);
+ }
+
+ static constexpr unsigned calculateWithRemainingLastCharacter0(unsigned hash)
+ {
+ return calculateWithRemainingLastCharacter1((hash << 11) ^ hash);
+ }
+
+ static constexpr unsigned calculateWithRemainingLastCharacter(unsigned hash, unsigned character)
+ {
+ return calculateWithRemainingLastCharacter0(hash + character);
+ }
+
+ static constexpr unsigned calculate1(unsigned hash)
+ {
+ return hash + (hash >> 11);
+ }
+
+ static constexpr unsigned calculate0(unsigned hash, unsigned secondCharacter)
+ {
+ return calculate1((hash << 16) ^ ((secondCharacter << 11) ^ hash));
+ }
+
+ static constexpr unsigned calculate(unsigned hash, unsigned firstCharacter, unsigned secondCharacter)
+ {
+ return calculate0(hash + firstCharacter, secondCharacter);
+ }
+
+ static constexpr unsigned computeLiteralHashImpl(unsigned hash, unsigned index, const char* characters, unsigned length)
+ {
+ return (index == length)
+ ? hash : ((index + 1) == length)
+ ? calculateWithRemainingLastCharacter(hash, characters[index])
+ : computeLiteralHashImpl(calculate(hash, characters[index], characters[index + 1]), index + 2, characters, length);
}
unsigned m_hash;
@@ -289,8 +365,25 @@ private:
UChar m_pendingCharacter;
};
+class IntegerHasher {
+public:
+ void add(unsigned integer)
+ {
+ m_underlyingHasher.addCharactersAssumingAligned(integer, integer >> 16);
+ }
+
+ unsigned hash() const
+ {
+ return m_underlyingHasher.hash();
+ }
+
+private:
+ StringHasher m_underlyingHasher;
+};
+
} // namespace WTF
+using WTF::IntegerHasher;
using WTF::StringHasher;
-#endif // WTF_StringHasher_h
+#endif // WTF_Hasher_h
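An illustrative usage sketch (not from the patch): the new constexpr entry points let string-literal hashes be computed at compile time, and IntegerHasher reuses the same avalanche steps for 32-bit values.

#include <wtf/Hasher.h>

// Compile-time literal hashing; finalize() routes through avoidZero(), so the
// result is never 0 ("hash not computed yet").
static constexpr unsigned exampleHash = WTF::StringHasher::computeLiteralHash("example");
static_assert(exampleHash, "a finalized hash is never zero");

unsigned hashTwoInts(unsigned a, unsigned b)
{
    WTF::IntegerHasher hasher;
    hasher.add(a);
    hasher.add(b);
    return hasher.hash();
}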
diff --git a/Source/WTF/wtf/HexNumber.h b/Source/WTF/wtf/HexNumber.h
index b698dd50e..c67b71edc 100644
--- a/Source/WTF/wtf/HexNumber.h
+++ b/Source/WTF/wtf/HexNumber.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2011 Research In Motion Limited. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -17,33 +18,29 @@
* Boston, MA 02110-1301, USA.
*/
-#ifndef HexNumber_h
-#define HexNumber_h
+#pragma once
#include <wtf/text/StringConcatenate.h>
namespace WTF {
-enum HexConversionMode {
- Lowercase,
- Uppercase
-};
+enum HexConversionMode { Lowercase, Uppercase };
namespace Internal {
-const LChar lowerHexDigits[17] = "0123456789abcdef";
-const LChar upperHexDigits[17] = "0123456789ABCDEF";
inline const LChar* hexDigitsForMode(HexConversionMode mode)
{
- return mode == Lowercase ? lowerHexDigits : upperHexDigits;
+ static const LChar lowercaseHexDigits[17] = "0123456789abcdef";
+ static const LChar uppercaseHexDigits[17] = "0123456789ABCDEF";
+ return mode == Lowercase ? lowercaseHexDigits : uppercaseHexDigits;
}
-}; // namespace Internal
+} // namespace Internal
template<typename T>
inline void appendByteAsHex(unsigned char byte, T& destination, HexConversionMode mode = Uppercase)
{
- const LChar* hexDigits = Internal::hexDigitsForMode(mode);
+ auto* hexDigits = Internal::hexDigitsForMode(mode);
destination.append(hexDigits[byte >> 4]);
destination.append(hexDigits[byte & 0xF]);
}
@@ -51,7 +48,7 @@ inline void appendByteAsHex(unsigned char byte, T& destination, HexConversionMod
template<typename T>
inline void placeByteAsHexCompressIfPossible(unsigned char byte, T& destination, unsigned& index, HexConversionMode mode = Uppercase)
{
- const LChar* hexDigits = Internal::hexDigitsForMode(mode);
+ auto* hexDigits = Internal::hexDigitsForMode(mode);
if (byte >= 0x10)
destination[index++] = hexDigits[byte >> 4];
destination[index++] = hexDigits[byte & 0xF];
@@ -60,7 +57,7 @@ inline void placeByteAsHexCompressIfPossible(unsigned char byte, T& destination,
template<typename T>
inline void placeByteAsHex(unsigned char byte, T& destination, HexConversionMode mode = Uppercase)
{
- const LChar* hexDigits = Internal::hexDigitsForMode(mode);
+ auto* hexDigits = Internal::hexDigitsForMode(mode);
*destination++ = hexDigits[byte >> 4];
*destination++ = hexDigits[byte & 0xF];
}
@@ -68,7 +65,7 @@ inline void placeByteAsHex(unsigned char byte, T& destination, HexConversionMode
template<typename T>
inline void appendUnsignedAsHex(unsigned number, T& destination, HexConversionMode mode = Uppercase)
{
- const LChar* hexDigits = Internal::hexDigitsForMode(mode);
+ auto* hexDigits = Internal::hexDigitsForMode(mode);
Vector<LChar, 8> result;
do {
result.append(hexDigits[number % 16]);
@@ -78,6 +75,20 @@ inline void appendUnsignedAsHex(unsigned number, T& destination, HexConversionMo
result.reverse();
destination.append(result.data(), result.size());
}
+
+template<typename T>
+inline void appendUnsigned64AsHex(uint64_t number, T& destination, HexConversionMode mode = Uppercase)
+{
+ auto* hexDigits = Internal::hexDigitsForMode(mode);
+ Vector<LChar, 8> result;
+ do {
+ result.append(hexDigits[number % 16]);
+ number >>= 4;
+ } while (number > 0);
+
+ result.reverse();
+ destination.append(result.data(), result.size());
+}
// Same as appendUnsignedAsHex, but using exactly 'desiredDigits' for the conversion.
template<typename T>
@@ -85,7 +96,7 @@ inline void appendUnsignedAsHexFixedSize(unsigned number, T& destination, unsign
{
ASSERT(desiredDigits);
- const LChar* hexDigits = Internal::hexDigitsForMode(mode);
+ auto* hexDigits = Internal::hexDigitsForMode(mode);
Vector<LChar, 8> result;
do {
result.append(hexDigits[number % 16]);
@@ -105,5 +116,3 @@ using WTF::appendUnsignedAsHexFixedSize;
using WTF::placeByteAsHex;
using WTF::placeByteAsHexCompressIfPossible;
using WTF::Lowercase;
-
-#endif // HexNumber_h
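A small illustrative sketch (not from the patch): the destination only needs matching append() overloads, so a Vector<LChar> works as well as a string builder.

#include <cstddef>
#include <cstdint>
#include <wtf/HexNumber.h>
#include <wtf/Vector.h>

Vector<LChar> hexDump(const uint8_t* bytes, size_t size)
{
    Vector<LChar> out;
    for (size_t i = 0; i < size; ++i)
        WTF::appendByteAsHex(bytes[i], out, WTF::Lowercase); // two digits per byte
    return out;
}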
diff --git a/Source/WTF/wtf/Indenter.h b/Source/WTF/wtf/Indenter.h
new file mode 100644
index 000000000..e174b824a
--- /dev/null
+++ b/Source/WTF/wtf/Indenter.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Indenter_h
+#define Indenter_h
+
+#include <wtf/FilePrintStream.h>
+#include <wtf/text/WTFString.h>
+
+namespace WTF {
+
+class Indenter {
+public:
+ Indenter(unsigned count = 0, String string = ASCIILiteral(" "))
+ : m_count(count)
+ , m_string(string)
+ { }
+
+ Indenter(const Indenter& other)
+ : m_count(other.m_count)
+ , m_string(other.m_string)
+ { }
+
+ void dump(PrintStream& out) const
+ {
+ unsigned levels = m_count;
+ while (levels--)
+ out.print(m_string);
+ }
+
+ unsigned operator++() { return ++m_count; }
+ unsigned operator++(int) { return m_count++; }
+ unsigned operator--() { return --m_count; }
+ unsigned operator--(int) { return m_count--; }
+
+private:
+ unsigned m_count;
+ String m_string;
+};
+
+} // namespace WTF
+
+using WTF::Indenter;
+
+#endif // Indenter_h
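An illustrative sketch (not from the patch), assuming WTF's usual print machinery picks up the dump(PrintStream&) member:

#include <wtf/DataLog.h>
#include <wtf/Indenter.h>

void dumpTwoLevels(WTF::Indenter& indent)
{
    dataLog(indent, "node\n");
    ++indent;                  // children print one level deeper
    dataLog(indent, "child\n");
    --indent;
}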
diff --git a/Source/WTF/wtf/IndexMap.h b/Source/WTF/wtf/IndexMap.h
new file mode 100644
index 000000000..02d61017a
--- /dev/null
+++ b/Source/WTF/wtf/IndexMap.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/Vector.h>
+
+namespace WTF {
+
+// This is a map for keys that have an index(). It's super efficient for BasicBlocks. It's only
+// efficient for Values if you don't create too many of these maps, since Values can have very
+// sparse indices and there are a lot of Values.
+
+template<typename Key, typename Value>
+class IndexMap {
+public:
+ explicit IndexMap(size_t size = 0)
+ {
+ m_vector.fill(Value(), size);
+ }
+
+ void resize(size_t size)
+ {
+ m_vector.fill(Value(), size);
+ }
+
+ void clear()
+ {
+ m_vector.fill(Value(), m_vector.size());
+ }
+
+ size_t size() const { return m_vector.size(); }
+
+ Value& operator[](size_t index)
+ {
+ return m_vector[index];
+ }
+
+ const Value& operator[](size_t index) const
+ {
+ return m_vector[index];
+ }
+
+ Value& operator[](Key* key)
+ {
+ return m_vector[key->index()];
+ }
+
+ const Value& operator[](Key* key) const
+ {
+ return m_vector[key->index()];
+ }
+
+private:
+ Vector<Value> m_vector;
+};
+
+} // namespace WTF
+
+using WTF::IndexMap;
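An illustrative sketch (not from the patch); Node is a hypothetical key type standing in for the BasicBlocks mentioned in the header comment, or anything else exposing index():

#include <wtf/IndexMap.h>

struct Node {
    unsigned index() const { return m_index; }
    unsigned m_index { 0 };
};

void countVisit(WTF::IndexMap<Node, int>& counts, Node* node)
{
    // The map must be constructed (or resized) to cover the largest index;
    // entries are value-initialized, so the ints start at 0.
    counts[node]++;
}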
diff --git a/Source/WTF/wtf/IndexSet.h b/Source/WTF/wtf/IndexSet.h
new file mode 100644
index 000000000..61bd04f2d
--- /dev/null
+++ b/Source/WTF/wtf/IndexSet.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/BitVector.h>
+#include <wtf/CommaPrinter.h>
+
+namespace WTF {
+
+// This is a set for things that have an index(). It's super efficient for BasicBlocks. It's only
+// efficient for Values if you don't create too many of these sets, since Values can have very sparse
+// indices and there are a lot of Values.
+
+// If you want a set of BasicBlocks, you do IndexSet<BasicBlock>. So, T = BasicBlock.
+template<typename T>
+class IndexSet {
+public:
+ IndexSet()
+ {
+ }
+
+ bool add(T* value)
+ {
+ return !m_set.set(value->index());
+ }
+
+ template<typename Iterable>
+ bool addAll(const Iterable& iterable)
+ {
+ bool result = false;
+ for (T* value : iterable)
+ result |= add(value);
+ return result;
+ }
+
+ bool remove(T* value)
+ {
+ return m_set.clear(value->index());
+ }
+
+ bool contains(T* value) const
+ {
+ if (!value)
+ return false;
+ return m_set.get(value->index());
+ }
+
+ size_t size() const
+ {
+ return m_set.bitCount();
+ }
+
+ bool isEmpty() const
+ {
+ return !size();
+ }
+
+ template<typename CollectionType>
+ class Iterable {
+ public:
+ Iterable(const CollectionType& collection, const BitVector& set)
+ : m_collection(collection)
+ , m_set(set)
+ {
+ }
+
+ class iterator {
+ public:
+ iterator()
+ : m_collection(nullptr)
+ {
+ }
+
+ iterator(const CollectionType& collection, BitVector::iterator iter)
+ : m_collection(&collection)
+ , m_iter(iter)
+ {
+ }
+
+ T* operator*()
+ {
+ return m_collection->at(*m_iter);
+ }
+
+ iterator& operator++()
+ {
+ ++m_iter;
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ return m_iter == other.m_iter;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ const CollectionType* m_collection;
+ BitVector::iterator m_iter;
+ };
+
+ iterator begin() const { return iterator(m_collection, m_set.begin()); }
+ iterator end() const { return iterator(m_collection, m_set.end()); }
+
+ private:
+ const CollectionType& m_collection;
+ const BitVector& m_set;
+ };
+
+ // For basic blocks, you do:
+ // indexSet.values(procedure);
+ //
+ // For values, you do:
+ // indexSet.values(procedure.values());
+ template<typename CollectionType>
+ Iterable<CollectionType> values(const CollectionType& collection) const
+ {
+ return Iterable<CollectionType>(collection, indices());
+ }
+
+ const BitVector& indices() const { return m_set; }
+
+ void dump(PrintStream& out) const
+ {
+ CommaPrinter comma;
+ for (size_t index : indices())
+ out.print(comma, T::dumpPrefix, index);
+ }
+
+private:
+ BitVector m_set;
+};
+
+} // namespace WTF
+
+using WTF::IndexSet;
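A companion sketch (not from the patch) for IndexSet, which keeps one bit per index in a BitVector:

#include <wtf/IndexSet.h>

template<typename T>
bool markVisited(WTF::IndexSet<T>& visited, T* value)
{
    // add() sets the bit for value->index() and returns true only if it was
    // not already set, so this doubles as a "first visit" check.
    return visited.add(value);
}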
diff --git a/Source/WTF/wtf/IndexSparseSet.h b/Source/WTF/wtf/IndexSparseSet.h
new file mode 100644
index 000000000..f5bfbb32e
--- /dev/null
+++ b/Source/WTF/wtf/IndexSparseSet.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef IndexSparseSet_h
+#define IndexSparseSet_h
+
+#include <wtf/Vector.h>
+
+namespace WTF {
+
+// IndexSparseSet is an efficient set of integers whose values must lie
+// between zero and size() - 1.
+//
+// The implementation uses the Briggs sparse set representation. We allocate
+// a map covering 0 to size() - 1 so lookups are O(1), but we never initialize
+// that memory. When a value is added to the set, it is appended to a list and
+// its map slot is set to the value's position in the list.
+//
+// The assumption here is that only a sparse subset of the numbers is live at
+// any time.
+
+template<typename OverflowHandler = CrashOnOverflow>
+class IndexSparseSet {
+ typedef Vector<unsigned, 0, OverflowHandler> ValueList;
+public:
+ explicit IndexSparseSet(unsigned size);
+
+ bool add(unsigned);
+ bool remove(unsigned);
+ void clear();
+
+ unsigned size() const;
+ bool isEmpty() const;
+ bool contains(unsigned) const;
+
+ typedef typename ValueList::const_iterator const_iterator;
+ const_iterator begin() const;
+ const_iterator end() const;
+
+private:
+ Vector<unsigned, 0, OverflowHandler, 1> m_map;
+ ValueList m_values;
+};
+
+template<typename OverflowHandler>
+inline IndexSparseSet<OverflowHandler>::IndexSparseSet(unsigned size)
+{
+ m_map.resize(size);
+}
+
+template<typename OverflowHandler>
+inline bool IndexSparseSet<OverflowHandler>::add(unsigned value)
+{
+ if (contains(value))
+ return false;
+
+ unsigned newPosition = m_values.size();
+ m_values.append(value);
+ m_map[value] = newPosition;
+ return true;
+}
+
+template<typename OverflowHandler>
+inline bool IndexSparseSet<OverflowHandler>::remove(unsigned value)
+{
+ unsigned position = m_map[value];
+ if (position >= m_values.size())
+ return false;
+
+ if (m_values[position] == value) {
+ unsigned lastValue = m_values.last();
+ m_values[position] = lastValue;
+ m_map[lastValue] = position;
+ m_values.removeLast();
+ return true;
+ }
+
+ return false;
+}
+
+template<typename OverflowHandler>
+void IndexSparseSet<OverflowHandler>::clear()
+{
+ m_values.resize(0);
+}
+
+template<typename OverflowHandler>
+unsigned IndexSparseSet<OverflowHandler>::size() const
+{
+ return m_values.size();
+}
+
+template<typename OverflowHandler>
+bool IndexSparseSet<OverflowHandler>::isEmpty() const
+{
+ return !size();
+}
+
+template<typename OverflowHandler>
+bool IndexSparseSet<OverflowHandler>::contains(unsigned value) const
+{
+ unsigned position = m_map[value];
+ if (position >= m_values.size())
+ return false;
+
+ return m_values[position] == value;
+}
+
+template<typename OverflowHandler>
+auto IndexSparseSet<OverflowHandler>::begin() const -> const_iterator
+{
+ return m_values.begin();
+}
+
+template<typename OverflowHandler>
+auto IndexSparseSet<OverflowHandler>::end() const -> const_iterator
+{
+ return m_values.end();
+}
+
+} // namespace WTF
+
+using WTF::IndexSparseSet;
+
+#endif // IndexSparseSet_h
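An illustrative sketch (not from the patch) of the contract described in the comment above; all values must be below the size passed to the constructor:

#include <wtf/IndexSparseSet.h>

void sparseSetExample()
{
    WTF::IndexSparseSet<> live(128); // holds values in [0, 128)
    live.add(3);
    live.add(17);
    live.remove(3);
    ASSERT(live.contains(17));
    ASSERT(!live.contains(3)); // the stale map slot is validated against the value list
    live.clear();              // O(1): only the value list shrinks; the map stays uninitialized
}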
diff --git a/Source/WTF/wtf/IndexedContainerIterator.h b/Source/WTF/wtf/IndexedContainerIterator.h
new file mode 100644
index 000000000..3e9b9e37a
--- /dev/null
+++ b/Source/WTF/wtf/IndexedContainerIterator.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <type_traits>
+
+namespace WTF {
+
+template<class Container>
+class IndexedContainerIterator {
+public:
+ IndexedContainerIterator()
+ : m_container(nullptr)
+ , m_index(0)
+ {
+ }
+
+ IndexedContainerIterator(const Container& container, unsigned index)
+ : m_container(&container)
+ , m_index(findNext(index))
+ {
+ }
+
+ auto operator*() -> typename std::result_of<decltype(&Container::at)(const Container, unsigned)>::type
+ {
+ return m_container->at(m_index);
+ }
+
+ IndexedContainerIterator& operator++()
+ {
+ m_index = findNext(m_index + 1);
+ return *this;
+ }
+
+ bool operator==(const IndexedContainerIterator& other) const
+ {
+ ASSERT(m_container == other.m_container);
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const IndexedContainerIterator& other) const
+ {
+ return !(*this == other);
+ }
+
+private:
+ unsigned findNext(unsigned index)
+ {
+ while (index < m_container->size() && !m_container->at(index))
+ index++;
+ return index;
+ }
+
+ const Container* m_container;
+ unsigned m_index;
+};
+
+} // namespace WTF
diff --git a/Source/WTF/wtf/InlineASM.h b/Source/WTF/wtf/InlineASM.h
index 0ecc8824d..965e28176 100644
--- a/Source/WTF/wtf/InlineASM.h
+++ b/Source/WTF/wtf/InlineASM.h
@@ -26,8 +26,6 @@
#ifndef InlineASM_h
#define InlineASM_h
-#include <wtf/Platform.h>
-
/* asm directive helpers */
#if OS(DARWIN) || (OS(WINDOWS) && CPU(X86))
@@ -82,10 +80,11 @@
#if OS(DARWIN)
#define LOCAL_LABEL_STRING(name) "L" #name
#elif OS(LINUX) \
- || OS(FREEBSD) \
- || OS(OPENBSD) \
- || OS(HURD) \
- || OS(NETBSD)
+ || OS(FREEBSD) \
+ || OS(OPENBSD) \
+ || OS(HURD) \
+ || OS(NETBSD) \
+ || COMPILER(MINGW)
// GNU as-compatible syntax.
#define LOCAL_LABEL_STRING(name) ".L" #name
#endif
diff --git a/Source/WTF/wtf/Insertion.h b/Source/WTF/wtf/Insertion.h
index 9c4bccfb7..901d8bb30 100644
--- a/Source/WTF/wtf/Insertion.h
+++ b/Source/WTF/wtf/Insertion.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,15 +32,17 @@ template<typename T>
class Insertion {
public:
Insertion() { }
-
- Insertion(size_t index, T element)
+
+ template<typename U>
+ Insertion(size_t index, U&& element)
: m_index(index)
- , m_element(element)
+ , m_element(std::forward<U>(element))
{
}
size_t index() const { return m_index; }
- T element() const { return m_element; }
+ const T& element() const { return m_element; }
+ T& element() { return m_element; }
bool operator<(const Insertion& other) const
{
@@ -53,21 +55,26 @@ private:
};
template<typename TargetVectorType, typename InsertionVectorType>
-void executeInsertions(TargetVectorType& target, InsertionVectorType& insertions)
+size_t executeInsertions(TargetVectorType& target, InsertionVectorType& insertions)
{
- if (!insertions.size())
- return;
- target.grow(target.size() + insertions.size());
+ size_t numInsertions = insertions.size();
+ if (!numInsertions)
+ return 0;
+ size_t originalTargetSize = target.size();
+ target.grow(target.size() + numInsertions);
size_t lastIndex = target.size();
- for (size_t indexInInsertions = insertions.size(); indexInInsertions--;) {
+ for (size_t indexInInsertions = numInsertions; indexInInsertions--;) {
+ ASSERT(!indexInInsertions || insertions[indexInInsertions].index() >= insertions[indexInInsertions - 1].index());
+ ASSERT_UNUSED(originalTargetSize, insertions[indexInInsertions].index() <= originalTargetSize);
size_t firstIndex = insertions[indexInInsertions].index() + indexInInsertions;
size_t indexOffset = indexInInsertions + 1;
for (size_t i = lastIndex; --i > firstIndex;)
- target[i] = target[i - indexOffset];
- target[firstIndex] = insertions[indexInInsertions].element();
+ target[i] = WTFMove(target[i - indexOffset]);
+ target[firstIndex] = WTFMove(insertions[indexInInsertions].element());
lastIndex = firstIndex;
}
insertions.resize(0);
+ return numInsertions;
}
} // namespace WTF
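An illustrative sketch (not from the patch): insertions must already be ordered by index (the new assertions check this), and every index refers to a position in the original vector:

#include <wtf/Insertion.h>
#include <wtf/Vector.h>

void insertionExample()
{
    Vector<int> values { 10, 20, 30 };
    Vector<WTF::Insertion<int>> insertions;
    insertions.append(WTF::Insertion<int>(1, 15)); // before the original 20
    insertions.append(WTF::Insertion<int>(3, 35)); // past the original last element
    size_t numInserted = WTF::executeInsertions(values, insertions);
    ASSERT_UNUSED(numInserted, numInserted == 2);
    // values is now { 10, 15, 20, 30, 35 } and insertions has been emptied.
}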
diff --git a/Source/WTF/wtf/IteratorAdaptors.h b/Source/WTF/wtf/IteratorAdaptors.h
index 9115d4dad..6c5aa01ec 100644
--- a/Source/WTF/wtf/IteratorAdaptors.h
+++ b/Source/WTF/wtf/IteratorAdaptors.h
@@ -26,15 +26,17 @@
#ifndef WTF_IteratorAdaptors_h
#define WTF_IteratorAdaptors_h
+#include <type_traits>
+
namespace WTF {
template<typename Predicate, typename Iterator>
class FilterIterator {
public:
FilterIterator(Predicate pred, Iterator begin, Iterator end)
- : m_pred(std::move(pred))
- , m_iter(std::move(begin))
- , m_end(std::move(end))
+ : m_pred(WTFMove(pred))
+ , m_iter(WTFMove(begin))
+ , m_end(WTFMove(end))
{
while (m_iter != m_end && !m_pred(*m_iter))
++m_iter;
@@ -50,7 +52,7 @@ public:
return *this;
}
- decltype(*std::declval<Iterator>()) operator*() const
+ const typename std::remove_const<decltype(*std::declval<Iterator>())>::type operator*() const
{
ASSERT(m_iter != m_end);
ASSERT(m_pred(*m_iter));
@@ -75,9 +77,9 @@ inline FilterIterator<Predicate, Iterator> makeFilterIterator(Predicate&& pred,
template<typename Transform, typename Iterator>
class TransformIterator {
public:
- TransformIterator(const Transform& transform, const Iterator& iter)
- : m_transform(std::move(transform))
- , m_iter(std::move(iter))
+ TransformIterator(Transform&& transform, Iterator&& iter)
+ : m_transform(WTFMove(transform))
+ , m_iter(WTFMove(iter))
{
}
@@ -87,7 +89,7 @@ public:
return *this;
}
- decltype(std::declval<Transform>()(*std::declval<Iterator>())) operator*() const
+ const typename std::remove_const<decltype(std::declval<Transform>()(*std::declval<Iterator>()))>::type operator*() const
{
return m_transform(*m_iter);
}
@@ -103,7 +105,7 @@ private:
template<typename Transform, typename Iterator>
inline TransformIterator<Transform, Iterator> makeTransformIterator(Transform&& transform, Iterator&& iter)
{
- return TransformIterator<Transform, Iterator>(std::forward<Transform>(transform), std::forward<Iterator>(iter));
+ return TransformIterator<Transform, Iterator>(WTFMove(transform), WTFMove(iter));
}
} // namespace WTF
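An illustrative sketch (not from the patch), assuming the usual makeFilterIterator() helper; the same predicate object is shared so the begin and end adaptors have the same type:

#include <wtf/IteratorAdaptors.h>
#include <wtf/Vector.h>

int sumOfEvens(const Vector<int>& values)
{
    auto isEven = [](int value) { return !(value % 2); };
    auto it = WTF::makeFilterIterator(isEven, values.begin(), values.end());
    auto end = WTF::makeFilterIterator(isEven, values.end(), values.end());
    int sum = 0;
    for (; it != end; ++it)
        sum += *it; // odd values are skipped by the predicate
    return sum;
}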
diff --git a/Source/WTF/wtf/IteratorRange.h b/Source/WTF/wtf/IteratorRange.h
index 295c21422..84df77e09 100644
--- a/Source/WTF/wtf/IteratorRange.h
+++ b/Source/WTF/wtf/IteratorRange.h
@@ -32,8 +32,8 @@ template<typename Iterator>
class IteratorRange {
public:
IteratorRange(Iterator begin, Iterator end)
- : m_begin(std::move(begin))
- , m_end(std::move(end))
+ : m_begin(WTFMove(begin))
+ , m_end(WTFMove(end))
{
}
diff --git a/Source/WTF/wtf/LEBDecoder.h b/Source/WTF/wtf/LEBDecoder.h
new file mode 100644
index 000000000..e14a52796
--- /dev/null
+++ b/Source/WTF/wtf/LEBDecoder.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Compiler.h"
+#include <algorithm>
+
+// This file contains a bunch of helper functions for decoding LEB numbers.
+// See https://en.wikipedia.org/wiki/LEB128 for more information about the
+// LEB format.
+
+namespace WTF { namespace LEBDecoder {
+
+template<typename T>
+inline bool WARN_UNUSED_RETURN decodeUInt(const uint8_t* bytes, size_t length, size_t& offset, T& result)
+{
+ const size_t numBits = sizeof(T) * CHAR_BIT;
+ const size_t maxByteLength = (numBits - 1) / 7 + 1; // numBits / 7 rounding up.
+ if (length <= offset)
+ return false;
+ result = 0;
+ unsigned shift = 0;
+ size_t last = std::min(maxByteLength, length - offset) - 1;
+ for (unsigned i = 0; true; ++i) {
+ uint8_t byte = bytes[offset++];
+ result |= static_cast<T>(byte & 0x7f) << shift;
+ shift += 7;
+ if (!(byte & 0x80))
+ return true;
+ if (i == last)
+ return false;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return true;
+}
+
+template<typename T>
+inline bool WARN_UNUSED_RETURN decodeInt(const uint8_t* bytes, size_t length, size_t& offset, T& result)
+{
+ const size_t numBits = sizeof(T) * CHAR_BIT;
+ const size_t maxByteLength = (numBits - 1) / 7 + 1; // numBits / 7 rounding up.
+ if (length <= offset)
+ return false;
+ result = 0;
+ unsigned shift = 0;
+ size_t last = std::min(maxByteLength, length - offset) - 1;
+ uint8_t byte;
+ for (unsigned i = 0; true; ++i) {
+ byte = bytes[offset++];
+ result |= static_cast<T>(byte & 0x7f) << shift;
+ shift += 7;
+ if (!(byte & 0x80))
+ break;
+ if (i == last)
+ return false;
+ }
+
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ if (shift < numBits && (byte & 0x40))
+ result = static_cast<T>(static_cast<UnsignedT>(result) | (static_cast<UnsignedT>(-1) << shift));
+ return true;
+}
+
+inline bool WARN_UNUSED_RETURN decodeUInt32(const uint8_t* bytes, size_t length, size_t& offset, uint32_t& result)
+{
+ return decodeUInt<uint32_t>(bytes, length, offset, result);
+}
+
+inline bool WARN_UNUSED_RETURN decodeUInt64(const uint8_t* bytes, size_t length, size_t& offset, uint64_t& result)
+{
+ return decodeUInt<uint64_t>(bytes, length, offset, result);
+}
+
+inline bool WARN_UNUSED_RETURN decodeInt32(const uint8_t* bytes, size_t length, size_t& offset, int32_t& result)
+{
+ return decodeInt<int32_t>(bytes, length, offset, result);
+}
+
+inline bool WARN_UNUSED_RETURN decodeInt64(const uint8_t* bytes, size_t length, size_t& offset, int64_t& result)
+{
+ return decodeInt<int64_t>(bytes, length, offset, result);
+}
+
+} } // WTF::LEBDecoder
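An illustrative decoding sketch (not from the patch), using the standard LEB128 example where the bytes 0xE5 0x8E 0x26 encode 624485:

#include <cstddef>
#include <cstdint>
#include <wtf/LEBDecoder.h>

bool decodeExample()
{
    const uint8_t bytes[] = { 0xE5, 0x8E, 0x26 };
    size_t offset = 0;
    uint32_t result = 0;
    if (!WTF::LEBDecoder::decodeUInt32(bytes, sizeof(bytes), offset, result))
        return false;
    // Decoding stops at the first byte without the continuation bit (0x80),
    // so result == 624485 and offset == 3.
    return result == 624485 && offset == 3;
}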
diff --git a/Source/WTF/wtf/ListDump.h b/Source/WTF/wtf/ListDump.h
index 7e996a331..878cbe10c 100644
--- a/Source/WTF/wtf/ListDump.h
+++ b/Source/WTF/wtf/ListDump.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -43,7 +43,7 @@ public:
void dump(PrintStream& out) const
{
- for (typename T::const_iterator iter = m_list.begin(); iter != m_list.end(); ++iter)
+ for (auto iter = m_list.begin(); iter != m_list.end(); ++iter)
out.print(m_comma, *iter);
}
@@ -53,6 +53,26 @@ private:
};
template<typename T>
+class PointerListDump {
+public:
+ PointerListDump(const T& list, const char* comma)
+ : m_list(list)
+ , m_comma(comma)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ for (auto iter = m_list.begin(); iter != m_list.end(); ++iter)
+ out.print(m_comma, pointerDump(*iter));
+ }
+
+private:
+ const T& m_list;
+ CommaPrinter m_comma;
+};
+
+template<typename T>
class MapDump {
public:
MapDump(const T& map, const char* arrow, const char* comma)
@@ -64,7 +84,7 @@ public:
void dump(PrintStream& out) const
{
- for (typename T::const_iterator iter = m_map.begin(); iter != m_map.end(); ++iter)
+ for (auto iter = m_map.begin(); iter != m_map.end(); ++iter)
out.print(m_comma, iter->key, m_arrow, iter->value);
}
@@ -80,6 +100,12 @@ ListDump<T> listDump(const T& list, const char* comma = ", ")
return ListDump<T>(list, comma);
}
+template<typename T>
+PointerListDump<T> pointerListDump(const T& list, const char* comma = ", ")
+{
+ return PointerListDump<T>(list, comma);
+}
+
template<typename T, typename Comparator>
CString sortedListDump(const T& list, const Comparator& comparator, const char* comma = ", ")
{
@@ -109,7 +135,7 @@ template<typename T, typename Comparator>
CString sortedMapDump(const T& map, const Comparator& comparator, const char* arrow = "=>", const char* comma = ", ")
{
Vector<typename T::KeyType> keys;
- for (typename T::const_iterator iter = map.begin(); iter != map.end(); ++iter)
+ for (auto iter = map.begin(); iter != map.end(); ++iter)
keys.append(iter->key);
std::sort(keys.begin(), keys.end(), comparator);
StringPrintStream out;
@@ -119,11 +145,42 @@ CString sortedMapDump(const T& map, const Comparator& comparator, const char* ar
return out.toCString();
}
+template<typename T, typename U>
+class ListDumpInContext {
+public:
+ ListDumpInContext(const T& list, U* context, const char* comma)
+ : m_list(list)
+ , m_context(context)
+ , m_comma(comma)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ for (auto iter = m_list.begin(); iter != m_list.end(); ++iter)
+ out.print(m_comma, inContext(*iter, m_context));
+ }
+
+private:
+ const T& m_list;
+ U* m_context;
+ CommaPrinter m_comma;
+};
+
+template<typename T, typename U>
+ListDumpInContext<T, U> listDumpInContext(
+ const T& list, U* context, const char* comma = ", ")
+{
+ return ListDumpInContext<T, U>(list, context, comma);
+}
+
} // namespace WTF
using WTF::listDump;
-using WTF::sortedListDump;
+using WTF::listDumpInContext;
using WTF::mapDump;
+using WTF::pointerListDump;
+using WTF::sortedListDump;
using WTF::sortedMapDump;
#endif // ListDump_h
diff --git a/Source/WTF/wtf/ListHashSet.h b/Source/WTF/wtf/ListHashSet.h
index 912f0f033..278605215 100644
--- a/Source/WTF/wtf/ListHashSet.h
+++ b/Source/WTF/wtf/ListHashSet.h
@@ -19,12 +19,9 @@
*
*/
-#ifndef WTF_ListHashSet_h
-#define WTF_ListHashSet_h
+#pragma once
#include <wtf/HashSet.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
namespace WTF {
@@ -38,22 +35,20 @@ namespace WTF {
// guaranteed safe against mutation of the ListHashSet, except for
// removal of the item currently pointed to by a given iterator.
-template<typename Value, size_t inlineCapacity, typename HashFunctions> class ListHashSet;
+template<typename Value, typename HashFunctions> class ListHashSet;
-template<typename ValueArg, size_t inlineCapacity, typename HashArg> class ListHashSetIterator;
-template<typename ValueArg, size_t inlineCapacity, typename HashArg> class ListHashSetConstIterator;
+template<typename ValueArg, typename HashArg> class ListHashSetIterator;
+template<typename ValueArg, typename HashArg> class ListHashSetConstIterator;
-template<typename ValueArg, size_t inlineCapacity> struct ListHashSetNode;
-template<typename ValueArg, size_t inlineCapacity> struct ListHashSetNodeAllocator;
+template<typename ValueArg> struct ListHashSetNode;
template<typename HashArg> struct ListHashSetNodeHashFunctions;
template<typename HashArg> struct ListHashSetTranslator;
-template<typename ValueArg, size_t inlineCapacity = 256, typename HashArg = typename DefaultHash<ValueArg>::Hash> class ListHashSet {
+template<typename ValueArg, typename HashArg = typename DefaultHash<ValueArg>::Hash> class ListHashSet {
WTF_MAKE_FAST_ALLOCATED;
private:
- typedef ListHashSetNode<ValueArg, inlineCapacity> Node;
- typedef ListHashSetNodeAllocator<ValueArg, inlineCapacity> NodeAllocator;
+ typedef ListHashSetNode<ValueArg> Node;
typedef HashTraits<Node*> NodeTraits;
typedef ListHashSetNodeHashFunctions<HashArg> NodeHash;
@@ -64,24 +59,26 @@ private:
public:
typedef ValueArg ValueType;
- typedef ListHashSetIterator<ValueType, inlineCapacity, HashArg> iterator;
- typedef ListHashSetConstIterator<ValueType, inlineCapacity, HashArg> const_iterator;
- friend class ListHashSetConstIterator<ValueType, inlineCapacity, HashArg>;
+ typedef ListHashSetIterator<ValueType, HashArg> iterator;
+ typedef ListHashSetConstIterator<ValueType, HashArg> const_iterator;
+ friend class ListHashSetConstIterator<ValueType, HashArg>;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef HashTableAddResult<iterator> AddResult;
- ListHashSet();
+ ListHashSet() = default;
ListHashSet(const ListHashSet&);
+ ListHashSet(ListHashSet&&);
ListHashSet& operator=(const ListHashSet&);
+ ListHashSet& operator=(ListHashSet&&);
~ListHashSet();
void swap(ListHashSet&);
- int size() const;
- int capacity() const;
+ unsigned size() const;
+ unsigned capacity() const;
bool isEmpty() const;
iterator begin() { return makeIterator(m_head); }
@@ -153,110 +150,22 @@ private:
const_iterator makeConstIterator(Node*) const;
HashTable<Node*, Node*, IdentityExtractor, NodeHash, NodeTraits, NodeTraits> m_impl;
- Node* m_head;
- Node* m_tail;
- std::unique_ptr<NodeAllocator> m_allocator;
+ Node* m_head { nullptr };
+ Node* m_tail { nullptr };
};
-template<typename ValueArg, size_t inlineCapacity> struct ListHashSetNodeAllocator {
- typedef ListHashSetNode<ValueArg, inlineCapacity> Node;
- typedef ListHashSetNodeAllocator<ValueArg, inlineCapacity> NodeAllocator;
-
- ListHashSetNodeAllocator()
- : m_freeList(pool())
- , m_isDoneWithInitialFreeList(false)
- {
- memset(m_pool.pool, 0, sizeof(m_pool.pool));
- }
-
- Node* allocate()
- {
- Node* result = m_freeList;
-
- if (!result)
- return static_cast<Node*>(fastMalloc(sizeof(Node)));
-
- ASSERT(!result->m_isAllocated);
-
- Node* next = result->m_next;
- ASSERT(!next || !next->m_isAllocated);
- if (!next && !m_isDoneWithInitialFreeList) {
- next = result + 1;
- if (next == pastPool()) {
- m_isDoneWithInitialFreeList = true;
- next = 0;
- } else {
- ASSERT(inPool(next));
- ASSERT(!next->m_isAllocated);
- }
- }
- m_freeList = next;
-
- return result;
- }
-
- void deallocate(Node* node)
- {
- if (inPool(node)) {
-#ifndef NDEBUG
- node->m_isAllocated = false;
-#endif
- node->m_next = m_freeList;
- m_freeList = node;
- return;
- }
-
- fastFree(node);
- }
-
-private:
- Node* pool() { return reinterpret_cast_ptr<Node*>(m_pool.pool); }
- Node* pastPool() { return pool() + m_poolSize; }
- bool inPool(Node* node)
- {
- return node >= pool() && node < pastPool();
- }
-
- Node* m_freeList;
- bool m_isDoneWithInitialFreeList;
- static const size_t m_poolSize = inlineCapacity;
- union {
- char pool[sizeof(Node) * m_poolSize];
- double forAlignment;
- } m_pool;
-};
-
-template<typename ValueArg, size_t inlineCapacity> struct ListHashSetNode {
- typedef ListHashSetNodeAllocator<ValueArg, inlineCapacity> NodeAllocator;
-
+template<typename ValueArg> struct ListHashSetNode {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
template<typename T>
ListHashSetNode(T&& value)
: m_value(std::forward<T>(value))
- , m_prev(0)
- , m_next(0)
-#ifndef NDEBUG
- , m_isAllocated(true)
-#endif
{
}
- void* operator new(size_t, NodeAllocator* allocator)
- {
- return allocator->allocate();
- }
- void destroy(NodeAllocator* allocator)
- {
- this->~ListHashSetNode();
- allocator->deallocate(this);
- }
-
ValueArg m_value;
- ListHashSetNode* m_prev;
- ListHashSetNode* m_next;
-
-#ifndef NDEBUG
- bool m_isAllocated;
-#endif
+ ListHashSetNode* m_prev { nullptr };
+ ListHashSetNode* m_next { nullptr };
};
template<typename HashArg> struct ListHashSetNodeHashFunctions {
@@ -265,15 +174,15 @@ template<typename HashArg> struct ListHashSetNodeHashFunctions {
static const bool safeToCompareToEmptyOrDeleted = false;
};
-template<typename ValueArg, size_t inlineCapacity, typename HashArg> class ListHashSetIterator {
+template<typename ValueArg, typename HashArg> class ListHashSetIterator {
private:
- typedef ListHashSet<ValueArg, inlineCapacity, HashArg> ListHashSetType;
- typedef ListHashSetIterator<ValueArg, inlineCapacity, HashArg> iterator;
- typedef ListHashSetConstIterator<ValueArg, inlineCapacity, HashArg> const_iterator;
- typedef ListHashSetNode<ValueArg, inlineCapacity> Node;
+ typedef ListHashSet<ValueArg, HashArg> ListHashSetType;
+ typedef ListHashSetIterator<ValueArg, HashArg> iterator;
+ typedef ListHashSetConstIterator<ValueArg, HashArg> const_iterator;
+ typedef ListHashSetNode<ValueArg> Node;
typedef ValueArg ValueType;
- friend class ListHashSet<ValueArg, inlineCapacity, HashArg>;
+ friend class ListHashSet<ValueArg, HashArg>;
ListHashSetIterator(const ListHashSetType* set, Node* position) : m_iterator(set, position) { }
@@ -312,16 +221,16 @@ private:
const_iterator m_iterator;
};
-template<typename ValueArg, size_t inlineCapacity, typename HashArg> class ListHashSetConstIterator {
+template<typename ValueArg, typename HashArg> class ListHashSetConstIterator {
private:
- typedef ListHashSet<ValueArg, inlineCapacity, HashArg> ListHashSetType;
- typedef ListHashSetIterator<ValueArg, inlineCapacity, HashArg> iterator;
- typedef ListHashSetConstIterator<ValueArg, inlineCapacity, HashArg> const_iterator;
- typedef ListHashSetNode<ValueArg, inlineCapacity> Node;
+ typedef ListHashSet<ValueArg, HashArg> ListHashSetType;
+ typedef ListHashSetIterator<ValueArg, HashArg> iterator;
+ typedef ListHashSetConstIterator<ValueArg, HashArg> const_iterator;
+ typedef ListHashSetNode<ValueArg> Node;
typedef ValueArg ValueType;
- friend class ListHashSet<ValueArg, inlineCapacity, HashArg>;
- friend class ListHashSetIterator<ValueArg, inlineCapacity, HashArg>;
+ friend class ListHashSet<ValueArg, HashArg>;
+ friend class ListHashSetIterator<ValueArg, HashArg>;
ListHashSetConstIterator(const ListHashSetType* set, Node* position)
: m_set(set)
@@ -342,7 +251,7 @@ public:
const ValueType* get() const
{
- return &m_position->m_value;
+ return std::addressof(m_position->m_value);
}
const ValueType& operator*() const { return *get(); }
@@ -350,7 +259,7 @@ public:
const_iterator& operator++()
{
- ASSERT(m_position != 0);
+ ASSERT(m_position);
m_position = m_position->m_next;
return *this;
}
@@ -390,139 +299,144 @@ template<typename HashFunctions>
struct ListHashSetTranslator {
template<typename T> static unsigned hash(const T& key) { return HashFunctions::hash(key); }
template<typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a->m_value, b); }
- template<typename T, typename U, typename V> static void translate(T*& location, U&& key, const V& allocator)
+ template<typename T, typename U, typename V> static void translate(T*& location, U&& key, V&&)
{
- location = new (allocator) T(std::forward<U>(key));
+ location = new T(std::forward<U>(key));
}
};
-template<typename T, size_t inlineCapacity, typename U>
-inline ListHashSet<T, inlineCapacity, U>::ListHashSet()
- : m_head(0)
- , m_tail(0)
- , m_allocator(std::make_unique<NodeAllocator>())
-{
-}
-
-template<typename T, size_t inlineCapacity, typename U>
-inline ListHashSet<T, inlineCapacity, U>::ListHashSet(const ListHashSet& other)
- : m_head(0)
- , m_tail(0)
- , m_allocator(std::make_unique<NodeAllocator>())
+template<typename T, typename U>
+inline ListHashSet<T, U>::ListHashSet(const ListHashSet& other)
{
for (auto it = other.begin(), end = other.end(); it != end; ++it)
add(*it);
}
-template<typename T, size_t inlineCapacity, typename U>
-inline ListHashSet<T, inlineCapacity, U>& ListHashSet<T, inlineCapacity, U>::operator=(const ListHashSet& other)
+template<typename T, typename U>
+inline ListHashSet<T, U>& ListHashSet<T, U>::operator=(const ListHashSet& other)
{
ListHashSet tmp(other);
swap(tmp);
return *this;
}
-template<typename T, size_t inlineCapacity, typename U>
-inline void ListHashSet<T, inlineCapacity, U>::swap(ListHashSet& other)
+template<typename T, typename U>
+inline ListHashSet<T, U>::ListHashSet(ListHashSet&& other)
+ : m_impl(WTFMove(other.m_impl))
+ , m_head(std::exchange(other.m_head, nullptr))
+ , m_tail(std::exchange(other.m_tail, nullptr))
+{
+}
+
+template<typename T, typename U>
+inline ListHashSet<T, U>& ListHashSet<T, U>::operator=(ListHashSet&& other)
+{
+ m_impl = WTFMove(other.m_impl);
+ m_head = std::exchange(other.m_head, nullptr);
+ m_tail = std::exchange(other.m_tail, nullptr);
+ return *this;
+}
+
+template<typename T, typename U>
+inline void ListHashSet<T, U>::swap(ListHashSet& other)
{
m_impl.swap(other.m_impl);
std::swap(m_head, other.m_head);
std::swap(m_tail, other.m_tail);
- m_allocator.swap(other.m_allocator);
}
-template<typename T, size_t inlineCapacity, typename U>
-inline ListHashSet<T, inlineCapacity, U>::~ListHashSet()
+template<typename T, typename U>
+inline ListHashSet<T, U>::~ListHashSet()
{
deleteAllNodes();
}
-template<typename T, size_t inlineCapacity, typename U>
-inline int ListHashSet<T, inlineCapacity, U>::size() const
+template<typename T, typename U>
+inline unsigned ListHashSet<T, U>::size() const
{
return m_impl.size();
}
-template<typename T, size_t inlineCapacity, typename U>
-inline int ListHashSet<T, inlineCapacity, U>::capacity() const
+template<typename T, typename U>
+inline unsigned ListHashSet<T, U>::capacity() const
{
return m_impl.capacity();
}
-template<typename T, size_t inlineCapacity, typename U>
-inline bool ListHashSet<T, inlineCapacity, U>::isEmpty() const
+template<typename T, typename U>
+inline bool ListHashSet<T, U>::isEmpty() const
{
return m_impl.isEmpty();
}
-template<typename T, size_t inlineCapacity, typename U>
-inline T& ListHashSet<T, inlineCapacity, U>::first()
+template<typename T, typename U>
+inline T& ListHashSet<T, U>::first()
{
ASSERT(!isEmpty());
return m_head->m_value;
}
-template<typename T, size_t inlineCapacity, typename U>
-inline void ListHashSet<T, inlineCapacity, U>::removeFirst()
+template<typename T, typename U>
+inline void ListHashSet<T, U>::removeFirst()
{
takeFirst();
}
-template<typename T, size_t inlineCapacity, typename U>
-inline T ListHashSet<T, inlineCapacity, U>::takeFirst()
+template<typename T, typename U>
+inline T ListHashSet<T, U>::takeFirst()
{
ASSERT(!isEmpty());
auto it = m_impl.find(m_head);
- T result = std::move((*it)->m_value);
+ T result = WTFMove((*it)->m_value);
m_impl.remove(it);
unlinkAndDelete(m_head);
return result;
}
-template<typename T, size_t inlineCapacity, typename U>
-inline const T& ListHashSet<T, inlineCapacity, U>::first() const
+template<typename T, typename U>
+inline const T& ListHashSet<T, U>::first() const
{
ASSERT(!isEmpty());
return m_head->m_value;
}
-template<typename T, size_t inlineCapacity, typename U>
-inline T& ListHashSet<T, inlineCapacity, U>::last()
+template<typename T, typename U>
+inline T& ListHashSet<T, U>::last()
{
ASSERT(!isEmpty());
return m_tail->m_value;
}
-template<typename T, size_t inlineCapacity, typename U>
-inline const T& ListHashSet<T, inlineCapacity, U>::last() const
+template<typename T, typename U>
+inline const T& ListHashSet<T, U>::last() const
{
ASSERT(!isEmpty());
return m_tail->m_value;
}
-template<typename T, size_t inlineCapacity, typename U>
-inline void ListHashSet<T, inlineCapacity, U>::removeLast()
+template<typename T, typename U>
+inline void ListHashSet<T, U>::removeLast()
{
takeLast();
}
-template<typename T, size_t inlineCapacity, typename U>
-inline T ListHashSet<T, inlineCapacity, U>::takeLast()
+template<typename T, typename U>
+inline T ListHashSet<T, U>::takeLast()
{
ASSERT(!isEmpty());
auto it = m_impl.find(m_tail);
- T result = std::move((*it)->m_value);
+ T result = WTFMove((*it)->m_value);
m_impl.remove(it);
unlinkAndDelete(m_tail);
return result;
}
-template<typename T, size_t inlineCapacity, typename U>
-inline auto ListHashSet<T, inlineCapacity, U>::find(const ValueType& value) -> iterator
+template<typename T, typename U>
+inline auto ListHashSet<T, U>::find(const ValueType& value) -> iterator
{
auto it = m_impl.template find<BaseTranslator>(value);
if (it == m_impl.end())
@@ -530,8 +444,8 @@ inline auto ListHashSet<T, inlineCapacity, U>::find(const ValueType& value) -> i
return makeIterator(*it);
}
-template<typename T, size_t inlineCapacity, typename U>
-inline auto ListHashSet<T, inlineCapacity, U>::find(const ValueType& value) const -> const_iterator
+template<typename T, typename U>
+inline auto ListHashSet<T, U>::find(const ValueType& value) const -> const_iterator
{
auto it = m_impl.template find<BaseTranslator>(value);
if (it == m_impl.end())
@@ -545,9 +459,9 @@ struct ListHashSetTranslatorAdapter {
template<typename T, typename U> static bool equal(const T& a, const U& b) { return Translator::equal(a->m_value, b); }
};
-template<typename ValueType, size_t inlineCapacity, typename U>
+template<typename ValueType, typename U>
template<typename T, typename HashTranslator>
-inline auto ListHashSet<ValueType, inlineCapacity, U>::find(const T& value) -> iterator
+inline auto ListHashSet<ValueType, U>::find(const T& value) -> iterator
{
auto it = m_impl.template find<ListHashSetTranslatorAdapter<HashTranslator>>(value);
if (it == m_impl.end())
@@ -555,9 +469,9 @@ inline auto ListHashSet<ValueType, inlineCapacity, U>::find(const T& value) -> i
return makeIterator(*it);
}
-template<typename ValueType, size_t inlineCapacity, typename U>
+template<typename ValueType, typename U>
template<typename T, typename HashTranslator>
-inline auto ListHashSet<ValueType, inlineCapacity, U>::find(const T& value) const -> const_iterator
+inline auto ListHashSet<ValueType, U>::find(const T& value) const -> const_iterator
{
auto it = m_impl.template find<ListHashSetTranslatorAdapter<HashTranslator>>(value);
if (it == m_impl.end())
@@ -565,41 +479,41 @@ inline auto ListHashSet<ValueType, inlineCapacity, U>::find(const T& value) cons
return makeConstIterator(*it);
}
-template<typename ValueType, size_t inlineCapacity, typename U>
+template<typename ValueType, typename U>
template<typename T, typename HashTranslator>
-inline bool ListHashSet<ValueType, inlineCapacity, U>::contains(const T& value) const
+inline bool ListHashSet<ValueType, U>::contains(const T& value) const
{
return m_impl.template contains<ListHashSetTranslatorAdapter<HashTranslator>>(value);
}
-template<typename T, size_t inlineCapacity, typename U>
-inline bool ListHashSet<T, inlineCapacity, U>::contains(const ValueType& value) const
+template<typename T, typename U>
+inline bool ListHashSet<T, U>::contains(const ValueType& value) const
{
return m_impl.template contains<BaseTranslator>(value);
}
-template<typename T, size_t inlineCapacity, typename U>
-auto ListHashSet<T, inlineCapacity, U>::add(const ValueType& value) -> AddResult
+template<typename T, typename U>
+auto ListHashSet<T, U>::add(const ValueType& value) -> AddResult
{
- auto result = m_impl.template add<BaseTranslator>(value, m_allocator.get());
+ auto result = m_impl.template add<BaseTranslator>(value, nullptr);
if (result.isNewEntry)
appendNode(*result.iterator);
return AddResult(makeIterator(*result.iterator), result.isNewEntry);
}
-template<typename T, size_t inlineCapacity, typename U>
-auto ListHashSet<T, inlineCapacity, U>::add(ValueType&& value) -> AddResult
+template<typename T, typename U>
+auto ListHashSet<T, U>::add(ValueType&& value) -> AddResult
{
- auto result = m_impl.template add<BaseTranslator>(std::move(value), m_allocator.get());
+ auto result = m_impl.template add<BaseTranslator>(WTFMove(value), nullptr);
if (result.isNewEntry)
appendNode(*result.iterator);
return AddResult(makeIterator(*result.iterator), result.isNewEntry);
}
-template<typename T, size_t inlineCapacity, typename U>
-auto ListHashSet<T, inlineCapacity, U>::appendOrMoveToLast(const ValueType& value) -> AddResult
+template<typename T, typename U>
+auto ListHashSet<T, U>::appendOrMoveToLast(const ValueType& value) -> AddResult
{
- auto result = m_impl.template add<BaseTranslator>(value, m_allocator.get());
+ auto result = m_impl.template add<BaseTranslator>(value, nullptr);
Node* node = *result.iterator;
if (!result.isNewEntry)
unlink(node);
@@ -608,10 +522,10 @@ auto ListHashSet<T, inlineCapacity, U>::appendOrMoveToLast(const ValueType& valu
return AddResult(makeIterator(*result.iterator), result.isNewEntry);
}
-template<typename T, size_t inlineCapacity, typename U>
-auto ListHashSet<T, inlineCapacity, U>::appendOrMoveToLast(ValueType&& value) -> AddResult
+template<typename T, typename U>
+auto ListHashSet<T, U>::appendOrMoveToLast(ValueType&& value) -> AddResult
{
- auto result = m_impl.template add<BaseTranslator>(std::move(value), m_allocator.get());
+ auto result = m_impl.template add<BaseTranslator>(WTFMove(value), nullptr);
Node* node = *result.iterator;
if (!result.isNewEntry)
unlink(node);
@@ -620,10 +534,10 @@ auto ListHashSet<T, inlineCapacity, U>::appendOrMoveToLast(ValueType&& value) ->
return AddResult(makeIterator(*result.iterator), result.isNewEntry);
}
-template<typename T, size_t inlineCapacity, typename U>
-auto ListHashSet<T, inlineCapacity, U>::prependOrMoveToFirst(const ValueType& value) -> AddResult
+template<typename T, typename U>
+auto ListHashSet<T, U>::prependOrMoveToFirst(const ValueType& value) -> AddResult
{
- auto result = m_impl.template add<BaseTranslator>(value, m_allocator.get());
+ auto result = m_impl.template add<BaseTranslator>(value, nullptr);
Node* node = *result.iterator;
if (!result.isNewEntry)
unlink(node);
@@ -632,10 +546,10 @@ auto ListHashSet<T, inlineCapacity, U>::prependOrMoveToFirst(const ValueType& va
return AddResult(makeIterator(*result.iterator), result.isNewEntry);
}
-template<typename T, size_t inlineCapacity, typename U>
-auto ListHashSet<T, inlineCapacity, U>::prependOrMoveToFirst(ValueType&& value) -> AddResult
+template<typename T, typename U>
+auto ListHashSet<T, U>::prependOrMoveToFirst(ValueType&& value) -> AddResult
{
- auto result = m_impl.template add<BaseTranslator>(std::move(value), m_allocator.get());
+ auto result = m_impl.template add<BaseTranslator>(WTFMove(value), nullptr);
Node* node = *result.iterator;
if (!result.isNewEntry)
unlink(node);
@@ -644,38 +558,38 @@ auto ListHashSet<T, inlineCapacity, U>::prependOrMoveToFirst(ValueType&& value)
return AddResult(makeIterator(*result.iterator), result.isNewEntry);
}
-template<typename T, size_t inlineCapacity, typename U>
-auto ListHashSet<T, inlineCapacity, U>::insertBefore(const ValueType& beforeValue, const ValueType& newValue) -> AddResult
+template<typename T, typename U>
+auto ListHashSet<T, U>::insertBefore(const ValueType& beforeValue, const ValueType& newValue) -> AddResult
{
return insertBefore(find(beforeValue), newValue);
}
-template<typename T, size_t inlineCapacity, typename U>
-auto ListHashSet<T, inlineCapacity, U>::insertBefore(const ValueType& beforeValue, ValueType&& newValue) -> AddResult
+template<typename T, typename U>
+auto ListHashSet<T, U>::insertBefore(const ValueType& beforeValue, ValueType&& newValue) -> AddResult
{
- return insertBefore(find(beforeValue), std::move(newValue));
+ return insertBefore(find(beforeValue), WTFMove(newValue));
}
-template<typename T, size_t inlineCapacity, typename U>
-auto ListHashSet<T, inlineCapacity, U>::insertBefore(iterator it, const ValueType& newValue) -> AddResult
+template<typename T, typename U>
+auto ListHashSet<T, U>::insertBefore(iterator it, const ValueType& newValue) -> AddResult
{
- auto result = m_impl.template add<BaseTranslator>(newValue, m_allocator.get());
+ auto result = m_impl.template add<BaseTranslator>(newValue, nullptr);
if (result.isNewEntry)
insertNodeBefore(it.node(), *result.iterator);
return AddResult(makeIterator(*result.iterator), result.isNewEntry);
}
-template<typename T, size_t inlineCapacity, typename U>
-auto ListHashSet<T, inlineCapacity, U>::insertBefore(iterator it, ValueType&& newValue) -> AddResult
+template<typename T, typename U>
+auto ListHashSet<T, U>::insertBefore(iterator it, ValueType&& newValue) -> AddResult
{
- auto result = m_impl.template add<BaseTranslator>(std::move(newValue), m_allocator.get());
+ auto result = m_impl.template add<BaseTranslator>(WTFMove(newValue), nullptr);
if (result.isNewEntry)
insertNodeBefore(it.node(), *result.iterator);
return AddResult(makeIterator(*result.iterator), result.isNewEntry);
}
-template<typename T, size_t inlineCapacity, typename U>
-inline bool ListHashSet<T, inlineCapacity, U>::remove(iterator it)
+template<typename T, typename U>
+inline bool ListHashSet<T, U>::remove(iterator it)
{
if (it == end())
return false;
@@ -684,23 +598,23 @@ inline bool ListHashSet<T, inlineCapacity, U>::remove(iterator it)
return true;
}
-template<typename T, size_t inlineCapacity, typename U>
-inline bool ListHashSet<T, inlineCapacity, U>::remove(const ValueType& value)
+template<typename T, typename U>
+inline bool ListHashSet<T, U>::remove(const ValueType& value)
{
return remove(find(value));
}
-template<typename T, size_t inlineCapacity, typename U>
-inline void ListHashSet<T, inlineCapacity, U>::clear()
+template<typename T, typename U>
+inline void ListHashSet<T, U>::clear()
{
deleteAllNodes();
m_impl.clear();
- m_head = 0;
- m_tail = 0;
+ m_head = nullptr;
+ m_tail = nullptr;
}
-template<typename T, size_t inlineCapacity, typename U>
-void ListHashSet<T, inlineCapacity, U>::unlink(Node* node)
+template<typename T, typename U>
+void ListHashSet<T, U>::unlink(Node* node)
{
if (!node->m_prev) {
ASSERT(node == m_head);
@@ -719,18 +633,18 @@ void ListHashSet<T, inlineCapacity, U>::unlink(Node* node)
}
}
-template<typename T, size_t inlineCapacity, typename U>
-void ListHashSet<T, inlineCapacity, U>::unlinkAndDelete(Node* node)
+template<typename T, typename U>
+void ListHashSet<T, U>::unlinkAndDelete(Node* node)
{
unlink(node);
- node->destroy(m_allocator.get());
+ delete node;
}
-template<typename T, size_t inlineCapacity, typename U>
-void ListHashSet<T, inlineCapacity, U>::appendNode(Node* node)
+template<typename T, typename U>
+void ListHashSet<T, U>::appendNode(Node* node)
{
node->m_prev = m_tail;
- node->m_next = 0;
+ node->m_next = nullptr;
if (m_tail) {
ASSERT(m_head);
@@ -743,10 +657,10 @@ void ListHashSet<T, inlineCapacity, U>::appendNode(Node* node)
m_tail = node;
}
-template<typename T, size_t inlineCapacity, typename U>
-void ListHashSet<T, inlineCapacity, U>::prependNode(Node* node)
+template<typename T, typename U>
+void ListHashSet<T, U>::prependNode(Node* node)
{
- node->m_prev = 0;
+ node->m_prev = nullptr;
node->m_next = m_head;
if (m_head)
@@ -757,8 +671,8 @@ void ListHashSet<T, inlineCapacity, U>::prependNode(Node* node)
m_head = node;
}
-template<typename T, size_t inlineCapacity, typename U>
-void ListHashSet<T, inlineCapacity, U>::insertNodeBefore(Node* beforeNode, Node* newNode)
+template<typename T, typename U>
+void ListHashSet<T, U>::insertNodeBefore(Node* beforeNode, Node* newNode)
{
if (!beforeNode)
return appendNode(newNode);
@@ -773,24 +687,24 @@ void ListHashSet<T, inlineCapacity, U>::insertNodeBefore(Node* beforeNode, Node*
m_head = newNode;
}
-template<typename T, size_t inlineCapacity, typename U>
-void ListHashSet<T, inlineCapacity, U>::deleteAllNodes()
+template<typename T, typename U>
+void ListHashSet<T, U>::deleteAllNodes()
{
if (!m_head)
return;
- for (Node* node = m_head, *next = m_head->m_next; node; node = next, next = node ? node->m_next : 0)
- node->destroy(m_allocator.get());
+ for (Node* node = m_head, *next = m_head->m_next; node; node = next, next = node ? node->m_next : nullptr)
+ delete node;
}
-template<typename T, size_t inlineCapacity, typename U>
-inline auto ListHashSet<T, inlineCapacity, U>::makeIterator(Node* position) -> iterator
+template<typename T, typename U>
+inline auto ListHashSet<T, U>::makeIterator(Node* position) -> iterator
{
return iterator(this, position);
}
-template<typename T, size_t inlineCapacity, typename U>
-inline auto ListHashSet<T, inlineCapacity, U>::makeConstIterator(Node* position) const -> const_iterator
+template<typename T, typename U>
+inline auto ListHashSet<T, U>::makeConstIterator(Node* position) const -> const_iterator
{
return const_iterator(this, position);
}
@@ -798,5 +712,3 @@ inline auto ListHashSet<T, inlineCapacity, U>::makeConstIterator(Node* position)
} // namespace WTF
using WTF::ListHashSet;
-
-#endif /* WTF_ListHashSet_h */
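With the inline node pool removed above, ListHashSet nodes become plain fast-malloc'd allocations, but the container's behavior is unchanged: values stay unique while insertion order is preserved. A minimal illustration (not from the patch), using only the API visible in this diff:

    #include <wtf/ListHashSet.h>
    #include <wtf/text/WTFString.h>

    void rememberVisited()
    {
        ListHashSet<String> visited;
        visited.add("home");
        visited.add("settings");
        visited.add("home");                // already present: no new node, order unchanged
        visited.appendOrMoveToLast("home"); // relinks the existing node to the tail

        for (auto& page : visited) {
            // iterates in list order: "settings", then "home"
        }
    }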
diff --git a/Source/WTF/wtf/Lock.cpp b/Source/WTF/wtf/Lock.cpp
new file mode 100644
index 000000000..1d295f68e
--- /dev/null
+++ b/Source/WTF/wtf/Lock.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Lock.h"
+
+namespace WTF {
+
+void LockBase::lockSlow()
+{
+ DefaultLockAlgorithm::lockSlow(m_byte);
+}
+
+void LockBase::unlockSlow()
+{
+ DefaultLockAlgorithm::unlockSlow(m_byte, DefaultLockAlgorithm::Unfair);
+}
+
+void LockBase::unlockFairlySlow()
+{
+ DefaultLockAlgorithm::unlockSlow(m_byte, DefaultLockAlgorithm::Fair);
+}
+
+void LockBase::safepointSlow()
+{
+ DefaultLockAlgorithm::safepointSlow(m_byte);
+}
+
+} // namespace WTF
+
diff --git a/Source/WTF/wtf/Lock.h b/Source/WTF/wtf/Lock.h
new file mode 100644
index 000000000..9dec84c1f
--- /dev/null
+++ b/Source/WTF/wtf/Lock.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Lock_h
+#define WTF_Lock_h
+
+#include <wtf/LockAlgorithm.h>
+#include <wtf/Locker.h>
+#include <wtf/Noncopyable.h>
+
+namespace TestWebKitAPI {
+struct LockInspector;
+};
+
+namespace WTF {
+
+typedef LockAlgorithm<uint8_t, 1, 2> DefaultLockAlgorithm;
+
+// This is a fully adaptive mutex that only requires 1 byte of storage. It has fast paths that are
+// competitive with a spinlock (uncontended locking is inlined and is just a CAS, microcontention is
+// handled by spinning and yielding), and a slow path that is competitive with std::mutex (if a lock
+// cannot be acquired in a short period of time, the thread is put to sleep until the lock is
+// available again). It uses less memory than a std::mutex. This lock guarantees eventual stochastic
+// fairness, even in programs that relock the lock immediately after unlocking it. Except when there
+// are collisions between this lock and other locks in the ParkingLot, this lock will guarantee that
+// at worst one call to unlock() per millisecond will do a direct hand-off to the thread that is at
+// the head of the queue. When there are collisions, each collision increases the fair unlock delay
+// by one millisecond in the worst case.
+
+// This is a struct without a constructor or destructor so that it can be statically initialized.
+// Use Lock in instance variables.
+struct LockBase {
+ void lock()
+ {
+ if (UNLIKELY(!DefaultLockAlgorithm::lockFastAssumingZero(m_byte)))
+ lockSlow();
+ }
+
+ bool tryLock()
+ {
+ return DefaultLockAlgorithm::tryLock(m_byte);
+ }
+
+ // Need this version for std::unique_lock.
+ bool try_lock()
+ {
+ return tryLock();
+ }
+
+ // Relinquish the lock. Either one of the threads that were waiting for the lock, or some other
+ // thread that happens to be running, will be able to grab the lock. This bit of unfairness is
+ // called barging, and we allow it because it maximizes throughput. However, we bound how unfair
+ // barging can get by ensuring that every once in a while, when there is a thread waiting on the
+ // lock, we hand the lock to that thread directly. Every time unlock() finds a thread waiting,
+ // we check if the last time that we did a fair unlock was more than roughly 1ms ago; if so, we
+ // unlock fairly. Fairness matters most for long critical sections, and this virtually
+ // guarantees that long critical sections always get a fair lock.
+ void unlock()
+ {
+ if (UNLIKELY(!DefaultLockAlgorithm::unlockFastAssumingZero(m_byte)))
+ unlockSlow();
+ }
+
+ // This is like unlock() but it guarantees that we unlock the lock fairly. For short critical
+ // sections, this is much slower than unlock(). For long critical sections, unlock() will learn
+ // to be fair anyway. However, if you plan to relock the lock right after unlocking and you want
+ // to ensure that some other thread runs in the meantime, this is probably the function you
+ // want.
+ void unlockFairly()
+ {
+ if (UNLIKELY(!DefaultLockAlgorithm::unlockFastAssumingZero(m_byte)))
+ unlockFairlySlow();
+ }
+
+ void safepoint()
+ {
+ if (UNLIKELY(!DefaultLockAlgorithm::safepointFast(m_byte)))
+ safepointSlow();
+ }
+
+ bool isHeld() const
+ {
+ return DefaultLockAlgorithm::isLocked(m_byte);
+ }
+
+ bool isLocked() const
+ {
+ return isHeld();
+ }
+
+protected:
+ friend struct TestWebKitAPI::LockInspector;
+
+ static const uint8_t isHeldBit = 1;
+ static const uint8_t hasParkedBit = 2;
+
+ WTF_EXPORT_PRIVATE void lockSlow();
+ WTF_EXPORT_PRIVATE void unlockSlow();
+ WTF_EXPORT_PRIVATE void unlockFairlySlow();
+ WTF_EXPORT_PRIVATE void safepointSlow();
+
+ // Method used for testing only.
+ bool isFullyReset() const
+ {
+ return !m_byte.load();
+ }
+
+ Atomic<uint8_t> m_byte;
+};
+
+class Lock : public LockBase {
+ WTF_MAKE_NONCOPYABLE(Lock);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ Lock()
+ {
+ m_byte.store(0, std::memory_order_relaxed);
+ }
+};
+
+typedef LockBase StaticLock;
+typedef Locker<LockBase> LockHolder;
+
+} // namespace WTF
+
+using WTF::Lock;
+using WTF::LockHolder;
+using WTF::StaticLock;
+
+#endif // WTF_Lock_h
+
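A minimal usage sketch for the new Lock (illustrative, not from the patch), assuming only the APIs declared above plus the holdLock() helper added to Locker.h later in this diff:

    #include <wtf/Lock.h>

    static StaticLock globalLock;   // no constructor, so it can be statically initialized
    static int globalCounter;

    class Counter {
    public:
        void increment()
        {
            auto locker = holdLock(m_lock);   // RAII: unlocked when locker leaves scope
            ++m_value;
        }

        int value()
        {
            LockHolder locker(m_lock);        // same thing, spelled with the typedef
            return m_value;
        }

    private:
        Lock m_lock;
        int m_value { 0 };
    };

    void bumpGlobal()
    {
        globalLock.lock();
        ++globalCounter;
        globalLock.unlock();
    }

The uncontended paths reduce to a single compare-and-swap on the one-byte field; contended cases fall through to the slow paths defined in Lock.cpp above.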
diff --git a/Source/WTF/wtf/LockAlgorithm.h b/Source/WTF/wtf/LockAlgorithm.h
new file mode 100644
index 000000000..856788d79
--- /dev/null
+++ b/Source/WTF/wtf/LockAlgorithm.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_LockAlgorithm_h
+#define WTF_LockAlgorithm_h
+
+#include <thread>
+#include <wtf/Atomics.h>
+#include <wtf/Compiler.h>
+#include <wtf/ParkingLot.h>
+
+namespace WTF {
+
+// This is the algorithm used by WTF::Lock. You can use it to project one lock onto any atomic
+// field. The limit of one lock is due to the use of the field's address as a key to find the lock's
+// queue.
+
+template<typename LockType, LockType isHeldBit, LockType hasParkedBit>
+class LockAlgorithm {
+ static const bool verbose = false;
+ static const LockType mask = isHeldBit | hasParkedBit;
+
+public:
+ static bool lockFastAssumingZero(Atomic<LockType>& lock)
+ {
+ return lock.compareExchangeWeak(0, isHeldBit, std::memory_order_acquire);
+ }
+
+ static bool lockFast(Atomic<LockType>& lock)
+ {
+ LockType oldValue = lock.load(std::memory_order_relaxed);
+ if (oldValue & isHeldBit)
+ return false;
+ return lock.compareExchangeWeak(oldValue, oldValue | isHeldBit, std::memory_order_acquire);
+ }
+
+ static void lock(Atomic<LockType>& lock)
+ {
+ if (UNLIKELY(!lockFast(lock)))
+ lockSlow(lock);
+ }
+
+ static bool tryLock(Atomic<LockType>& lock)
+ {
+ for (;;) {
+ uint8_t currentByteValue = lock.load(std::memory_order_relaxed);
+ if (currentByteValue & isHeldBit)
+ return false;
+ if (lock.compareExchangeWeak(currentByteValue, currentByteValue | isHeldBit, std::memory_order_acquire))
+ return true;
+ }
+ }
+
+ static bool unlockFastAssumingZero(Atomic<LockType>& lock)
+ {
+ return lock.compareExchangeWeak(isHeldBit, 0, std::memory_order_release);
+ }
+
+ static bool unlockFast(Atomic<LockType>& lock)
+ {
+ LockType oldValue = lock.load(std::memory_order_relaxed);
+ if ((oldValue & mask) != isHeldBit)
+ return false;
+ return lock.compareExchangeWeak(oldValue, oldValue & ~isHeldBit, std::memory_order_release);
+ }
+
+ static void unlock(Atomic<LockType>& lock)
+ {
+ if (UNLIKELY(!unlockFast(lock)))
+ unlockSlow(lock, Unfair);
+ }
+
+ static void unlockFairly(Atomic<LockType>& lock)
+ {
+ if (UNLIKELY(!unlockFast(lock)))
+ unlockSlow(lock, Fair);
+ }
+
+ static bool safepointFast(const Atomic<LockType>& lock)
+ {
+ WTF::compilerFence();
+ return !(lock.load(std::memory_order_relaxed) & hasParkedBit);
+ }
+
+ static void safepoint(Atomic<LockType>& lock)
+ {
+ if (UNLIKELY(!safepointFast(lock)))
+ safepointSlow(lock);
+ }
+
+ static bool isLocked(const Atomic<LockType>& lock)
+ {
+ return lock.load(std::memory_order_acquire) & isHeldBit;
+ }
+
+ NEVER_INLINE static void lockSlow(Atomic<LockType>& lock)
+ {
+ unsigned spinCount = 0;
+
+ // This magic number turns out to be optimal based on past JikesRVM experiments.
+ const unsigned spinLimit = 40;
+
+ for (;;) {
+ uint8_t currentByteValue = lock.load();
+
+ // We allow ourselves to barge in.
+ if (!(currentByteValue & isHeldBit)
+ && lock.compareExchangeWeak(currentByteValue, currentByteValue | isHeldBit))
+ return;
+
+ // If there is nobody parked and we haven't spun too much, we can just try to spin around.
+ if (!(currentByteValue & hasParkedBit) && spinCount < spinLimit) {
+ spinCount++;
+ std::this_thread::yield();
+ continue;
+ }
+
+ // Need to park. We do this by setting the parked bit first, and then parking. We spin around
+ // if the parked bit wasn't set and we failed at setting it.
+ if (!(currentByteValue & hasParkedBit)
+ && !lock.compareExchangeWeak(currentByteValue, currentByteValue | hasParkedBit))
+ continue;
+
+ // We now expect the value to be isHeld|hasParked. So long as that's the case, we can park.
+ ParkingLot::ParkResult parkResult =
+ ParkingLot::compareAndPark(&lock, currentByteValue | isHeldBit | hasParkedBit);
+ if (parkResult.wasUnparked) {
+ switch (static_cast<Token>(parkResult.token)) {
+ case DirectHandoff:
+ // The lock was never released. It was handed to us directly by the thread that did
+ // unlock(). This means we're done!
+ RELEASE_ASSERT(isLocked(lock));
+ return;
+ case BargingOpportunity:
+ // This is the common case. The thread that called unlock() has released the lock,
+ // and we have been woken up so that we may get an opportunity to grab the lock. But
+ // other threads may barge, so the best that we can do is loop around and try again.
+ break;
+ }
+ }
+
+ // We have awoken, or we never parked because the byte value changed. Either way, we loop
+ // around and try again.
+ }
+ }
+
+ enum Fairness {
+ Fair,
+ Unfair
+ };
+ NEVER_INLINE static void unlockSlow(Atomic<LockType>& lock, Fairness fairness)
+ {
+ // We could get here because the weak CAS in unlock() failed spuriously, or because there is
+ // someone parked. So, we need a CAS loop: even if right now the lock is just held, it could
+ // be held and parked if someone attempts to lock just as we are unlocking.
+ for (;;) {
+ uint8_t oldByteValue = lock.load();
+ RELEASE_ASSERT(
+ (oldByteValue & mask) == isHeldBit
+ || (oldByteValue & mask) == (isHeldBit | hasParkedBit));
+
+ if ((oldByteValue & mask) == isHeldBit) {
+ if (lock.compareExchangeWeak(oldByteValue, oldByteValue & ~isHeldBit))
+ return;
+ continue;
+ }
+
+ // Someone is parked. Unpark exactly one thread. We may hand the lock to that thread
+ // directly, or we will unlock the lock at the same time as we unpark to allow for barging.
+ // When we unlock, we may leave the parked bit set if there is a chance that there are still
+ // other threads parked.
+ ASSERT((oldByteValue & mask) == (isHeldBit | hasParkedBit));
+ ParkingLot::unparkOne(
+ &lock,
+ [&] (ParkingLot::UnparkResult result) -> intptr_t {
+ // We are the only ones that can clear either the isHeldBit or the hasParkedBit,
+ // so we should still see both bits set right now.
+ ASSERT((lock.load() & mask) == (isHeldBit | hasParkedBit));
+
+ if (result.didUnparkThread && (fairness == Fair || result.timeToBeFair)) {
+ // We don't unlock anything. Instead, we hand the lock to the thread that was
+ // waiting.
+ return DirectHandoff;
+ }
+
+ lock.transaction(
+ [&] (LockType& value) {
+ value &= ~mask;
+ if (result.mayHaveMoreThreads)
+ value |= hasParkedBit;
+ });
+ return BargingOpportunity;
+ });
+ return;
+ }
+ }
+
+ NEVER_INLINE static void safepointSlow(Atomic<LockType>& lockWord)
+ {
+ unlockFairly(lockWord);
+ lock(lockWord);
+ }
+
+private:
+ enum Token {
+ BargingOpportunity,
+ DirectHandoff
+ };
+};
+
+} // namespace WTF
+
+using WTF::LockAlgorithm;
+
+#endif // WTF_LockAlgorithm_h
+
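The comment above notes that the algorithm can be projected onto any atomic field, with the field's address serving as the ParkingLot key. A hedged sketch of what that looks like (the Node type, field, and bit names are hypothetical, not WebKit API):

    #include <wtf/Atomics.h>
    #include <wtf/LockAlgorithm.h>

    // Reserve the low two bits of an existing one-byte field for locking.
    static const uint8_t nodeIsLockedBit = 1;
    static const uint8_t nodeHasParkedBit = 2;
    typedef LockAlgorithm<uint8_t, nodeIsLockedBit, nodeHasParkedBit> NodeLockAlgorithm;

    struct Node {
        Atomic<uint8_t> lockByte;   // the algorithm only modifies the two reserved bits
        // ... fields guarded by the lock ...
    };

    void touch(Node& node)
    {
        NodeLockAlgorithm::lock(node.lockByte);
        // ... mutate node while holding the lock ...
        NodeLockAlgorithm::unlock(node.lockByte);
    }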
diff --git a/Source/WTF/wtf/LockedPrintStream.cpp b/Source/WTF/wtf/LockedPrintStream.cpp
new file mode 100644
index 000000000..2b69d318e
--- /dev/null
+++ b/Source/WTF/wtf/LockedPrintStream.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LockedPrintStream.h"
+
+namespace WTF {
+
+LockedPrintStream::LockedPrintStream(std::unique_ptr<PrintStream> target)
+ : m_target(WTFMove(target))
+{
+}
+
+LockedPrintStream::~LockedPrintStream()
+{
+}
+
+void LockedPrintStream::vprintf(const char* format, va_list args)
+{
+ auto locker = holdLock(m_lock);
+ m_target->vprintf(format, args);
+}
+
+void LockedPrintStream::flush()
+{
+ auto locker = holdLock(m_lock);
+ m_target->flush();
+}
+
+PrintStream& LockedPrintStream::begin()
+{
+ m_lock.lock();
+ return *m_target;
+}
+
+void LockedPrintStream::end()
+{
+ m_lock.unlock();
+}
+
+} // namespace WTF
+
diff --git a/Source/WTF/wtf/LockedPrintStream.h b/Source/WTF/wtf/LockedPrintStream.h
new file mode 100644
index 000000000..6eb90aae8
--- /dev/null
+++ b/Source/WTF/wtf/LockedPrintStream.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/PrintStream.h>
+#include <wtf/RecursiveLockAdapter.h>
+#include <wtf/WordLock.h>
+
+namespace WTF {
+
+// Makes every call to print() atomic.
+class LockedPrintStream : public PrintStream {
+public:
+ LockedPrintStream(std::unique_ptr<PrintStream> target);
+ virtual ~LockedPrintStream();
+
+ void vprintf(const char* format, va_list) override WTF_ATTRIBUTE_PRINTF(2, 0);
+ void flush() override;
+
+protected:
+ PrintStream& begin() override;
+ void end() override;
+
+private:
+ // This needs to be a recursive lock because a printInternal or dump method could assert,
+ // and that assert might want to log. Better to let it. This needs to be a WordLock so that
+ // LockedPrintStream (i.e. cataLog) can be used to debug ParkingLot and Lock.
+ RecursiveLockAdapter<WordLock> m_lock;
+ std::unique_ptr<PrintStream> m_target;
+};
+
+} // namespace WTF
+
+using WTF::LockedPrintStream;
+
diff --git a/Source/WTF/wtf/Locker.h b/Source/WTF/wtf/Locker.h
index ad88546fd..ca41f3028 100644
--- a/Source/WTF/wtf/Locker.h
+++ b/Source/WTF/wtf/Locker.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -28,26 +28,81 @@
#ifndef Locker_h
#define Locker_h
+#include <wtf/Assertions.h>
#include <wtf/Noncopyable.h>
namespace WTF {
-template <typename T> class Locker {
- WTF_MAKE_NONCOPYABLE(Locker);
+enum NoLockingNecessaryTag { NoLockingNecessary };
+
+class AbstractLocker {
+ WTF_MAKE_NONCOPYABLE(AbstractLocker);
+public:
+ AbstractLocker(NoLockingNecessaryTag)
+ {
+ }
+
+protected:
+ AbstractLocker()
+ {
+ }
+};
+
+template <typename T> class Locker : public AbstractLocker {
public:
explicit Locker(T& lockable) : m_lockable(&lockable) { lock(); }
explicit Locker(T* lockable) : m_lockable(lockable) { lock(); }
+
+ // You should be wary of using this constructor. It's only applicable
+ // in places where there is a locking protocol for a particular object
+ // but it's not necessary to engage in that protocol yet. For example,
+    // this often happens when an object is newly allocated and it cannot
+    // be accessed concurrently.
+ Locker(NoLockingNecessaryTag) : m_lockable(nullptr) { }
+
+ Locker(int) = delete;
+
~Locker()
{
if (m_lockable)
m_lockable->unlock();
}
+ static Locker tryLock(T& lockable)
+ {
+ Locker result(NoLockingNecessary);
+ if (lockable.tryLock()) {
+ result.m_lockable = &lockable;
+ return result;
+ }
+ return result;
+ }
+
+ explicit operator bool() const { return !!m_lockable; }
+
void unlockEarly()
{
m_lockable->unlock();
m_lockable = 0;
}
+
+ // It's great to be able to pass lockers around. It enables custom locking adaptors like
+ // JSC::LockDuringMarking.
+ Locker(Locker&& other)
+ : m_lockable(other.m_lockable)
+ {
+ other.m_lockable = nullptr;
+ }
+
+ Locker& operator=(Locker&& other)
+ {
+ if (m_lockable)
+ m_lockable->unlock();
+ m_lockable = other.m_lockable;
+ other.m_lockable = nullptr;
+ return *this;
+ }
+
private:
void lock()
{
@@ -58,8 +113,26 @@ private:
T* m_lockable;
};
+// Use this lock scope like so:
+// auto locker = holdLock(lock);
+template<typename LockType>
+Locker<LockType> holdLock(LockType& lock)
+{
+ return Locker<LockType>(lock);
+}
+
+template<typename LockType>
+Locker<LockType> tryHoldLock(LockType& lock)
+{
+ return Locker<LockType>::tryLock(lock);
+}
+
}
+using WTF::AbstractLocker;
using WTF::Locker;
+using WTF::NoLockingNecessaryTag;
+using WTF::NoLockingNecessary;
+using WTF::holdLock;
#endif
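A sketch of the new helpers in use (illustrative; the Resource type is hypothetical). holdLock() gives the RAII form shown in the comment above, tryHoldLock() yields a locker that converts to false when the lock was contended, and NoLockingNecessary skips the protocol for objects no other thread can reach yet:

    #include <wtf/Lock.h>

    struct Resource {
        Lock lock;
        unsigned useCount { 0 };
    };

    void bumpUseCount(Resource& resource, bool resourceIsStillThreadLocal)
    {
        auto locker = resourceIsStillThreadLocal
            ? Locker<Lock>(NoLockingNecessary)   // freshly created object: no locking needed
            : holdLock(resource.lock);
        resource.useCount++;
    }

    void tryBumpUseCount(Resource& resource)
    {
        if (auto locker = tryHoldLock(resource.lock))
            resource.useCount++;   // lock held; released when locker leaves the if-scope
    }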
diff --git a/Source/WTF/wtf/LoggingAccumulator.h b/Source/WTF/wtf/LoggingAccumulator.h
new file mode 100644
index 000000000..a8e23206d
--- /dev/null
+++ b/Source/WTF/wtf/LoggingAccumulator.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/text/WTFString.h>
+
+namespace WTF {
+
+WTF_EXPORT_PRIVATE void resetAccumulatedLogs();
+WTF_EXPORT_PRIVATE String getAndResetAccumulatedLogs();
+
+} // namespace WTF
+
+using WTF::resetAccumulatedLogs;
+using WTF::getAndResetAccumulatedLogs;
diff --git a/Source/WTF/wtf/MD5.cpp b/Source/WTF/wtf/MD5.cpp
index 36549be83..55080f215 100644
--- a/Source/WTF/wtf/MD5.cpp
+++ b/Source/WTF/wtf/MD5.cpp
@@ -2,6 +2,7 @@
// Modifications Copyright 2006 Google Inc. All Rights Reserved
/*
* Copyright (C) 2010 Google Inc. All rights reserved.
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -58,6 +59,25 @@
namespace WTF {
+#if PLATFORM(COCOA)
+
+MD5::MD5()
+{
+ CC_MD5_Init(&m_context);
+}
+
+void MD5::addBytes(const uint8_t* input, size_t length)
+{
+ CC_MD5_Update(&m_context, input, length);
+}
+
+void MD5::checksum(Digest& hash)
+{
+ CC_MD5_Final(hash.data(), &m_context);
+}
+
+#else
+
// Note: this code is harmless on little-endian machines.
static void toLittleEndian(uint8_t* buf, unsigned longs)
@@ -264,4 +284,6 @@ void MD5::checksum(Digest& digest)
memset(m_in, 0, sizeof(m_in));
}
+#endif
+
} // namespace WTF
diff --git a/Source/WTF/wtf/MD5.h b/Source/WTF/wtf/MD5.h
index fa59ca238..ff1dbbb93 100644
--- a/Source/WTF/wtf/MD5.h
+++ b/Source/WTF/wtf/MD5.h
@@ -34,6 +34,10 @@
#include <array>
#include <wtf/Vector.h>
+#if PLATFORM(COCOA)
+#include <CommonCrypto/CommonDigest.h>
+#endif
+
namespace WTF {
class MD5 {
@@ -56,9 +60,13 @@ public:
WTF_EXPORT_PRIVATE void checksum(Digest&);
private:
+#if PLATFORM(COCOA)
+ CC_MD5_CTX m_context;
+#else
uint32_t m_buf[4];
uint32_t m_bits[2];
uint8_t m_in[64];
+#endif
};
} // namespace WTF
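A minimal sketch of the MD5 API whose backing implementation is switched above (illustrative, not from the patch); the same three calls map onto CC_MD5_Init/Update/Final on Cocoa and onto the portable code elsewhere:

    #include <wtf/MD5.h>

    MD5::Digest digestOf(const uint8_t* data, size_t length)
    {
        MD5 md5;
        md5.addBytes(data, length);
        MD5::Digest digest;    // the fixed-size byte-array typedef from MD5.h
        md5.checksum(digest);
        return digest;
    }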
diff --git a/Source/WTF/wtf/MainThread.cpp b/Source/WTF/wtf/MainThread.cpp
index b3d63d49e..eea0b36cd 100644
--- a/Source/WTF/wtf/MainThread.cpp
+++ b/Source/WTF/wtf/MainThread.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2007, 2008, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,82 +31,36 @@
#include "CurrentTime.h"
#include "Deque.h"
-#include "Functional.h"
#include "StdLibExtras.h"
+#include "Threading.h"
#include <mutex>
+#include <wtf/Lock.h>
#include <wtf/NeverDestroyed.h>
#include <wtf/ThreadSpecific.h>
namespace WTF {
-struct FunctionWithContext {
- MainThreadFunction* function;
- void* context;
-
- FunctionWithContext(MainThreadFunction* function = nullptr, void* context = nullptr)
- : function(function)
- , context(context)
- {
- }
- bool operator == (const FunctionWithContext& o)
- {
- return function == o.function && context == o.context;
- }
-};
-
-class FunctionWithContextFinder {
-public:
- FunctionWithContextFinder(const FunctionWithContext& m) : m(m) {}
- bool operator() (FunctionWithContext& o) { return o == m; }
- FunctionWithContext m;
-};
-
-
-typedef Deque<FunctionWithContext> FunctionQueue;
-
static bool callbacksPaused; // This global variable is only accessed from main thread.
-#if !PLATFORM(MAC)
+#if !OS(DARWIN) && !PLATFORM(GTK)
static ThreadIdentifier mainThreadIdentifier;
#endif
-static std::mutex& mainThreadFunctionQueueMutex()
-{
- static NeverDestroyed<std::mutex> mutex;
-
- return mutex;
-}
+static StaticLock mainThreadFunctionQueueMutex;
-static FunctionQueue& functionQueue()
+static Deque<Function<void ()>>& functionQueue()
{
- static NeverDestroyed<FunctionQueue> functionQueue;
+ static NeverDestroyed<Deque<Function<void ()>>> functionQueue;
return functionQueue;
}
-
-#if !PLATFORM(MAC)
-
-void initializeMainThread()
-{
- static bool initializedMainThread;
- if (initializedMainThread)
- return;
- initializedMainThread = true;
-
- mainThreadIdentifier = currentThread();
-
- mainThreadFunctionQueueMutex();
- initializeMainThreadPlatform();
- initializeGCThreads();
-}
-
-#else
-
+#if OS(DARWIN) || PLATFORM(GTK)
static pthread_once_t initializeMainThreadKeyOnce = PTHREAD_ONCE_INIT;
static void initializeMainThreadOnce()
{
- mainThreadFunctionQueueMutex();
+ initializeThreading();
initializeMainThreadPlatform();
+ initializeGCThreads();
}
void initializeMainThread()
@@ -114,18 +68,19 @@ void initializeMainThread()
pthread_once(&initializeMainThreadKeyOnce, initializeMainThreadOnce);
}
-#if !USE(WEB_THREAD)
+#if !USE(WEB_THREAD) && !PLATFORM(GTK)
static void initializeMainThreadToProcessMainThreadOnce()
{
- mainThreadFunctionQueueMutex();
+ initializeThreading();
initializeMainThreadToProcessMainThreadPlatform();
+ initializeGCThreads();
}
void initializeMainThreadToProcessMainThread()
{
pthread_once(&initializeMainThreadKeyOnce, initializeMainThreadToProcessMainThreadOnce);
}
-#else
+#elif !PLATFORM(GTK)
static pthread_once_t initializeWebThreadKeyOnce = PTHREAD_ONCE_INIT;
static void initializeWebThreadOnce()
@@ -139,10 +94,24 @@ void initializeWebThread()
}
#endif // !USE(WEB_THREAD)
+#else
+void initializeMainThread()
+{
+ static bool initializedMainThread;
+ if (initializedMainThread)
+ return;
+ initializedMainThread = true;
+
+ initializeThreading();
+ mainThreadIdentifier = currentThread();
+
+ initializeMainThreadPlatform();
+ initializeGCThreads();
+}
#endif
// 0.1 sec delays in UI is approximate threshold when they become noticeable. Have a limit that's half of that.
-static const double maxRunLoopSuspensionTime = 0.05;
+static const auto maxRunLoopSuspensionTime = std::chrono::milliseconds(50);
void dispatchFunctionsFromMainThread()
{
@@ -151,72 +120,51 @@ void dispatchFunctionsFromMainThread()
if (callbacksPaused)
return;
- double startTime = monotonicallyIncreasingTime();
+ auto startTime = std::chrono::steady_clock::now();
+
+ Function<void ()> function;
- FunctionWithContext invocation;
while (true) {
{
- std::lock_guard<std::mutex> lock(mainThreadFunctionQueueMutex());
+ std::lock_guard<StaticLock> lock(mainThreadFunctionQueueMutex);
if (!functionQueue().size())
break;
- invocation = functionQueue().takeFirst();
+
+ function = functionQueue().takeFirst();
}
- invocation.function(invocation.context);
+ function();
+
+ // Clearing the function can have side effects, so do so outside of the lock above.
+ function = nullptr;
// If we are running accumulated functions for too long so UI may become unresponsive, we need to
// yield so the user input can be processed. Otherwise user may not be able to even close the window.
// This code has effect only in case the scheduleDispatchFunctionsOnMainThread() is implemented in a way that
// allows input events to be processed before we are back here.
- if (monotonicallyIncreasingTime() - startTime > maxRunLoopSuspensionTime) {
+ if (std::chrono::steady_clock::now() - startTime > maxRunLoopSuspensionTime) {
scheduleDispatchFunctionsOnMainThread();
break;
}
}
}
-void callOnMainThread(MainThreadFunction* function, void* context)
+void callOnMainThread(Function<void ()>&& function)
{
ASSERT(function);
+
bool needToSchedule = false;
+
{
- std::lock_guard<std::mutex> lock(mainThreadFunctionQueueMutex());
+ std::lock_guard<StaticLock> lock(mainThreadFunctionQueueMutex);
needToSchedule = functionQueue().size() == 0;
- functionQueue().append(FunctionWithContext(function, context));
+ functionQueue().append(WTFMove(function));
}
+
if (needToSchedule)
scheduleDispatchFunctionsOnMainThread();
}
-void cancelCallOnMainThread(MainThreadFunction* function, void* context)
-{
- ASSERT(function);
-
- std::lock_guard<std::mutex> lock(mainThreadFunctionQueueMutex());
-
- FunctionWithContextFinder pred(FunctionWithContext(function, context));
-
- while (true) {
- // We must redefine 'i' each pass, because the itererator's operator=
- // requires 'this' to be valid, and remove() invalidates all iterators
- FunctionQueue::iterator i(functionQueue().findIf(pred));
- if (i == functionQueue().end())
- break;
- functionQueue().remove(i);
- }
-}
-
-static void callFunctionObject(void* context)
-{
- auto function = std::unique_ptr<std::function<void ()>>(static_cast<std::function<void ()>*>(context));
- (*function)();
-}
-
-void callOnMainThread(std::function<void ()> function)
-{
- callOnMainThread(callFunctionObject, std::make_unique<std::function<void ()>>(std::move(function)).release());
-}
-
void setMainThreadCallbacksPaused(bool paused)
{
ASSERT(isMainThread());
@@ -230,7 +178,7 @@ void setMainThreadCallbacksPaused(bool paused)
scheduleDispatchFunctionsOnMainThread();
}
-#if !PLATFORM(MAC)
+#if !OS(DARWIN) && !PLATFORM(GTK)
bool isMainThread()
{
return currentThread() == mainThreadIdentifier;
@@ -244,19 +192,19 @@ bool canAccessThreadLocalDataForThread(ThreadIdentifier threadId)
}
#endif
-#if ENABLE(PARALLEL_GC)
-static ThreadSpecific<bool>* isGCThread;
-#endif
+static ThreadSpecific<std::optional<GCThreadType>, CanBeGCThread::True>* isGCThread;
void initializeGCThreads()
{
-#if ENABLE(PARALLEL_GC)
- isGCThread = new ThreadSpecific<bool>();
-#endif
+ static std::once_flag flag;
+ std::call_once(
+ flag,
+ [] {
+ isGCThread = new ThreadSpecific<std::optional<GCThreadType>, CanBeGCThread::True>();
+ });
}
-#if ENABLE(PARALLEL_GC)
-void registerGCThread()
+void registerGCThread(GCThreadType type)
{
if (!isGCThread) {
// This happens if we're running in a process that doesn't care about
@@ -264,22 +212,24 @@ void registerGCThread()
return;
}
- **isGCThread = true;
+ **isGCThread = type;
}
bool isMainThreadOrGCThread()
{
- if (isGCThread->isSet() && **isGCThread)
+ if (mayBeGCThread())
return true;
return isMainThread();
}
-#elif PLATFORM(MAC)
-// This is necessary because JavaScriptCore.exp doesn't support preprocessor macros.
-bool isMainThreadOrGCThread()
+
+std::optional<GCThreadType> mayBeGCThread()
{
- return isMainThread();
+ if (!isGCThread)
+ return std::nullopt;
+ if (!isGCThread->isSet())
+ return std::nullopt;
+ return **isGCThread;
}
-#endif
} // namespace WTF
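The old function-pointer/context pair and cancelCallOnMainThread() are gone; callers now hand callOnMainThread() a WTF::Function, so a capturing lambda is enough. A small sketch (applyUpdate() is a hypothetical main-thread-only callee):

    #include <wtf/MainThread.h>

    void applyUpdate(int);   // hypothetical, must only run on the main thread

    void scheduleUpdate(int newValue)
    {
        callOnMainThread([newValue] {
            applyUpdate(newValue);   // runs on the main thread, FIFO with other queued work
        });
    }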
diff --git a/Source/WTF/wtf/MainThread.h b/Source/WTF/wtf/MainThread.h
index f33d71f30..e0a585f36 100644
--- a/Source/WTF/wtf/MainThread.h
+++ b/Source/WTF/wtf/MainThread.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007, 2008, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2007, 2008, 2010, 2016 Apple Inc. All rights reserved.
* Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
*
* Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,19 +32,23 @@
#include <functional>
#include <stdint.h>
+#include <wtf/Function.h>
+#include <wtf/Optional.h>
namespace WTF {
+class PrintStream;
+
typedef uint32_t ThreadIdentifier;
-typedef void MainThreadFunction(void*);
// Must be called from the main thread.
WTF_EXPORT_PRIVATE void initializeMainThread();
-WTF_EXPORT_PRIVATE void callOnMainThread(MainThreadFunction*, void* context);
-WTF_EXPORT_PRIVATE void cancelCallOnMainThread(MainThreadFunction*, void* context);
+WTF_EXPORT_PRIVATE void callOnMainThread(Function<void ()>&&);
-WTF_EXPORT_PRIVATE void callOnMainThread(std::function<void ()>);
+#if PLATFORM(COCOA)
+WTF_EXPORT_PRIVATE void callOnWebThreadOrDispatchAsyncOnMainThread(void (^block)());
+#endif
WTF_EXPORT_PRIVATE void setMainThreadCallbacksPaused(bool paused);
@@ -64,23 +68,25 @@ inline bool isWebThread() { return isMainThread(); }
inline bool isUIThread() { return isMainThread(); }
#endif // USE(WEB_THREAD)
-void initializeGCThreads();
+WTF_EXPORT_PRIVATE void initializeGCThreads();
-#if ENABLE(PARALLEL_GC)
-void registerGCThread();
-WTF_EXPORT_PRIVATE bool isMainThreadOrGCThread();
-#elif PLATFORM(MAC) || PLATFORM(IOS)
+enum class GCThreadType {
+ Main,
+ Helper
+};
+
+void printInternal(PrintStream&, GCThreadType);
+
+WTF_EXPORT_PRIVATE void registerGCThread(GCThreadType);
+WTF_EXPORT_PRIVATE std::optional<GCThreadType> mayBeGCThread();
WTF_EXPORT_PRIVATE bool isMainThreadOrGCThread();
-#else
-inline bool isMainThreadOrGCThread() { return isMainThread(); }
-#endif
// NOTE: these functions are internal to the callOnMainThread implementation.
void initializeMainThreadPlatform();
void scheduleDispatchFunctionsOnMainThread();
void dispatchFunctionsFromMainThread();
-#if PLATFORM(MAC)
+#if OS(DARWIN) && !PLATFORM(GTK)
#if !USE(WEB_THREAD)
// This version of initializeMainThread sets up the main thread as corresponding
// to the process's main thread, and not necessarily the thread that calls this
@@ -92,14 +98,18 @@ void initializeMainThreadToProcessMainThreadPlatform();
} // namespace WTF
+using WTF::GCThreadType;
using WTF::callOnMainThread;
-using WTF::cancelCallOnMainThread;
-using WTF::setMainThreadCallbacksPaused;
+using WTF::canAccessThreadLocalDataForThread;
using WTF::isMainThread;
using WTF::isMainThreadOrGCThread;
-using WTF::canAccessThreadLocalDataForThread;
using WTF::isUIThread;
using WTF::isWebThread;
+using WTF::mayBeGCThread;
+using WTF::setMainThreadCallbacksPaused;
+#if PLATFORM(COCOA)
+using WTF::callOnWebThreadOrDispatchAsyncOnMainThread;
+#endif
#if USE(WEB_THREAD)
using WTF::initializeWebThread;
using WTF::initializeApplicationUIThreadIdentifier;
diff --git a/Source/WTF/wtf/MallocPtr.h b/Source/WTF/wtf/MallocPtr.h
index 63a1fa4bf..0184b4f72 100644
--- a/Source/WTF/wtf/MallocPtr.h
+++ b/Source/WTF/wtf/MallocPtr.h
@@ -61,9 +61,7 @@ public:
T *leakPtr() WARN_UNUSED_RETURN
{
- T* ptr = m_ptr;
- m_ptr = nullptr;
- return ptr;
+ return std::exchange(m_ptr, nullptr);
}
bool operator!() const
@@ -73,7 +71,7 @@ public:
MallocPtr& operator=(MallocPtr&& other)
{
- MallocPtr ptr = std::move(other);
+ MallocPtr ptr = WTFMove(other);
swap(ptr);
return *this;
@@ -86,6 +84,19 @@ public:
template<typename U> friend MallocPtr<U> adoptMallocPtr(U*);
+ static MallocPtr malloc(size_t size)
+ {
+ MallocPtr mallocPtr;
+ mallocPtr.m_ptr = static_cast<T*>(fastMalloc(size));
+
+ return mallocPtr;
+ }
+
+ void realloc(size_t newSize)
+ {
+ m_ptr = static_cast<T*>(fastRealloc(m_ptr, newSize));
+ }
+
private:
explicit MallocPtr(T* ptr)
: m_ptr(ptr)
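The new static malloc() and realloc() members let a buffer live inside a MallocPtr from its first allocation instead of being adopted later. A hedged usage sketch, assuming a WTF translation unit where fastMalloc/fastFree are available:

    MallocPtr<char> buffer = MallocPtr<char>::malloc(64); // owns a fastMalloc'd block
    buffer.realloc(128);                                  // resized in place via fastRealloc
    char* raw = buffer.leakPtr();                         // ownership transferred; buffer is now null
    fastFree(raw);                                        // the caller is now responsible for freeing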
diff --git a/Source/WTF/wtf/MathExtras.h b/Source/WTF/wtf/MathExtras.h
index 716f512e8..948c48ffa 100644
--- a/Source/WTF/wtf/MathExtras.h
+++ b/Source/WTF/wtf/MathExtras.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006, 2007, 2008, 2009, 2010, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2006, 2007, 2008, 2009, 2010, 2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -69,17 +69,10 @@ const float piOverFourFloat = static_cast<float>(M_PI_4);
#ifndef M_SQRT2
const double sqrtOfTwoDouble = 1.41421356237309504880;
+const float sqrtOfTwoFloat = 1.41421356237309504880f;
#else
const double sqrtOfTwoDouble = M_SQRT2;
-#endif
-
-#if OS(DARWIN)
-
-// Work around a bug in the Mac OS X libc where ceil(-0.1) return +0.
-inline double wtf_ceil(double x) { return copysign(ceil(x), x); }
-
-#define ceil(x) wtf_ceil(x)
-
+const float sqrtOfTwoFloat = static_cast<float>(M_SQRT2);
#endif
#if OS(SOLARIS)
@@ -100,21 +93,6 @@ inline bool isinf(double x) { return !finite(x) && !isnand(x); }
#endif
-#if OS(OPENBSD)
-
-namespace std {
-
-#ifndef isfinite
-inline bool isfinite(double x) { return finite(x); }
-#endif
-#ifndef signbit
-inline bool signbit(double x) { struct ieee_double *p = (struct ieee_double *)&x; return p->dbl_sign; }
-#endif
-
-} // namespace std
-
-#endif
-
#if COMPILER(MSVC)
// Work around a bug in Win, where atan2(+-infinity, +-infinity) yields NaN instead of specific values.
@@ -140,37 +118,7 @@ extern "C" inline double wtf_atan2(double x, double y)
return result;
}
-// Work around a bug in the Microsoft CRT, where fmod(x, +-infinity) yields NaN instead of x.
-extern "C" inline double wtf_fmod(double x, double y) { return (!std::isinf(x) && std::isinf(y)) ? x : fmod(x, y); }
-
-// Work around a bug in the Microsoft CRT, where pow(NaN, 0) yields NaN instead of 1.
-extern "C" inline double wtf_pow(double x, double y) { return y == 0 ? 1 : pow(x, y); }
-
#define atan2(x, y) wtf_atan2(x, y)
-#define fmod(x, y) wtf_fmod(x, y)
-#define pow(x, y) wtf_pow(x, y)
-
-// MSVC's math functions do not bring lrint.
-inline long int lrint(double flt)
-{
- int64_t intgr;
-#if CPU(X86)
- __asm {
- fld flt
- fistp intgr
- };
-#else
- ASSERT(std::isfinite(flt));
- double rounded = round(flt);
- intgr = static_cast<int64_t>(rounded);
- // If the fractional part is exactly 0.5, we need to check whether
- // the rounded result is even. If it is not we need to add 1 to
- // negative values and subtract one from positive values.
- if ((fabs(intgr - flt) == 0.5) & intgr)
- intgr -= ((intgr >> 62) | 1); // 1 with the sign of result, i.e. -1 or 1.
-#endif
- return static_cast<long int>(intgr);
-}
#endif // COMPILER(MSVC)
@@ -233,9 +181,12 @@ inline int clampToInteger(float value)
return clampTo<int>(value);
}
-inline int clampToInteger(unsigned x)
+template<typename T>
+inline int clampToInteger(T x)
{
- const unsigned intMax = static_cast<unsigned>(std::numeric_limits<int>::max());
+ static_assert(std::numeric_limits<T>::is_integer, "T must be an integer.");
+
+ const T intMax = static_cast<unsigned>(std::numeric_limits<int>::max());
if (x >= intMax)
return std::numeric_limits<int>::max();
@@ -247,6 +198,15 @@ inline bool isWithinIntRange(float x)
return x > static_cast<float>(std::numeric_limits<int>::min()) && x < static_cast<float>(std::numeric_limits<int>::max());
}
+inline float normalizedFloat(float value)
+{
+ if (value > 0 && value < std::numeric_limits<float>::min())
+ return std::numeric_limits<float>::min();
+ if (value < 0 && value > -std::numeric_limits<float>::min())
+ return -std::numeric_limits<float>::min();
+ return value;
+}
+
template<typename T> inline bool hasOneBitSet(T value)
{
return !((value - 1) & value) && value;
@@ -264,9 +224,11 @@ template<typename T> inline bool hasTwoOrMoreBitsSet(T value)
template <typename T> inline unsigned getLSBSet(T value)
{
+ typedef typename std::make_unsigned<T>::type UnsignedT;
unsigned result = 0;
- while (value >>= 1)
+ UnsignedT unsignedValue = static_cast<UnsignedT>(value);
+ while (unsignedValue >>= 1)
++result;
return result;
@@ -412,6 +374,108 @@ inline unsigned fastLog2(unsigned i)
return log2;
}
+inline unsigned fastLog2(uint64_t value)
+{
+ unsigned high = static_cast<unsigned>(value >> 32);
+ if (high)
+ return fastLog2(high) + 32;
+ return fastLog2(static_cast<unsigned>(value));
+}
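The 64-bit overload splits the value into 32-bit halves and reuses the existing fastLog2(unsigned), adding 32 whenever any high bit is set. A standalone sketch of the same split, with a simple shift loop standing in for the 32-bit fastLog2:

    #include <cstdint>

    unsigned floorLog2_32(uint32_t v)
    {
        unsigned log = 0;
        while (v >>= 1)
            ++log;
        return log;                       // floor(log2(v)); 0 for v == 0 or 1
    }

    unsigned floorLog2_64(uint64_t v)
    {
        uint32_t high = static_cast<uint32_t>(v >> 32);
        if (high)
            return floorLog2_32(high) + 32;   // e.g. 1ull << 40 -> 8 + 32 = 40
        return floorLog2_32(static_cast<uint32_t>(v));
    }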
+
+template <typename T>
+inline typename std::enable_if<std::is_floating_point<T>::value, T>::type safeFPDivision(T u, T v)
+{
+ // Protect against overflow / underflow.
+ if (v < 1 && u > v * std::numeric_limits<T>::max())
+ return std::numeric_limits<T>::max();
+ if (v > 1 && u < v * std::numeric_limits<T>::min())
+ return 0;
+ return u / v;
+}
+
+// Floating point numbers comparison:
+// u is "essentially equal" [1][2] to v if: | u - v | / |u| <= e and | u - v | / |v| <= e
+//
+// [1] Knuth, D. E. "Accuracy of Floating Point Arithmetic." The Art of Computer Programming. 3rd ed. Vol. 2.
+// Boston: Addison-Wesley, 1998. 229-45.
+// [2] http://www.boost.org/doc/libs/1_34_0/libs/test/doc/components/test_tools/floating_point_comparison.html
+template <typename T>
+inline typename std::enable_if<std::is_floating_point<T>::value, bool>::type areEssentiallyEqual(T u, T v, T epsilon = std::numeric_limits<T>::epsilon())
+{
+ if (u == v)
+ return true;
+
+ const T delta = std::abs(u - v);
+ return safeFPDivision(delta, std::abs(u)) <= epsilon && safeFPDivision(delta, std::abs(v)) <= epsilon;
+}
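The test accepts u and v when their difference is small relative to both magnitudes; safeFPDivision only guards the divisions against overflow and underflow. A standalone sketch of the same relative-error check, with that guard omitted and multiplication used instead of division:

    #include <cassert>
    #include <cmath>
    #include <limits>

    bool essentiallyEqual(double u, double v, double epsilon = std::numeric_limits<double>::epsilon())
    {
        if (u == v)
            return true;
        double delta = std::fabs(u - v);
        return delta <= epsilon * std::fabs(u) && delta <= epsilon * std::fabs(v);
    }

    int main()
    {
        assert(essentiallyEqual(0.1 + 0.2, 0.3));   // differs only by rounding error
        assert(!essentiallyEqual(1.0, 1.0 + 1e-9)); // relative error far above epsilon
    }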
+
+// Match behavior of Math.min, where NaN is returned if either argument is NaN.
+template <typename T>
+inline typename std::enable_if<std::is_floating_point<T>::value, T>::type nanPropagatingMin(T a, T b)
+{
+ return std::isnan(a) || std::isnan(b) ? std::numeric_limits<T>::quiet_NaN() : std::min(a, b);
+}
+
+// Match behavior of Math.max, where NaN is returned if either argument is NaN.
+template <typename T>
+inline typename std::enable_if<std::is_floating_point<T>::value, T>::type nanPropagatingMax(T a, T b)
+{
+ return std::isnan(a) || std::isnan(b) ? std::numeric_limits<T>::quiet_NaN() : std::max(a, b);
+}
+
+inline bool isIntegral(float value)
+{
+ return static_cast<int>(value) == value;
+}
+
+template<typename T>
+inline void incrementWithSaturation(T& value)
+{
+ if (value != std::numeric_limits<T>::max())
+ value++;
+}
+
+template<typename T>
+inline T leftShiftWithSaturation(T value, unsigned shiftAmount, T max = std::numeric_limits<T>::max())
+{
+ T result = value << shiftAmount;
+ // We will have saturated if shifting right doesn't recover the original value.
+ if (result >> shiftAmount != value)
+ return max;
+ if (result > max)
+ return max;
+ return result;
+}
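Saturation is detected by shifting the result back to the right: if that round trip does not recover the original value, bits were lost and the maximum is returned instead. A standalone sketch with uint8_t so the saturation is easy to see:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    uint8_t shiftSaturating(uint8_t value, unsigned shift, uint8_t max = std::numeric_limits<uint8_t>::max())
    {
        uint8_t result = static_cast<uint8_t>(value << shift);
        if (static_cast<uint8_t>(result >> shift) != value)
            return max;                    // bits were shifted out: saturate
        return result < max ? result : max;
    }

    int main()
    {
        assert(shiftSaturating(3, 2) == 12);  // 0b11 << 2 still fits
        assert(shiftSaturating(3, 7) == 255); // 0b11 << 7 overflows 8 bits, saturates
    }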
+
+// Check if two ranges overlap assuming that neither range is empty.
+template<typename T>
+inline bool nonEmptyRangesOverlap(T leftMin, T leftMax, T rightMin, T rightMax)
+{
+ ASSERT(leftMin < leftMax);
+ ASSERT(rightMin < rightMax);
+
+ return leftMax > rightMin && rightMax > leftMin;
+}
+
+// Pass ranges with the min being inclusive and the max being exclusive. For example, this should
+// return false:
+//
+// rangesOverlap(0, 8, 8, 16)
+template<typename T>
+inline bool rangesOverlap(T leftMin, T leftMax, T rightMin, T rightMax)
+{
+ ASSERT(leftMin <= leftMax);
+ ASSERT(rightMin <= rightMax);
+
+ // Empty ranges interfere with nothing.
+ if (leftMin == leftMax)
+ return false;
+ if (rightMin == rightMax)
+ return false;
+
+ return nonEmptyRangesOverlap(leftMin, leftMax, rightMin, rightMax);
+}
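Both helpers treat ranges as half-open [min, max): an empty range overlaps nothing, and two non-empty ranges intersect exactly when each one starts before the other ends. A standalone sketch matching the example in the comment above:

    #include <cassert>

    bool overlaps(int aMin, int aMax, int bMin, int bMax) // [min, max) on both sides
    {
        if (aMin == aMax || bMin == bMax)
            return false;               // empty ranges interfere with nothing
        return aMax > bMin && bMax > aMin;
    }

    int main()
    {
        assert(!overlaps(0, 8, 8, 16)); // adjacent ranges do not overlap
        assert(overlaps(0, 9, 8, 16));  // one unit of genuine intersection
        assert(!overlaps(4, 4, 0, 16)); // empty left range
    }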
+
} // namespace WTF
#endif // #ifndef WTF_MathExtras_h
diff --git a/Source/WTF/wtf/MediaTime.cpp b/Source/WTF/wtf/MediaTime.cpp
index f856714d0..9f53402c1 100644
--- a/Source/WTF/wtf/MediaTime.cpp
+++ b/Source/WTF/wtf/MediaTime.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,15 +30,17 @@
#include "MediaTime.h"
#include <algorithm>
+#include <cstdlib>
#include <wtf/CheckedArithmetic.h>
#include <wtf/MathExtras.h>
+#include <wtf/PrintStream.h>
namespace WTF {
-static int32_t greatestCommonDivisor(int32_t a, int32_t b)
+static uint32_t greatestCommonDivisor(uint32_t a, uint32_t b)
{
// Euclid's Algorithm
- int32_t temp = 0;
+ uint32_t temp = 0;
while (b) {
temp = b;
b = a % b;
@@ -47,17 +49,17 @@ static int32_t greatestCommonDivisor(int32_t a, int32_t b)
return a;
}
-static int32_t leastCommonMultiple(int32_t a, int32_t b, int32_t &result)
+static uint32_t leastCommonMultiple(uint32_t a, uint32_t b, uint32_t &result)
{
return safeMultiply(a, b / greatestCommonDivisor(a, b), result);
}
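Switching the GCD/LCM helpers to uint32_t matches the now-unsigned timescale; leastCommonMultiple reports overflow through safeMultiply's boolean result, and the arithmetic operators below fall back to MaximumTimeScale when it fails. A standalone sketch of the same pattern, with GCC/Clang's __builtin_mul_overflow standing in for WTF's safeMultiply:

    #include <cstdint>

    uint32_t gcd(uint32_t a, uint32_t b)
    {
        while (b) {                    // Euclid's algorithm, as above
            uint32_t t = b;
            b = a % b;
            a = t;
        }
        return a;
    }

    bool lcm(uint32_t a, uint32_t b, uint32_t& result)
    {
        return !__builtin_mul_overflow(a, b / gcd(a, b), &result); // false means the LCM overflowed
    }

    // Caller-side fallback, mirroring the operators below:
    //     uint32_t common;
    //     if (!lcm(600, 1000, common) || common > 0x7fffffffU)
    //         common = 0x7fffffffU;   // MaximumTimeScale
    //     // common == 3000 here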
-static int32_t signum(int64_t val)
+static int64_t signum(int64_t val)
{
return (0 < val) - (val < 0);
}
-const int32_t MediaTime::MaximumTimeScale = 0x7fffffffL;
+const uint32_t MediaTime::MaximumTimeScale = 0x7fffffffL;
MediaTime::MediaTime()
: m_timeValue(0)
@@ -66,7 +68,7 @@ MediaTime::MediaTime()
{
}
-MediaTime::MediaTime(int64_t value, int32_t scale, uint32_t flags)
+MediaTime::MediaTime(int64_t value, uint32_t scale, uint8_t flags)
: m_timeValue(value)
, m_timeScale(scale)
, m_timeFlags(flags)
@@ -82,7 +84,23 @@ MediaTime::MediaTime(const MediaTime& rhs)
*this = rhs;
}
-MediaTime MediaTime::createWithFloat(float floatTime, int32_t timeScale)
+MediaTime MediaTime::createWithFloat(float floatTime)
+{
+ if (floatTime != floatTime)
+ return invalidTime();
+ if (std::isinf(floatTime))
+ return std::signbit(floatTime) ? negativeInfiniteTime() : positiveInfiniteTime();
+ if (floatTime > std::numeric_limits<int64_t>::max())
+ return positiveInfiniteTime();
+ if (floatTime < std::numeric_limits<int64_t>::min())
+ return negativeInfiniteTime();
+
+ MediaTime value(0, DefaultTimeScale, Valid | DoubleValue);
+ value.m_timeValueAsDouble = floatTime;
+ return value;
+}
+
+MediaTime MediaTime::createWithFloat(float floatTime, uint32_t timeScale)
{
if (floatTime != floatTime)
return invalidTime();
@@ -98,7 +116,23 @@ MediaTime MediaTime::createWithFloat(float floatTime, int32_t timeScale)
return MediaTime(static_cast<int64_t>(floatTime * timeScale), timeScale, Valid);
}
-MediaTime MediaTime::createWithDouble(double doubleTime, int32_t timeScale)
+MediaTime MediaTime::createWithDouble(double doubleTime)
+{
+ if (doubleTime != doubleTime)
+ return invalidTime();
+ if (std::isinf(doubleTime))
+ return std::signbit(doubleTime) ? negativeInfiniteTime() : positiveInfiniteTime();
+ if (doubleTime > std::numeric_limits<int64_t>::max())
+ return positiveInfiniteTime();
+ if (doubleTime < std::numeric_limits<int64_t>::min())
+ return negativeInfiniteTime();
+
+ MediaTime value(0, DefaultTimeScale, Valid | DoubleValue);
+ value.m_timeValueAsDouble = doubleTime;
+ return value;
+}
+
+MediaTime MediaTime::createWithDouble(double doubleTime, uint32_t timeScale)
{
if (doubleTime != doubleTime)
return invalidTime();
@@ -111,7 +145,7 @@ MediaTime MediaTime::createWithDouble(double doubleTime, int32_t timeScale)
while (doubleTime * timeScale > std::numeric_limits<int64_t>::max())
timeScale /= 2;
- return MediaTime(static_cast<int64_t>(doubleTime * timeScale), timeScale, Valid);
+ return MediaTime(static_cast<int64_t>(std::round(doubleTime * timeScale)), timeScale, Valid);
}
float MediaTime::toFloat() const
@@ -122,6 +156,8 @@ float MediaTime::toFloat() const
return std::numeric_limits<float>::infinity();
if (isNegativeInfinite())
return -std::numeric_limits<float>::infinity();
+ if (hasDoubleValue())
+ return m_timeValueAsDouble;
return static_cast<float>(m_timeValue) / m_timeScale;
}
@@ -133,6 +169,8 @@ double MediaTime::toDouble() const
return std::numeric_limits<double>::infinity();
if (isNegativeInfinite())
return -std::numeric_limits<double>::infinity();
+ if (hasDoubleValue())
+ return m_timeValueAsDouble;
return static_cast<double>(m_timeValue) / m_timeScale;
}
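The one-argument createWithDouble/createWithFloat overloads store the value directly in the new m_timeValueAsDouble union member and set the DoubleValue flag, so nothing is rounded until a rational timescale is actually required; toFloat()/toDouble() then return that stored value unchanged. A hedged usage sketch, assuming a WTF translation unit and <cmath> for NAN/INFINITY:

    MediaTime t = MediaTime::createWithDouble(1.5);        // kept as a double, DoubleValue flag set
    MediaTime bad = MediaTime::createWithDouble(NAN);      // maps to MediaTime::invalidTime()
    MediaTime inf = MediaTime::createWithDouble(INFINITY); // maps to MediaTime::positiveInfiniteTime()
    double d = t.toDouble();                               // 1.5, returned straight from the union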
@@ -164,11 +202,20 @@ MediaTime MediaTime::operator+(const MediaTime& rhs) const
if (isNegativeInfinite() || rhs.isNegativeInfinite())
return negativeInfiniteTime();
- int32_t commonTimeScale;
- if (!leastCommonMultiple(this->m_timeScale, rhs.m_timeScale, commonTimeScale) || commonTimeScale > MaximumTimeScale)
- commonTimeScale = MaximumTimeScale;
+ if (hasDoubleValue() && rhs.hasDoubleValue())
+ return MediaTime::createWithDouble(m_timeValueAsDouble + rhs.m_timeValueAsDouble);
+
MediaTime a = *this;
MediaTime b = rhs;
+
+ if (a.hasDoubleValue())
+ a.setTimeScale(DefaultTimeScale);
+ else if (b.hasDoubleValue())
+ b.setTimeScale(DefaultTimeScale);
+
+ uint32_t commonTimeScale;
+ if (!leastCommonMultiple(a.m_timeScale, b.m_timeScale, commonTimeScale) || commonTimeScale > MaximumTimeScale)
+ commonTimeScale = MaximumTimeScale;
a.setTimeScale(commonTimeScale);
b.setTimeScale(commonTimeScale);
while (!safeAdd(a.m_timeValue, b.m_timeValue, a.m_timeValue)) {
@@ -201,11 +248,20 @@ MediaTime MediaTime::operator-(const MediaTime& rhs) const
if (isNegativeInfinite() || rhs.isPositiveInfinite())
return negativeInfiniteTime();
- int32_t commonTimeScale;
- if (!leastCommonMultiple(this->m_timeScale, rhs.m_timeScale, commonTimeScale) || commonTimeScale > MaximumTimeScale)
- commonTimeScale = MaximumTimeScale;
+ if (hasDoubleValue() && rhs.hasDoubleValue())
+ return MediaTime::createWithDouble(m_timeValueAsDouble - rhs.m_timeValueAsDouble);
+
MediaTime a = *this;
MediaTime b = rhs;
+
+ if (a.hasDoubleValue())
+ a.setTimeScale(DefaultTimeScale);
+ else if (b.hasDoubleValue())
+ b.setTimeScale(DefaultTimeScale);
+
+ uint32_t commonTimeScale;
+ if (!leastCommonMultiple(this->m_timeScale, rhs.m_timeScale, commonTimeScale) || commonTimeScale > MaximumTimeScale)
+ commonTimeScale = MaximumTimeScale;
a.setTimeScale(commonTimeScale);
b.setTimeScale(commonTimeScale);
while (!safeSub(a.m_timeValue, b.m_timeValue, a.m_timeValue)) {
@@ -218,6 +274,28 @@ MediaTime MediaTime::operator-(const MediaTime& rhs) const
return a;
}
+MediaTime MediaTime::operator-() const
+{
+ if (isInvalid())
+ return invalidTime();
+
+ if (isIndefinite())
+ return indefiniteTime();
+
+ if (isPositiveInfinite())
+ return negativeInfiniteTime();
+
+ if (isNegativeInfinite())
+ return positiveInfiniteTime();
+
+ MediaTime negativeTime = *this;
+ if (negativeTime.hasDoubleValue())
+ negativeTime.m_timeValueAsDouble = -negativeTime.m_timeValueAsDouble;
+ else
+ negativeTime.m_timeValue = -negativeTime.m_timeValue;
+ return negativeTime;
+}
+
MediaTime MediaTime::operator*(int32_t rhs) const
{
if (isInvalid())
@@ -243,6 +321,11 @@ MediaTime MediaTime::operator*(int32_t rhs) const
MediaTime a = *this;
+ if (a.hasDoubleValue()) {
+ a.m_timeValueAsDouble *= rhs;
+ return a;
+ }
+
while (!safeMultiply(a.m_timeValue, rhs, a.m_timeValue)) {
if (a.m_timeScale == 1)
return signum(a.m_timeValue) == signum(rhs) ? positiveInfiniteTime() : negativeInfiniteTime();
@@ -252,61 +335,86 @@ MediaTime MediaTime::operator*(int32_t rhs) const
return a;
}
-bool MediaTime::operator<(const MediaTime& rhs) const
+bool MediaTime::operator!() const
{
- return compare(rhs) == LessThan;
+ return (m_timeFlags == Valid && !m_timeValue)
+ || (m_timeFlags == (Valid | DoubleValue) && !m_timeValueAsDouble);
}
-bool MediaTime::operator>(const MediaTime& rhs) const
+MediaTime::operator bool() const
{
- return compare(rhs) == GreaterThan;
+ return !(m_timeFlags == Valid && !m_timeValue)
+ && !(m_timeFlags == (Valid | DoubleValue) && !m_timeValueAsDouble);
}
-bool MediaTime::operator!=(const MediaTime& rhs) const
+MediaTime::ComparisonFlags MediaTime::compare(const MediaTime& rhs) const
{
- return compare(rhs) != EqualTo;
-}
+ auto andFlags = m_timeFlags & rhs.m_timeFlags;
+ if (andFlags & (PositiveInfinite | NegativeInfinite | Indefinite))
+ return EqualTo;
-bool MediaTime::operator==(const MediaTime& rhs) const
-{
- return compare(rhs) == EqualTo;
-}
+ auto orFlags = m_timeFlags | rhs.m_timeFlags;
+ if (!(orFlags & Valid))
+ return EqualTo;
-bool MediaTime::operator>=(const MediaTime& rhs) const
-{
- return compare(rhs) >= EqualTo;
-}
+ if (!(andFlags & Valid))
+ return isInvalid() ? GreaterThan : LessThan;
-bool MediaTime::operator<=(const MediaTime& rhs) const
-{
- return compare(rhs) <= EqualTo;
-}
+ if (orFlags & NegativeInfinite)
+ return isNegativeInfinite() ? LessThan : GreaterThan;
-MediaTime::ComparisonFlags MediaTime::compare(const MediaTime& rhs) const
-{
- if ((isPositiveInfinite() && rhs.isPositiveInfinite())
- || (isNegativeInfinite() && rhs.isNegativeInfinite())
- || (isInvalid() && rhs.isInvalid())
- || (isIndefinite() && rhs.isIndefinite()))
+ if (orFlags & PositiveInfinite)
+ return isPositiveInfinite() ? GreaterThan : LessThan;
+
+ if (orFlags & Indefinite)
+ return isIndefinite() ? GreaterThan : LessThan;
+
+ if (andFlags & DoubleValue) {
+ if (m_timeValueAsDouble == rhs.m_timeValueAsDouble)
+ return EqualTo;
+
+ return m_timeValueAsDouble < rhs.m_timeValueAsDouble ? LessThan : GreaterThan;
+ }
+
+ if (orFlags & DoubleValue) {
+ double a = toDouble();
+ double b = rhs.toDouble();
+ if (a > b)
+ return GreaterThan;
+ if (a < b)
+ return LessThan;
return EqualTo;
+ }
- if (isInvalid())
- return GreaterThan;
+ if ((m_timeValue < 0) != (rhs.m_timeValue < 0))
+ return m_timeValue < 0 ? LessThan : GreaterThan;
- if (rhs.isInvalid())
- return LessThan;
+ if (!m_timeValue && !rhs.m_timeValue)
+ return EqualTo;
- if (rhs.isNegativeInfinite() || isPositiveInfinite())
- return GreaterThan;
+ if (m_timeScale == rhs.m_timeScale) {
+ if (m_timeValue == rhs.m_timeValue)
+ return EqualTo;
+ return m_timeValue < rhs.m_timeValue ? LessThan : GreaterThan;
+ }
- if (rhs.isPositiveInfinite() || isNegativeInfinite())
+ if (m_timeValue == rhs.m_timeValue)
+ return m_timeScale < rhs.m_timeScale ? GreaterThan : LessThan;
+
+ if (m_timeValue < rhs.m_timeValue && m_timeScale > rhs.m_timeScale)
return LessThan;
- if (isIndefinite())
+ if (m_timeValue > rhs.m_timeValue && m_timeScale < rhs.m_timeScale)
return GreaterThan;
- if (rhs.isIndefinite())
- return LessThan;
+ int64_t lhsFactor;
+ int64_t rhsFactor;
+ if (safeMultiply(m_timeValue, static_cast<int64_t>(rhs.m_timeScale), lhsFactor)
+ && safeMultiply(rhs.m_timeValue, static_cast<int64_t>(m_timeScale), rhsFactor)) {
+ if (lhsFactor == rhsFactor)
+ return EqualTo;
+ return lhsFactor < rhsFactor ? LessThan : GreaterThan;
+ }
int64_t rhsWhole = rhs.m_timeValue / rhs.m_timeScale;
int64_t lhsWhole = m_timeValue / m_timeScale;
@@ -317,14 +425,21 @@ MediaTime::ComparisonFlags MediaTime::compare(const MediaTime& rhs) const
int64_t rhsRemain = rhs.m_timeValue % rhs.m_timeScale;
int64_t lhsRemain = m_timeValue % m_timeScale;
- int64_t lhsFactor = lhsRemain * rhs.m_timeScale;
- int64_t rhsFactor = rhsRemain * m_timeScale;
+ lhsFactor = lhsRemain * rhs.m_timeScale;
+ rhsFactor = rhsRemain * m_timeScale;
if (lhsFactor == rhsFactor)
return EqualTo;
return lhsFactor > rhsFactor ? GreaterThan : LessThan;
}
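After the flag-based cases, two rational times are compared by cross-multiplying each value by the other's timescale into 64 bits; only if safeMultiply reports overflow does the code fall back to comparing whole parts and remainders separately. A standalone sketch of the cross-multiplication step, with __builtin_mul_overflow standing in for safeMultiply:

    #include <cstdint>

    // Compare value1/scale1 against value2/scale2; returns -1, 0 or +1 when the
    // products fit in 64 bits, and 0 as a placeholder for the fallback path otherwise.
    int compareRational(int64_t value1, uint32_t scale1, int64_t value2, uint32_t scale2)
    {
        int64_t lhs, rhs;
        if (!__builtin_mul_overflow(value1, static_cast<int64_t>(scale2), &lhs)
            && !__builtin_mul_overflow(value2, static_cast<int64_t>(scale1), &rhs)) {
            if (lhs == rhs)
                return 0;
            return lhs < rhs ? -1 : 1;
        }
        return 0; // the real code compares whole parts and remainders here instead
    }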
+bool MediaTime::isBetween(const MediaTime& a, const MediaTime& b) const
+{
+ if (a > b)
+ return *this > b && *this < a;
+ return *this > a && *this < b;
+}
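isBetween() orders its two bounds first and is strict on both ends, so the argument order does not matter and a time equal to either bound is not "between". A hedged usage sketch, assuming a WTF translation unit:

    MediaTime t(5, 1);                                              // 5 seconds at timescale 1
    bool inside = t.isBetween(MediaTime(1, 1), MediaTime(10, 1));   // true
    bool atBound = t.isBetween(MediaTime(5, 1), MediaTime(10, 1));  // false: bounds are exclusive
    bool swapped = t.isBetween(MediaTime(10, 1), MediaTime(1, 1));  // true: same as `inside`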
+
const MediaTime& MediaTime::zeroTime()
{
static const MediaTime* time = new MediaTime(0, 1, Valid);
@@ -355,8 +470,13 @@ const MediaTime& MediaTime::indefiniteTime()
return *time;
}
-void MediaTime::setTimeScale(int32_t timeScale)
+void MediaTime::setTimeScale(uint32_t timeScale)
{
+ if (hasDoubleValue()) {
+ *this = MediaTime::createWithDouble(m_timeValueAsDouble, timeScale);
+ return;
+ }
+
if (timeScale == m_timeScale)
return;
timeScale = std::min(MaximumTimeScale, timeScale);
@@ -366,7 +486,7 @@ void MediaTime::setTimeScale(int32_t timeScale)
// timescale by two until the number will fit, and round the
// result.
int64_t newWholePart;
- while (!safeMultiply(wholePart, timeScale, newWholePart))
+ while (!safeMultiply(wholePart, static_cast<int64_t>(timeScale), newWholePart))
timeScale /= 2;
int64_t remainder = m_timeValue % m_timeScale;
@@ -374,14 +494,25 @@ void MediaTime::setTimeScale(int32_t timeScale)
m_timeScale = timeScale;
}
+void MediaTime::dump(PrintStream& out) const
+{
+ out.print("{");
+ if (!hasDoubleValue())
+ out.print(m_timeValue, "/", m_timeScale, " = ");
+ out.print(toDouble(), "}");
+}
+
MediaTime abs(const MediaTime& rhs)
{
if (rhs.isInvalid())
return MediaTime::invalidTime();
if (rhs.isNegativeInfinite() || rhs.isPositiveInfinite())
return MediaTime::positiveInfiniteTime();
+ if (rhs.hasDoubleValue())
+ return MediaTime::createWithDouble(fabs(rhs.m_timeValueAsDouble));
+
MediaTime val = rhs;
- val.m_timeValue *= signum(rhs.m_timeScale) * signum(rhs.m_timeValue);
+ val.m_timeValue = std::abs(rhs.m_timeValue);
return val;
}
diff --git a/Source/WTF/wtf/MediaTime.h b/Source/WTF/wtf/MediaTime.h
index af6561ae3..50a018065 100644
--- a/Source/WTF/wtf/MediaTime.h
+++ b/Source/WTF/wtf/MediaTime.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -38,6 +38,8 @@
namespace WTF {
+class PrintStream;
+
class WTF_EXPORT_PRIVATE MediaTime {
WTF_MAKE_FAST_ALLOCATED;
public:
@@ -47,15 +49,18 @@ public:
PositiveInfinite = 1 << 2,
NegativeInfinite = 1 << 3,
Indefinite = 1 << 4,
+ DoubleValue = 1 << 5,
};
MediaTime();
- MediaTime(int64_t value, int32_t scale, uint32_t flags = Valid);
+ MediaTime(int64_t value, uint32_t scale, uint8_t flags = Valid);
MediaTime(const MediaTime& rhs);
~MediaTime();
- static MediaTime createWithFloat(float floatTime, int32_t timeScale = DefaultTimeScale);
- static MediaTime createWithDouble(double doubleTime, int32_t timeScale = DefaultTimeScale);
+ static MediaTime createWithFloat(float floatTime);
+ static MediaTime createWithFloat(float floatTime, uint32_t timeScale);
+ static MediaTime createWithDouble(double doubleTime);
+ static MediaTime createWithDouble(double doubleTime, uint32_t timeScale);
float toFloat() const;
double toDouble() const;
@@ -65,13 +70,16 @@ public:
MediaTime& operator-=(const MediaTime& rhs) { return *this = *this - rhs; }
MediaTime operator+(const MediaTime& rhs) const;
MediaTime operator-(const MediaTime& rhs) const;
+ MediaTime operator-() const;
MediaTime operator*(int32_t) const;
- bool operator<(const MediaTime& rhs) const;
- bool operator>(const MediaTime& rhs) const;
- bool operator!=(const MediaTime& rhs) const;
- bool operator==(const MediaTime& rhs) const;
- bool operator>=(const MediaTime& rhs) const;
- bool operator<=(const MediaTime& rhs) const;
+ bool operator<(const MediaTime& rhs) const { return compare(rhs) == LessThan; }
+ bool operator>(const MediaTime& rhs) const { return compare(rhs) == GreaterThan; }
+ bool operator!=(const MediaTime& rhs) const { return compare(rhs) != EqualTo; }
+ bool operator==(const MediaTime& rhs) const { return compare(rhs) == EqualTo; }
+ bool operator>=(const MediaTime& rhs) const { return compare(rhs) >= EqualTo; }
+ bool operator<=(const MediaTime& rhs) const { return compare(rhs) <= EqualTo; }
+ bool operator!() const;
+ explicit operator bool() const;
typedef enum {
LessThan = -1,
@@ -80,6 +88,7 @@ public:
} ComparisonFlags;
ComparisonFlags compare(const MediaTime& rhs) const;
+ bool isBetween(const MediaTime&, const MediaTime&) const;
bool isValid() const { return m_timeFlags & Valid; }
bool isInvalid() const { return !isValid(); }
@@ -87,6 +96,7 @@ public:
bool isPositiveInfinite() const { return m_timeFlags & PositiveInfinite; }
bool isNegativeInfinite() const { return m_timeFlags & NegativeInfinite; }
bool isIndefinite() const { return m_timeFlags & Indefinite; }
+ bool hasDoubleValue() const { return m_timeFlags & DoubleValue; }
static const MediaTime& zeroTime();
static const MediaTime& invalidTime();
@@ -95,18 +105,30 @@ public:
static const MediaTime& indefiniteTime();
const int64_t& timeValue() const { return m_timeValue; }
- const int32_t& timeScale() const { return m_timeScale; }
+ const uint32_t& timeScale() const { return m_timeScale; }
+
+ void dump(PrintStream& out) const;
+
+ // Make the following casts errors:
+ operator double() const = delete;
+ MediaTime(double) = delete;
+ operator int() const = delete;
+ MediaTime(int) = delete;
friend WTF_EXPORT_PRIVATE MediaTime abs(const MediaTime& rhs);
-private:
- static const int32_t DefaultTimeScale = 6000;
- static const int32_t MaximumTimeScale;
- void setTimeScale(int32_t);
+ static const uint32_t DefaultTimeScale = 10000000;
+ static const uint32_t MaximumTimeScale;
- int64_t m_timeValue;
- int32_t m_timeScale;
- uint32_t m_timeFlags;
+private:
+ void setTimeScale(uint32_t);
+
+ union {
+ int64_t m_timeValue;
+ double m_timeValueAsDouble;
+ };
+ uint32_t m_timeScale;
+ uint8_t m_timeFlags;
};
inline MediaTime operator*(int32_t lhs, const MediaTime& rhs) { return rhs.operator*(lhs); }
diff --git a/Source/WTF/wtf/MemoryFootprint.cpp b/Source/WTF/wtf/MemoryFootprint.cpp
new file mode 100644
index 000000000..dcc0de611
--- /dev/null
+++ b/Source/WTF/wtf/MemoryFootprint.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MemoryFootprint.h"
+
+#if PLATFORM(COCOA)
+#include <mach/mach.h>
+#include <mach/task_info.h>
+#endif
+
+namespace WTF {
+
+std::optional<size_t> memoryFootprint()
+{
+#if PLATFORM(COCOA)
+ task_vm_info_data_t vmInfo;
+ mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
+ kern_return_t result = task_info(mach_task_self(), TASK_VM_INFO, (task_info_t) &vmInfo, &count);
+ if (result != KERN_SUCCESS)
+ return std::nullopt;
+ return static_cast<size_t>(vmInfo.phys_footprint);
+#else
+ return std::nullopt;
+#endif
+}
+
+}
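memoryFootprint() returns the task's phys_footprint on Cocoa and std::nullopt everywhere else, so callers have to handle the missing case. A hedged usage sketch, assuming a WTF translation unit with wtf/DataLog.h available:

    if (std::optional<size_t> footprint = memoryFootprint())
        dataLog("process footprint: ", *footprint, " bytes\n");
    else
        dataLog("footprint unavailable on this platform\n");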
diff --git a/Source/WTF/wtf/MemoryFootprint.h b/Source/WTF/wtf/MemoryFootprint.h
new file mode 100644
index 000000000..7b8eca8e9
--- /dev/null
+++ b/Source/WTF/wtf/MemoryFootprint.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/Optional.h>
+
+namespace WTF {
+
+WTF_EXPORT_PRIVATE std::optional<size_t> memoryFootprint();
+
+}
+
+using WTF::memoryFootprint;
+
diff --git a/Source/WTF/wtf/MessageQueue.h b/Source/WTF/wtf/MessageQueue.h
index 4d3d419c1..c52a6ac9c 100644
--- a/Source/WTF/wtf/MessageQueue.h
+++ b/Source/WTF/wtf/MessageQueue.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2015-2016 Apple Inc. All rights reserved.
* Copyright (C) 2009 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,9 +32,12 @@
#include <limits>
#include <wtf/Assertions.h>
+#include <wtf/Condition.h>
#include <wtf/Deque.h>
+#include <wtf/Lock.h>
#include <wtf/Noncopyable.h>
#include <wtf/Threading.h>
+#include <wtf/WallTime.h>
namespace WTF {
@@ -61,9 +64,10 @@ namespace WTF {
std::unique_ptr<DataType> waitForMessage();
std::unique_ptr<DataType> tryGetMessage();
+ Deque<std::unique_ptr<DataType>> takeAllMessages();
std::unique_ptr<DataType> tryGetMessageIgnoringKilled();
template<typename Predicate>
- std::unique_ptr<DataType> waitForMessageFilteredWithTimeout(MessageQueueWaitResult&, Predicate&&, double absoluteTime);
+ std::unique_ptr<DataType> waitForMessageFilteredWithTimeout(MessageQueueWaitResult&, Predicate&&, WallTime absoluteTime);
template<typename Predicate>
void removeIf(Predicate&&);
@@ -74,11 +78,9 @@ namespace WTF {
// The result of isEmpty() is only valid if no other thread is manipulating the queue at the same time.
bool isEmpty();
- static double infiniteTime() { return std::numeric_limits<double>::max(); }
-
private:
- mutable Mutex m_mutex;
- ThreadCondition m_condition;
+ mutable Lock m_mutex;
+ Condition m_condition;
Deque<std::unique_ptr<DataType>> m_queue;
bool m_killed;
};
@@ -91,53 +93,53 @@ namespace WTF {
template<typename DataType>
inline void MessageQueue<DataType>::append(std::unique_ptr<DataType> message)
{
- MutexLocker lock(m_mutex);
- m_queue.append(std::move(message));
- m_condition.signal();
+ LockHolder lock(m_mutex);
+ m_queue.append(WTFMove(message));
+ m_condition.notifyOne();
}
template<typename DataType>
inline void MessageQueue<DataType>::appendAndKill(std::unique_ptr<DataType> message)
{
- MutexLocker lock(m_mutex);
- m_queue.append(std::move(message));
+ LockHolder lock(m_mutex);
+ m_queue.append(WTFMove(message));
m_killed = true;
- m_condition.broadcast();
+ m_condition.notifyAll();
}
// Returns true if the queue was empty before the item was added.
template<typename DataType>
inline bool MessageQueue<DataType>::appendAndCheckEmpty(std::unique_ptr<DataType> message)
{
- MutexLocker lock(m_mutex);
+ LockHolder lock(m_mutex);
bool wasEmpty = m_queue.isEmpty();
- m_queue.append(std::move(message));
- m_condition.signal();
+ m_queue.append(WTFMove(message));
+ m_condition.notifyOne();
return wasEmpty;
}
template<typename DataType>
inline void MessageQueue<DataType>::prepend(std::unique_ptr<DataType> message)
{
- MutexLocker lock(m_mutex);
- m_queue.prepend(std::move(message));
- m_condition.signal();
+ LockHolder lock(m_mutex);
+ m_queue.prepend(WTFMove(message));
+ m_condition.notifyOne();
}
template<typename DataType>
inline auto MessageQueue<DataType>::waitForMessage() -> std::unique_ptr<DataType>
{
MessageQueueWaitResult exitReason;
- std::unique_ptr<DataType> result = waitForMessageFilteredWithTimeout(exitReason, [](const DataType&) { return true; }, infiniteTime());
+ std::unique_ptr<DataType> result = waitForMessageFilteredWithTimeout(exitReason, [](const DataType&) { return true; }, WallTime::infinity());
ASSERT(exitReason == MessageQueueTerminated || exitReason == MessageQueueMessageReceived);
return result;
}
template<typename DataType>
template<typename Predicate>
- inline auto MessageQueue<DataType>::waitForMessageFilteredWithTimeout(MessageQueueWaitResult& result, Predicate&& predicate, double absoluteTime) -> std::unique_ptr<DataType>
+ inline auto MessageQueue<DataType>::waitForMessageFilteredWithTimeout(MessageQueueWaitResult& result, Predicate&& predicate, WallTime absoluteTime) -> std::unique_ptr<DataType>
{
- MutexLocker lock(m_mutex);
+ LockHolder lock(m_mutex);
bool timedOut = false;
auto found = m_queue.end();
@@ -149,10 +151,10 @@ namespace WTF {
if (found != m_queue.end())
break;
- timedOut = !m_condition.timedWait(m_mutex, absoluteTime);
+ timedOut = !m_condition.waitUntil(m_mutex, absoluteTime);
}
- ASSERT(!timedOut || absoluteTime != infiniteTime());
+ ASSERT(!timedOut || absoluteTime != WallTime::infinity());
if (m_killed) {
result = MessageQueueTerminated;
@@ -165,7 +167,7 @@ namespace WTF {
}
ASSERT(found != m_queue.end());
- std::unique_ptr<DataType> message = std::move(*found);
+ std::unique_ptr<DataType> message = WTFMove(*found);
m_queue.remove(found);
result = MessageQueueMessageReceived;
return message;
@@ -174,7 +176,7 @@ namespace WTF {
template<typename DataType>
inline auto MessageQueue<DataType>::tryGetMessage() -> std::unique_ptr<DataType>
{
- MutexLocker lock(m_mutex);
+ LockHolder lock(m_mutex);
if (m_killed)
return nullptr;
if (m_queue.isEmpty())
@@ -184,9 +186,18 @@ namespace WTF {
}
template<typename DataType>
+ inline auto MessageQueue<DataType>::takeAllMessages() -> Deque<std::unique_ptr<DataType>>
+ {
+ LockHolder lock(m_mutex);
+ if (m_killed)
+ return { };
+ return WTFMove(m_queue);
+ }
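takeAllMessages() drains the queue under a single lock acquisition by moving the underlying Deque out (a killed queue yields an empty deque), which is cheaper than looping on tryGetMessage() when the consumer wants to batch work. A hedged usage sketch, assuming a WTF translation unit and a hypothetical queued Task type with a run() method:

    MessageQueue<Task> queue;
    // ... producer threads call queue.append(std::make_unique<Task>(...)) ...
    for (auto& task : queue.takeAllMessages())
        task->run(); // task is a std::unique_ptr<Task>&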
+
+ template<typename DataType>
inline auto MessageQueue<DataType>::tryGetMessageIgnoringKilled() -> std::unique_ptr<DataType>
{
- MutexLocker lock(m_mutex);
+ LockHolder lock(m_mutex);
if (m_queue.isEmpty())
return nullptr;
@@ -197,7 +208,7 @@ namespace WTF {
template<typename Predicate>
inline void MessageQueue<DataType>::removeIf(Predicate&& predicate)
{
- MutexLocker lock(m_mutex);
+ LockHolder lock(m_mutex);
while (true) {
auto found = m_queue.findIf([&predicate](const std::unique_ptr<DataType>& ptr) -> bool {
ASSERT(ptr);
@@ -214,7 +225,7 @@ namespace WTF {
template<typename DataType>
inline bool MessageQueue<DataType>::isEmpty()
{
- MutexLocker lock(m_mutex);
+ LockHolder lock(m_mutex);
if (m_killed)
return true;
return m_queue.isEmpty();
@@ -223,15 +234,15 @@ namespace WTF {
template<typename DataType>
inline void MessageQueue<DataType>::kill()
{
- MutexLocker lock(m_mutex);
+ LockHolder lock(m_mutex);
m_killed = true;
- m_condition.broadcast();
+ m_condition.notifyAll();
}
template<typename DataType>
inline bool MessageQueue<DataType>::killed() const
{
- MutexLocker lock(m_mutex);
+ LockHolder lock(m_mutex);
return m_killed;
}
} // namespace WTF
diff --git a/Source/WTF/wtf/MetaAllocator.cpp b/Source/WTF/wtf/MetaAllocator.cpp
index d206233b6..6e44b6724 100644
--- a/Source/WTF/wtf/MetaAllocator.cpp
+++ b/Source/WTF/wtf/MetaAllocator.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -43,7 +43,6 @@ MetaAllocator::~MetaAllocator()
freeFreeSpaceNode(node);
node = next;
}
- m_lock.Finalize();
#ifndef NDEBUG
ASSERT(!m_mallocBalance);
#endif
@@ -61,7 +60,7 @@ void MetaAllocatorTracker::release(MetaAllocatorHandle* handle)
ALWAYS_INLINE void MetaAllocator::release(MetaAllocatorHandle* handle)
{
- SpinLockHolder locker(&m_lock);
+ LockHolder locker(&m_lock);
if (handle->sizeInBytes()) {
decrementPageOccupancy(handle->start(), handle->sizeInBytes());
addFreeSpaceFromReleasedHandle(handle->start(), handle->sizeInBytes());
@@ -92,7 +91,7 @@ void MetaAllocatorHandle::shrink(size_t newSizeInBytes)
{
ASSERT(newSizeInBytes <= m_sizeInBytes);
- SpinLockHolder locker(&m_allocator->m_lock);
+ LockHolder locker(&m_allocator->m_lock);
newSizeInBytes = m_allocator->roundUp(newSizeInBytes);
@@ -114,6 +113,11 @@ void MetaAllocatorHandle::shrink(size_t newSizeInBytes)
m_sizeInBytes = newSizeInBytes;
}
+void MetaAllocatorHandle::dump(PrintStream& out) const
+{
+ out.print(RawPointer(start()), "...", RawPointer(end()));
+}
+
MetaAllocator::MetaAllocator(size_t allocationGranule, size_t pageSize)
: m_allocationGranule(allocationGranule)
, m_pageSize(pageSize)
@@ -129,8 +133,6 @@ MetaAllocator::MetaAllocator(size_t allocationGranule, size_t pageSize)
, m_numFrees(0)
#endif
{
- m_lock.Init();
-
for (m_logPageSize = 0; m_logPageSize < 32; ++m_logPageSize) {
if (static_cast<size_t>(1) << m_logPageSize == m_pageSize)
break;
@@ -146,12 +148,12 @@ MetaAllocator::MetaAllocator(size_t allocationGranule, size_t pageSize)
ASSERT(static_cast<size_t>(1) << m_logAllocationGranule == m_allocationGranule);
}
-PassRefPtr<MetaAllocatorHandle> MetaAllocator::allocate(size_t sizeInBytes, void* ownerUID)
+RefPtr<MetaAllocatorHandle> MetaAllocator::allocate(size_t sizeInBytes, void* ownerUID)
{
- SpinLockHolder locker(&m_lock);
+ LockHolder locker(&m_lock);
if (!sizeInBytes)
- return 0;
+ return nullptr;
sizeInBytes = roundUp(sizeInBytes);
@@ -162,7 +164,7 @@ PassRefPtr<MetaAllocatorHandle> MetaAllocator::allocate(size_t sizeInBytes, void
start = allocateNewSpace(numberOfPages);
if (!start)
- return 0;
+ return nullptr;
ASSERT(numberOfPages >= requestedNumberOfPages);
@@ -184,17 +186,17 @@ PassRefPtr<MetaAllocatorHandle> MetaAllocator::allocate(size_t sizeInBytes, void
m_numAllocations++;
#endif
- MetaAllocatorHandle* handle = new MetaAllocatorHandle(this, start, sizeInBytes, ownerUID);
+ auto handle = adoptRef(*new MetaAllocatorHandle(this, start, sizeInBytes, ownerUID));
if (UNLIKELY(!!m_tracker))
- m_tracker->notify(handle);
+ m_tracker->notify(handle.ptr());
- return adoptRef(handle);
+ return WTFMove(handle);
}
MetaAllocator::Statistics MetaAllocator::currentStatistics()
{
- SpinLockHolder locker(&m_lock);
+ LockHolder locker(&m_lock);
Statistics result;
result.bytesAllocated = m_bytesAllocated;
result.bytesReserved = m_bytesReserved;
@@ -279,7 +281,7 @@ void MetaAllocator::addFreeSpaceFromReleasedHandle(void* start, size_t sizeInByt
void MetaAllocator::addFreshFreeSpace(void* start, size_t sizeInBytes)
{
- SpinLockHolder locker(&m_lock);
+ LockHolder locker(&m_lock);
m_bytesReserved += sizeInBytes;
addFreeSpace(start, sizeInBytes);
}
@@ -287,7 +289,7 @@ void MetaAllocator::addFreshFreeSpace(void* start, size_t sizeInBytes)
size_t MetaAllocator::debugFreeSpaceSize()
{
#ifndef NDEBUG
- SpinLockHolder locker(&m_lock);
+ LockHolder locker(&m_lock);
size_t result = 0;
for (FreeSpaceNode* node = m_freeSpaceSizeMap.first(); node; node = node->successor())
result += node->m_sizeInBytes;
@@ -424,6 +426,13 @@ void MetaAllocator::decrementPageOccupancy(void* address, size_t sizeInBytes)
}
}
+bool MetaAllocator::isInAllocatedMemory(const LockHolder&, void* address)
+{
+ ASSERT(m_lock.isLocked());
+ uintptr_t page = reinterpret_cast<uintptr_t>(address) >> m_logPageSize;
+ return m_pageOccupancyMap.contains(page);
+}
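isInAllocatedMemory() takes a const LockHolder& purely as compile-time evidence that the caller already holds the allocator's lock (exposed via getLock() in the header below), and still asserts it at runtime. A hedged usage sketch, assuming a WTF translation unit:

    bool containsAddress(MetaAllocator& allocator, void* address)
    {
        LockHolder locker(allocator.getLock());                // take the allocator's own lock
        return allocator.isInAllocatedMemory(locker, address); // the holder doubles as proof of locking
    }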
+
size_t MetaAllocator::roundUp(size_t sizeInBytes)
{
if (std::numeric_limits<size_t>::max() - m_allocationGranule <= sizeInBytes)
diff --git a/Source/WTF/wtf/MetaAllocator.h b/Source/WTF/wtf/MetaAllocator.h
index 0adc93bb4..4bcd93b72 100644
--- a/Source/WTF/wtf/MetaAllocator.h
+++ b/Source/WTF/wtf/MetaAllocator.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,13 +31,13 @@
#include <wtf/Assertions.h>
#include <wtf/HashMap.h>
+#include <wtf/Lock.h>
#include <wtf/MetaAllocatorHandle.h>
#include <wtf/Noncopyable.h>
#include <wtf/PageBlock.h>
#include <wtf/RedBlackTree.h>
#include <wtf/RefCounted.h>
#include <wtf/RefPtr.h>
-#include <wtf/TCSpinLock.h>
namespace WTF {
@@ -68,7 +68,7 @@ public:
WTF_EXPORT_PRIVATE virtual ~MetaAllocator();
- WTF_EXPORT_PRIVATE PassRefPtr<MetaAllocatorHandle> allocate(size_t sizeInBytes, void* ownerUID);
+ WTF_EXPORT_PRIVATE RefPtr<MetaAllocatorHandle> allocate(size_t sizeInBytes, void* ownerUID);
void trackAllocations(MetaAllocatorTracker* tracker)
{
@@ -86,7 +86,7 @@ public:
size_t bytesReserved;
size_t bytesCommitted;
};
- Statistics currentStatistics();
+ WTF_EXPORT_PRIVATE Statistics currentStatistics();
// Add more free space to the allocator. Call this directly from
// the constructor if you wish to operate the allocator within a
@@ -96,6 +96,9 @@ public:
// This is meant only for implementing tests. Never call this in release
// builds.
WTF_EXPORT_PRIVATE size_t debugFreeSpaceSize();
+
+ Lock& getLock() { return m_lock; }
+ WTF_EXPORT_PRIVATE bool isInAllocatedMemory(const LockHolder&, void* address);
#if ENABLE(META_ALLOCATOR_PROFILE)
void dumpProfile();
@@ -183,7 +186,7 @@ private:
size_t m_bytesReserved;
size_t m_bytesCommitted;
- SpinLock m_lock;
+ Lock m_lock;
MetaAllocatorTracker* m_tracker;
diff --git a/Source/WTF/wtf/MetaAllocatorHandle.h b/Source/WTF/wtf/MetaAllocatorHandle.h
index cf15eb34e..848511c5a 100644
--- a/Source/WTF/wtf/MetaAllocatorHandle.h
+++ b/Source/WTF/wtf/MetaAllocatorHandle.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -37,6 +37,7 @@
namespace WTF {
class MetaAllocator;
+class PrintStream;
class MetaAllocatorHandle : public ThreadSafeRefCounted<MetaAllocatorHandle>, public RedBlackTree<MetaAllocatorHandle, void*>::Node {
private:
@@ -102,6 +103,8 @@ public:
{
return m_start;
}
+
+ WTF_EXPORT_PRIVATE void dump(PrintStream& out) const;
private:
friend class MetaAllocator;
diff --git a/Source/WTF/wtf/MonotonicTime.cpp b/Source/WTF/wtf/MonotonicTime.cpp
new file mode 100644
index 000000000..25d3f0e74
--- /dev/null
+++ b/Source/WTF/wtf/MonotonicTime.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MonotonicTime.h"
+
+#include "CurrentTime.h"
+#include "PrintStream.h"
+#include "TimeWithDynamicClockType.h"
+#include "WallTime.h"
+
+namespace WTF {
+
+MonotonicTime MonotonicTime::now()
+{
+ return fromRawSeconds(monotonicallyIncreasingTime());
+}
+
+WallTime MonotonicTime::approximateWallTime() const
+{
+ return *this - now() + WallTime::now();
+}
+
+void MonotonicTime::dump(PrintStream& out) const
+{
+ out.print("Monotonic(", m_value, " sec)");
+}
+
+} // namespace WTF
+
+
diff --git a/Source/WTF/wtf/MonotonicTime.h b/Source/WTF/wtf/MonotonicTime.h
new file mode 100644
index 000000000..2823a8b39
--- /dev/null
+++ b/Source/WTF/wtf/MonotonicTime.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_MonotonicTime_h
+#define WTF_MonotonicTime_h
+
+#include <wtf/ClockType.h>
+#include <wtf/Seconds.h>
+
+namespace WTF {
+
+class WallTime;
+class PrintStream;
+
+// The current time according to a monotonic clock. Monotonic clocks don't go backwards and
+// possibly don't count downtime. This uses floating point internally so that you can reason about
+// infinity and other things that arise in math. It's acceptable to use this to wrap NaN times,
+// negative times, and infinite times, so long as they are all relative to the same clock.
+// Specifically, MonotonicTime should be used in agreement with the principle that
+// MonotonicTime::now().secondsSinceEpoch().value() is the same as
+// WTF::monotonicallyIncreasingTime().
+class MonotonicTime {
+public:
+ static const ClockType clockType = ClockType::Monotonic;
+
+ // This is the epoch. So, x.secondsSinceEpoch() should be the same as x - MonotonicTime().
+ MonotonicTime() { }
+
+ // Call this if you know for sure that the double represents time according to
+ // WTF::monotonicallyIncreasingTime(). It must be in seconds and it must be from the same time
+ // source.
+ static MonotonicTime fromRawSeconds(double value)
+ {
+ MonotonicTime result;
+ result.m_value = value;
+ return result;
+ }
+
+ WTF_EXPORT_PRIVATE static MonotonicTime now();
+
+ static MonotonicTime infinity() { return fromRawSeconds(std::numeric_limits<double>::infinity()); }
+
+ Seconds secondsSinceEpoch() const { return Seconds(m_value); }
+
+ MonotonicTime approximateMonotonicTime() const { return *this; }
+ WTF_EXPORT_PRIVATE WallTime approximateWallTime() const;
+
+ explicit operator bool() const { return !!m_value; }
+
+ MonotonicTime operator+(Seconds other) const
+ {
+ return fromRawSeconds(m_value + other.value());
+ }
+
+ MonotonicTime operator-(Seconds other) const
+ {
+ return fromRawSeconds(m_value - other.value());
+ }
+
+ // Time is a scalar and scalars can be negated as this could arise from algebraic
+ // transformations. So, we allow it.
+ MonotonicTime operator-() const
+ {
+ return fromRawSeconds(-m_value);
+ }
+
+ MonotonicTime operator+=(Seconds other)
+ {
+ return *this = *this + other;
+ }
+
+ MonotonicTime operator-=(Seconds other)
+ {
+ return *this = *this - other;
+ }
+
+ Seconds operator-(MonotonicTime other) const
+ {
+ return Seconds(m_value - other.m_value);
+ }
+
+ bool operator==(MonotonicTime other) const
+ {
+ return m_value == other.m_value;
+ }
+
+ bool operator!=(MonotonicTime other) const
+ {
+ return m_value != other.m_value;
+ }
+
+ bool operator<(MonotonicTime other) const
+ {
+ return m_value < other.m_value;
+ }
+
+ bool operator>(MonotonicTime other) const
+ {
+ return m_value > other.m_value;
+ }
+
+ bool operator<=(MonotonicTime other) const
+ {
+ return m_value <= other.m_value;
+ }
+
+ bool operator>=(MonotonicTime other) const
+ {
+ return m_value >= other.m_value;
+ }
+
+ WTF_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+private:
+ double m_value { 0 };
+};
+
+} // namespace WTF
+
+using WTF::MonotonicTime;
+
+#endif // WTF_MonotonicTime_h
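MonotonicTime is a double-backed point on the monotonic clock: subtracting two points yields a Seconds, and adding a Seconds yields another point, so elapsed-time code no longer has to pass raw doubles around. A hedged usage sketch, assuming a WTF translation unit and a hypothetical doSomeWork():

    MonotonicTime start = MonotonicTime::now();
    doSomeWork();                                   // hypothetical workload
    Seconds elapsed = MonotonicTime::now() - start; // point - point = duration
    MonotonicTime deadline = start + Seconds(0.5);  // point + duration = point
    if (MonotonicTime::now() > deadline)
        dataLog("work ran past the 500ms deadline; elapsed = ", elapsed.value(), " s\n");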
diff --git a/Source/WTF/wtf/NakedPtr.h b/Source/WTF/wtf/NakedPtr.h
new file mode 100644
index 000000000..585cb1c63
--- /dev/null
+++ b/Source/WTF/wtf/NakedPtr.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_NakedPtr_h
+#define WTF_NakedPtr_h
+
+#include <wtf/FastMalloc.h>
+
+namespace WTF {
+
+// The purpose of this class is to ensure that the wrapped pointer will never be
+// used uninitialized.
+
+template <typename T> class NakedPtr {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ ALWAYS_INLINE NakedPtr() : m_ptr(nullptr) { }
+ ALWAYS_INLINE NakedPtr(T* ptr) : m_ptr(ptr) { }
+ ALWAYS_INLINE NakedPtr(const NakedPtr& o) : m_ptr(o.m_ptr) { }
+ template<typename U> NakedPtr(const NakedPtr<U>& o) : m_ptr(o.get()) { }
+
+ ALWAYS_INLINE NakedPtr(NakedPtr&& o) : m_ptr(o.get()) { }
+ template<typename U> NakedPtr(NakedPtr<U>&& o) : m_ptr(o.get()) { }
+
+ T* get() const { return m_ptr; }
+
+ void clear() { m_ptr = nullptr; }
+
+ T& operator*() const { ASSERT(m_ptr); return *m_ptr; }
+ ALWAYS_INLINE T* operator->() const { return m_ptr; }
+
+ operator T*() { return m_ptr; }
+
+ bool operator!() const { return !m_ptr; }
+
+ explicit operator bool() const { return !!m_ptr; }
+
+ NakedPtr& operator=(const NakedPtr&);
+ NakedPtr& operator=(T*);
+ template<typename U> NakedPtr& operator=(const NakedPtr<U>&);
+ NakedPtr& operator=(NakedPtr&&);
+ template<typename U> NakedPtr& operator=(NakedPtr<U>&&);
+
+ void swap(NakedPtr&);
+
+private:
+ T* m_ptr;
+};
+
+template<typename T> inline NakedPtr<T>& NakedPtr<T>::operator=(const NakedPtr& o)
+{
+ m_ptr = o.m_ptr;
+ return *this;
+}
+
+template<typename T> inline NakedPtr<T>& NakedPtr<T>::operator=(T* optr)
+{
+ m_ptr = optr;
+ return *this;
+}
+
+template<typename T> template<typename U> inline NakedPtr<T>& NakedPtr<T>::operator=(const NakedPtr<U>& o)
+{
+ NakedPtr ptr = o;
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> inline NakedPtr<T>& NakedPtr<T>::operator=(NakedPtr&& o)
+{
+ NakedPtr ptr = WTFMove(o);
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> template<typename U> inline NakedPtr<T>& NakedPtr<T>::operator=(NakedPtr<U>&& o)
+{
+ NakedPtr ptr = WTFMove(o);
+ swap(ptr);
+ return *this;
+}
+
+template<class T> inline void NakedPtr<T>::swap(NakedPtr& o)
+{
+ std::swap(m_ptr, o.m_ptr);
+}
+
+template<class T> inline void swap(NakedPtr<T>& a, NakedPtr<T>& b)
+{
+ a.swap(b);
+}
+
+} // namespace WTF
+
+using WTF::NakedPtr;
+
+#endif // WTF_NakedPtr_h
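NakedPtr is aimed mainly at out-parameters: the default constructor nulls the wrapped pointer, so a callee that never sets it still leaves the caller with a well-defined value. A hedged usage sketch; Exception, evaluate() and report() are hypothetical names:

    class Exception;                           // hypothetical exception type
    void report(Exception&);                   // hypothetical reporting hook
    void evaluate(const char* script, NakedPtr<Exception>& returnedException); // callee may set this

    void runScript()
    {
        NakedPtr<Exception> exception;         // starts out null, never uninitialized
        evaluate("1 + 1", exception);
        if (exception)
            report(*exception);
    }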
diff --git a/Source/WTF/wtf/NeverDestroyed.h b/Source/WTF/wtf/NeverDestroyed.h
index e021f98da..b824634f8 100644
--- a/Source/WTF/wtf/NeverDestroyed.h
+++ b/Source/WTF/wtf/NeverDestroyed.h
@@ -29,6 +29,7 @@
#include <type_traits>
#include <utility>
#include <wtf/Noncopyable.h>
+#include <wtf/RefCounted.h>
// NeverDestroyed is a smart-pointer-like class that ensures that the destructor
// for the given object is never called, but doesn't use the heap to allocate it.
@@ -49,7 +50,7 @@ public:
template<typename... Args>
NeverDestroyed(Args&&... args)
{
- new (asPtr()) T(std::forward<Args>(args)...);
+ MaybeRelax<T>(new (asPtr()) T(std::forward<Args>(args)...));
}
operator T&() { return *asPtr(); }
@@ -63,10 +64,69 @@ private:
// FIXME: Investigate whether we should allocate a hunk of virtual memory
// and hand out chunks of it to NeverDestroyed instead, to reduce fragmentation.
typename std::aligned_storage<sizeof(T), std::alignment_of<T>::value>::type m_storage;
+
+ template <typename PtrType, bool ShouldRelax = std::is_base_of<RefCountedBase, PtrType>::value> struct MaybeRelax {
+ explicit MaybeRelax(PtrType*) { }
+ };
+ template <typename PtrType> struct MaybeRelax<PtrType, true> {
+ explicit MaybeRelax(PtrType* ptr) { ptr->relaxAdoptionRequirement(); }
+ };
+};
+
+template<typename T> class LazyNeverDestroyed {
+ WTF_MAKE_NONCOPYABLE(LazyNeverDestroyed);
+
+public:
+ LazyNeverDestroyed() = default;
+
+ template<typename... Args>
+ void construct(Args&&... args)
+ {
+ ASSERT(!m_isConstructed);
+
+#if !ASSERT_DISABLED
+ m_isConstructed = true;
+#endif
+
+ MaybeRelax<T>(new (asPtr()) T(std::forward<Args>(args)...));
+ }
+
+ operator T&() { return *asPtr(); }
+ T& get() { return *asPtr(); }
+
+ T* operator->() { return asPtr(); }
+
+private:
+ typedef typename std::remove_const<T>::type* PointerType;
+
+ PointerType asPtr()
+ {
+ ASSERT(m_isConstructed);
+
+ return reinterpret_cast<PointerType>(&m_storage);
+ }
+
+ // FIXME: Investigate whether we should allocate a hunk of virtual memory
+ // and hand out chunks of it to NeverDestroyed instead, to reduce fragmentation.
+ typename std::aligned_storage<sizeof(T), std::alignment_of<T>::value>::type m_storage;
+
+ template <typename PtrType, bool ShouldRelax = std::is_base_of<RefCountedBase, PtrType>::value> struct MaybeRelax {
+ explicit MaybeRelax(PtrType*) { }
+ };
+ template <typename PtrType> struct MaybeRelax<PtrType, true> {
+ explicit MaybeRelax(PtrType* ptr) { ptr->relaxAdoptionRequirement(); }
+ };
+
+#if !ASSERT_DISABLED
+ // LazyNeverDestroyed objects are always static, so this variable is initialized to false.
+ // It must not be initialized dynamically, because that would not be thread safe.
+ bool m_isConstructed;
+#endif
};
} // namespace WTF
+using WTF::LazyNeverDestroyed;
using WTF::NeverDestroyed;
#endif // NeverDestroyed_h
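A brief sketch of how the two helpers are typically used (editorial aside, not part of the patch itself). The Cache type and the capacities are hypothetical.

#include <wtf/NeverDestroyed.h>

struct Cache {
    explicit Cache(unsigned capacity) : capacity(capacity) { }
    unsigned capacity;
};

// NeverDestroyed: constructed on first use; the destructor is intentionally never run.
static Cache& sharedCache()
{
    static NeverDestroyed<Cache> cache(128);
    return cache; // implicit conversion through operator T&
}

// LazyNeverDestroyed: static storage, but construction is deferred until construct() is called.
static LazyNeverDestroyed<Cache> lazyCache;

static void initializeLazyCache()
{
    lazyCache.construct(256); // must run exactly once, before any lazyCache.get() call
}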
diff --git a/Source/WTF/wtf/NumberOfCores.cpp b/Source/WTF/wtf/NumberOfCores.cpp
index 75d8af70f..ea5f825dc 100644
--- a/Source/WTF/wtf/NumberOfCores.cpp
+++ b/Source/WTF/wtf/NumberOfCores.cpp
@@ -26,13 +26,15 @@
#include "config.h"
#include "NumberOfCores.h"
-#if OS(DARWIN) || OS(OPENBSD) || OS(NETBSD) || OS(FREEBSD)
+#include <cstdio>
+
+#if OS(DARWIN)
#include <sys/param.h>
// sys/types.h must come before sys/sysctl.h because the latter uses
// data types defined in the former. See sysctl(3) and style(9).
#include <sys/types.h>
#include <sys/sysctl.h>
-#elif OS(LINUX) || OS(AIX) || OS(SOLARIS)
+#elif OS(LINUX) || OS(AIX) || OS(SOLARIS) || OS(OPENBSD) || OS(NETBSD) || OS(FREEBSD)
#include <unistd.h>
#elif OS(WINDOWS)
#include <windows.h>
@@ -47,18 +49,27 @@ int numberOfProcessorCores()
if (s_numberOfCores > 0)
return s_numberOfCores;
+
+ if (const char* coresEnv = getenv("WTF_numberOfProcessorCores")) {
+ unsigned numberOfCores;
+ if (sscanf(coresEnv, "%u", &numberOfCores) == 1) {
+ s_numberOfCores = numberOfCores;
+ return s_numberOfCores;
+ } else
+ fprintf(stderr, "WARNING: failed to parse WTF_numberOfProcessorCores=%s\n", coresEnv);
+ }
-#if OS(DARWIN) || OS(OPENBSD) || OS(NETBSD) || OS(FREEBSD)
+#if OS(DARWIN)
unsigned result;
size_t length = sizeof(result);
int name[] = {
CTL_HW,
- HW_NCPU
+ HW_AVAILCPU
};
int sysctlResult = sysctl(name, sizeof(name) / sizeof(int), &result, &length, 0, 0);
s_numberOfCores = sysctlResult < 0 ? defaultIfUnavailable : result;
-#elif OS(LINUX) || OS(AIX) || OS(SOLARIS)
+#elif OS(LINUX) || OS(AIX) || OS(SOLARIS) || OS(OPENBSD) || OS(NETBSD) || OS(FREEBSD)
long sysconfResult = sysconf(_SC_NPROCESSORS_ONLN);
s_numberOfCores = sysconfResult < 0 ? defaultIfUnavailable : static_cast<int>(sysconfResult);
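A sketch of how a test might exercise the WTF_numberOfProcessorCores override added above (editorial aside, not part of the patch itself); setenv() is assumed to be available, i.e. a POSIX environment.

#include <cstdlib>
#include <wtf/NumberOfCores.h>

static void forceSingleCoreForTesting()
{
    // Must happen before the first call, because the result is cached in s_numberOfCores.
    setenv("WTF_numberOfProcessorCores", "1", 1 /* overwrite */);
    int cores = WTF::numberOfProcessorCores(); // now parsed from the environment variable
    (void)cores;
}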
diff --git a/Source/WTF/wtf/OSAllocator.h b/Source/WTF/wtf/OSAllocator.h
index a57667213..c9cb155c0 100644
--- a/Source/WTF/wtf/OSAllocator.h
+++ b/Source/WTF/wtf/OSAllocator.h
@@ -63,13 +63,15 @@ public:
// committing/decommitting the entire region additional parameters allow a subregion to be
// specified.
WTF_EXPORT_PRIVATE static void* reserveAndCommit(size_t reserveSize, size_t commitSize, Usage = UnknownUsage, bool writable = true, bool executable = false);
- static void decommitAndRelease(void* releaseBase, size_t releaseSize, void* decommitBase, size_t decommitSize);
// Reallocate an existing, committed allocation.
// The prior allocation must be fully committed, and the new size will also be fully committed.
// This interface is provided since it may be possible to optimize this operation on some platforms.
template<typename T>
static T* reallocateCommitted(T*, size_t oldSize, size_t newSize, Usage = UnknownUsage, bool writable = true, bool executable = false);
+
+ // Hint to the OS that an address range is not expected to be accessed anytime soon.
+ WTF_EXPORT_PRIVATE static void hintMemoryNotNeededSoon(void*, size_t);
};
inline void* OSAllocator::reserveAndCommit(size_t reserveSize, size_t commitSize, Usage usage, bool writable, bool executable)
@@ -79,25 +81,11 @@ inline void* OSAllocator::reserveAndCommit(size_t reserveSize, size_t commitSize
return base;
}
-inline void OSAllocator::decommitAndRelease(void* releaseBase, size_t releaseSize, void* decommitBase, size_t decommitSize)
+inline void OSAllocator::decommitAndRelease(void* releaseBase, size_t releaseSize)
{
- ASSERT(decommitBase >= releaseBase && (static_cast<char*>(decommitBase) + decommitSize) <= (static_cast<char*>(releaseBase) + releaseSize));
-#if OS(WINCE)
- // On most platforms we can actually skip this final decommit; releasing the VM will
- // implicitly decommit any physical memory in the region. This is not true on WINCE.
- decommit(decommitBase, decommitSize);
-#else
- UNUSED_PARAM(decommitBase);
- UNUSED_PARAM(decommitSize);
-#endif
releaseDecommitted(releaseBase, releaseSize);
}
-inline void OSAllocator::decommitAndRelease(void* base, size_t size)
-{
- decommitAndRelease(base, size, base, size);
-}
-
template<typename T>
inline T* OSAllocator::reallocateCommitted(T* oldBase, size_t oldSize, size_t newSize, Usage usage, bool writable, bool executable)
{
diff --git a/Source/WTF/wtf/OSAllocatorPosix.cpp b/Source/WTF/wtf/OSAllocatorPosix.cpp
index 61661eecc..f5e6669a9 100644
--- a/Source/WTF/wtf/OSAllocatorPosix.cpp
+++ b/Source/WTF/wtf/OSAllocatorPosix.cpp
@@ -50,8 +50,10 @@ void* OSAllocator::reserveUncommitted(size_t bytes, Usage usage, bool writable,
#else
void* result = reserveAndCommit(bytes, usage, writable, executable, includesGuardPages);
#if HAVE(MADV_FREE_REUSE)
- // To support the "reserve then commit" model, we have to initially decommit.
- while (madvise(result, bytes, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+ if (result) {
+ // To support the "reserve then commit" model, we have to initially decommit.
+ while (madvise(result, bytes, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+ }
#endif
#endif
@@ -104,11 +106,9 @@ void* OSAllocator::reserveAndCommit(size_t bytes, Usage usage, bool writable, bo
result = mmap(result, bytes, protection, flags, fd, 0);
if (result == MAP_FAILED) {
-#if ENABLE(LLINT)
if (executable)
result = 0;
else
-#endif
CRASH();
}
if (result && includesGuardPages) {
@@ -164,6 +164,16 @@ void OSAllocator::decommit(void* address, size_t bytes)
#endif
}
+void OSAllocator::hintMemoryNotNeededSoon(void* address, size_t bytes)
+{
+#if HAVE(MADV_DONTNEED)
+ while (madvise(address, bytes, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
+#else
+ UNUSED_PARAM(address);
+ UNUSED_PARAM(bytes);
+#endif
+}
+
void OSAllocator::releaseDecommitted(void* address, size_t bytes)
{
int result = munmap(address, bytes);
diff --git a/Source/WTF/wtf/OSAllocatorWin.cpp b/Source/WTF/wtf/OSAllocatorWin.cpp
index 37e013013..6a457f423 100644
--- a/Source/WTF/wtf/OSAllocatorWin.cpp
+++ b/Source/WTF/wtf/OSAllocatorWin.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -73,6 +73,8 @@ void OSAllocator::decommit(void* address, size_t bytes)
// See: https://bugs.webkit.org/show_bug.cgi?id=121972.
if (!bytes)
return;
+ // Silence warning about using MEM_DECOMMIT instead of MEM_RELEASE:
+#pragma warning(suppress: 6250)
bool result = VirtualFree(address, bytes, MEM_DECOMMIT);
if (!result)
CRASH();
@@ -91,6 +93,10 @@ void OSAllocator::releaseDecommitted(void* address, size_t bytes)
CRASH();
}
+void OSAllocator::hintMemoryNotNeededSoon(void*, size_t)
+{
+}
+
} // namespace WTF
#endif // OS(WINDOWS)
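A sketch of the OSAllocator entry points touched by this change, using only signatures visible above (editorial aside, not part of the patch itself); the 16 MB size is arbitrary.

#include <wtf/OSAllocator.h>

static void osAllocatorExample()
{
    size_t size = 16 * 1024 * 1024;
    // Two-size overload shown above; here the whole reservation is committed immediately.
    void* base = OSAllocator::reserveAndCommit(size, size);
    // ... use the memory ...
    // Purely advisory: tells the OS these pages are unlikely to be touched soon.
    OSAllocator::hintMemoryNotNeededSoon(base, size);
    // The simplified two-argument form kept by this change.
    OSAllocator::decommitAndRelease(base, size);
}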
diff --git a/Source/WTF/wtf/OSObjectPtr.h b/Source/WTF/wtf/OSObjectPtr.h
new file mode 100644
index 000000000..ed7eceb92
--- /dev/null
+++ b/Source/WTF/wtf/OSObjectPtr.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef OSObjectPtr_h
+#define OSObjectPtr_h
+
+#include <os/object.h>
+#include <wtf/Assertions.h>
+#include <wtf/StdLibExtras.h>
+
+namespace WTF {
+
+template<typename T> class OSObjectPtr;
+template<typename T> OSObjectPtr<T> adoptOSObject(T);
+
+template<typename T>
+static inline void retainOSObject(T ptr)
+{
+ os_retain(ptr);
+}
+
+template<typename T>
+static inline void releaseOSObject(T ptr)
+{
+ os_release(ptr);
+}
+
+template<typename T> class OSObjectPtr {
+public:
+ OSObjectPtr()
+ : m_ptr(nullptr)
+ {
+ }
+
+ ~OSObjectPtr()
+ {
+ if (m_ptr)
+ releaseOSObject(m_ptr);
+ }
+
+ T get() const { return m_ptr; }
+
+ explicit operator bool() const { return m_ptr; }
+ bool operator!() const { return !m_ptr; }
+
+ OSObjectPtr(const OSObjectPtr& other)
+ : m_ptr(other.m_ptr)
+ {
+ if (m_ptr)
+ retainOSObject(m_ptr);
+ }
+
+ OSObjectPtr(OSObjectPtr&& other)
+ : m_ptr(other.m_ptr)
+ {
+ other.m_ptr = nullptr;
+ }
+
+ OSObjectPtr& operator=(const OSObjectPtr& other)
+ {
+ OSObjectPtr ptr = other;
+ swap(ptr);
+ return *this;
+ }
+
+ OSObjectPtr& operator=(OSObjectPtr&& other)
+ {
+ OSObjectPtr ptr = WTFMove(other);
+ swap(ptr);
+ return *this;
+ }
+
+ OSObjectPtr& operator=(std::nullptr_t)
+ {
+ if (m_ptr)
+ releaseOSObject(m_ptr);
+ m_ptr = nullptr;
+
+ return *this;
+ }
+
+ void swap(OSObjectPtr& other)
+ {
+ std::swap(m_ptr, other.m_ptr);
+ }
+
+ T leakRef() WARN_UNUSED_RETURN
+ {
+ return std::exchange(m_ptr, nullptr);
+ }
+
+ friend OSObjectPtr adoptOSObject<T>(T);
+
+private:
+ struct AdoptOSObject { };
+ OSObjectPtr(AdoptOSObject, T ptr)
+ : m_ptr(ptr)
+ {
+ }
+
+ T m_ptr;
+};
+
+template<typename T> inline OSObjectPtr<T> adoptOSObject(T ptr)
+{
+ return OSObjectPtr<T>(typename OSObjectPtr<T>::AdoptOSObject { }, ptr);
+}
+
+} // namespace WTF
+
+using WTF::OSObjectPtr;
+using WTF::adoptOSObject;
+
+#endif // OSObjectPtr_h
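A sketch of the adoptOSObject() pattern with a libdispatch object, the typical client of OSObjectPtr on Darwin (editorial aside, not part of the patch itself); the queue label is hypothetical.

#include <dispatch/dispatch.h>
#include <wtf/OSObjectPtr.h>

static OSObjectPtr<dispatch_queue_t> makeWorkQueue()
{
    // dispatch_queue_create() returns a +1 reference, so adopt it instead of retaining again.
    return adoptOSObject(dispatch_queue_create("org.example.work", DISPATCH_QUEUE_SERIAL));
    // The OSObjectPtr releases the queue when the last copy is destroyed or reassigned.
}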
diff --git a/Source/WTF/wtf/OSRandomSource.cpp b/Source/WTF/wtf/OSRandomSource.cpp
index 2495abf71..378795dc7 100644
--- a/Source/WTF/wtf/OSRandomSource.cpp
+++ b/Source/WTF/wtf/OSRandomSource.cpp
@@ -13,7 +13,7 @@
* THIS SOFTWARE IS PROVIDED BY GOOGLE, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -29,7 +29,8 @@
#include <stdint.h>
#include <stdlib.h>
-#if OS(UNIX)
+#if !OS(DARWIN) && OS(UNIX)
+#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#endif
@@ -39,19 +40,47 @@
#include <wincrypt.h> // windows.h must be included before wincrypt.h.
#endif
+#if OS(DARWIN)
+#include "CommonCryptoSPI.h"
+#endif
+
namespace WTF {
+#if !OS(DARWIN) && OS(UNIX)
+NEVER_INLINE NO_RETURN_DUE_TO_CRASH static void crashUnableToOpenURandom()
+{
+ CRASH();
+}
+
+NEVER_INLINE NO_RETURN_DUE_TO_CRASH static void crashUnableToReadFromURandom()
+{
+ CRASH();
+}
+#endif
+
void cryptographicallyRandomValuesFromOS(unsigned char* buffer, size_t length)
{
-#if OS(UNIX)
+#if OS(DARWIN)
+ RELEASE_ASSERT(!CCRandomCopyBytes(kCCRandomDefault, buffer, length));
+#elif OS(UNIX)
int fd = open("/dev/urandom", O_RDONLY, 0);
if (fd < 0)
- CRASH(); // We need /dev/urandom for this API to work...
-
- if (read(fd, buffer, length) != static_cast<ssize_t>(length))
- CRASH();
+ crashUnableToOpenURandom(); // We need /dev/urandom for this API to work...
+ ssize_t amountRead = 0;
+ while (static_cast<size_t>(amountRead) < length) {
+ ssize_t currentRead = read(fd, buffer + amountRead, length - amountRead);
+ // We need to check for both EAGAIN and EINTR since on some systems /dev/urandom
+ // is blocking and on others it is non-blocking.
+ if (currentRead == -1) {
+ if (!(errno == EAGAIN || errno == EINTR))
+ crashUnableToReadFromURandom();
+ } else
+ amountRead += currentRead;
+ }
+
close(fd);
+
#elif OS(WINDOWS)
HCRYPTPROV hCryptProv = 0;
if (!CryptAcquireContext(&hCryptProv, 0, MS_DEF_PROV, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT))
diff --git a/Source/WTF/wtf/OSRandomSource.h b/Source/WTF/wtf/OSRandomSource.h
index 266d44ac2..9f783df51 100644
--- a/Source/WTF/wtf/OSRandomSource.h
+++ b/Source/WTF/wtf/OSRandomSource.h
@@ -13,7 +13,7 @@
* THIS SOFTWARE IS PROVIDED BY GOOGLE, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
diff --git a/Source/WTF/wtf/OptionSet.h b/Source/WTF/wtf/OptionSet.h
new file mode 100644
index 000000000..e839f4222
--- /dev/null
+++ b/Source/WTF/wtf/OptionSet.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <initializer_list>
+#include <iterator>
+#include <type_traits>
+#include <wtf/Assertions.h>
+#include <wtf/MathExtras.h>
+
+namespace WTF {
+
+// OptionSet is a class that represents a set of enumerators in a space-efficient manner. The enumerators
+// must be powers of two greater than 0. This class is useful as a replacement for passing a bitmask of
+// enumerators around.
+template<typename T> class OptionSet {
+ static_assert(std::is_enum<T>::value, "T is not an enum type");
+ typedef typename std::make_unsigned<typename std::underlying_type<T>::type>::type StorageType;
+
+public:
+ template<typename StorageType> class Iterator {
+ public:
+ // Isolate the rightmost set bit.
+ T operator*() const { return static_cast<T>(m_value & -m_value); }
+
+ // Iterates from smallest to largest enum value by turning off the rightmost set bit.
+ Iterator& operator++()
+ {
+ m_value &= m_value - 1;
+ return *this;
+ }
+
+ Iterator& operator++(int) = delete;
+
+ bool operator==(const Iterator& other) const { return m_value == other.m_value; }
+ bool operator!=(const Iterator& other) const { return m_value != other.m_value; }
+
+ private:
+ Iterator(StorageType value) : m_value(value) { }
+ friend OptionSet;
+
+ StorageType m_value;
+ };
+ using iterator = Iterator<StorageType>;
+
+ static constexpr OptionSet fromRaw(StorageType storageType)
+ {
+ return OptionSet(static_cast<T>(storageType), FromRawValue);
+ }
+
+ constexpr OptionSet() = default;
+
+#if ASSERT_DISABLED
+ constexpr OptionSet(T t)
+ : m_storage(static_cast<StorageType>(t))
+ {
+ }
+#else
+ OptionSet(T t)
+ : m_storage(static_cast<StorageType>(t))
+ {
+ ASSERT_WITH_MESSAGE(hasOneBitSet(static_cast<StorageType>(t)), "Enumerator is not a positive power of two.");
+ }
+#endif
+
+ // FIXME: Make this constexpr once we adopt C++14 as C++11 does not support for-loops
+ // in a constexpr function.
+ OptionSet(std::initializer_list<T> initializerList)
+ {
+ for (auto& option : initializerList) {
+ ASSERT_WITH_MESSAGE(hasOneBitSet(static_cast<StorageType>(option)), "Enumerator is not a positive power of two.");
+ m_storage |= static_cast<StorageType>(option);
+ }
+ }
+
+ constexpr StorageType toRaw() const { return m_storage; }
+
+ constexpr bool isEmpty() const { return !m_storage; }
+
+ constexpr iterator begin() const { return m_storage; }
+ constexpr iterator end() const { return 0; }
+
+ constexpr bool contains(OptionSet optionSet) const
+ {
+ return m_storage & optionSet.m_storage;
+ }
+
+ constexpr friend bool operator==(OptionSet lhs, OptionSet rhs)
+ {
+ return lhs.m_storage == rhs.m_storage;
+ }
+
+ constexpr friend bool operator!=(OptionSet lhs, OptionSet rhs)
+ {
+ return lhs.m_storage != rhs.m_storage;
+ }
+
+ friend OptionSet& operator|=(OptionSet& lhs, OptionSet rhs)
+ {
+ lhs.m_storage |= rhs.m_storage;
+
+ return lhs;
+ }
+
+ constexpr friend OptionSet operator-(OptionSet lhs, OptionSet rhs)
+ {
+ return OptionSet::fromRaw(lhs.m_storage & ~rhs.m_storage);
+ }
+
+private:
+ enum InitializationTag { FromRawValue };
+ constexpr OptionSet(T t, InitializationTag)
+ : m_storage(static_cast<StorageType>(t))
+ {
+ }
+ StorageType m_storage { 0 };
+};
+
+}
+
+using WTF::OptionSet;
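A sketch of OptionSet usage (editorial aside, not part of the patch itself); the TextStyle enum is hypothetical, and each enumerator must be a distinct power of two, as required above.

#include <wtf/OptionSet.h>

enum class TextStyle {
    Bold      = 1 << 0,
    Italic    = 1 << 1,
    Underline = 1 << 2,
};

static void optionSetExample()
{
    OptionSet<TextStyle> styles { TextStyle::Bold, TextStyle::Underline };
    if (styles.contains(TextStyle::Bold))
        styles |= TextStyle::Italic;        // single enumerators convert implicitly to OptionSet
    for (TextStyle style : styles)          // iterates from the lowest set bit upward
        (void)style;
    styles = styles - TextStyle::Underline; // operator- removes options
}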
diff --git a/Source/WTF/wtf/Optional.h b/Source/WTF/wtf/Optional.h
new file mode 100644
index 000000000..2b3c7c124
--- /dev/null
+++ b/Source/WTF/wtf/Optional.h
@@ -0,0 +1,1109 @@
+// Copyright (C) 2011 - 2012 Andrzej Krzemienski.
+//
+// Use, modification, and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// The idea and interface is based on Boost.Optional library
+// authored by Fernando Luis Cacciola Carballal
+//
+// Boost Software License - Version 1.0 - August 17th, 2003
+//
+// Permission is hereby granted, free of charge, to any person or organization
+// obtaining a copy of the software and accompanying documentation covered by
+// this license (the "Software") to use, reproduce, display, distribute,
+// execute, and transmit the Software, and to prepare derivative works of the
+// Software, and to permit third-parties to whom the Software is furnished to
+// do so, all subject to the following:
+//
+// The copyright notices in the Software and this entire statement, including
+// the above license grant, this restriction and the following disclaimer,
+// must be included in all copies of the Software, in whole or in part, and
+// all derivative works of the Software, unless such copies or derivative
+// works are solely in the form of machine-executable object code generated by
+// a source language processor.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+// Copied from https://github.com/akrzemi1/Optional (727c729dd1d9f06f225868280e50154594d7e59d)
+
+// Modified to make it compile with exceptions disabled.
+
+#pragma once
+
+# include <utility>
+# include <type_traits>
+# include <initializer_list>
+# include <cassert>
+# include <functional>
+# include <string>
+# include <stdexcept>
+# include <wtf/Assertions.h>
+# include <wtf/Compiler.h>
+# include <wtf/StdLibExtras.h>
+
+# define TR2_OPTIONAL_REQUIRES(...) typename std::enable_if<__VA_ARGS__::value, bool>::type = false
+
+# if defined __GNUC__ // NOTE: GNUC is also defined for Clang
+# if (__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)
+# define TR2_OPTIONAL_GCC_4_8_AND_HIGHER___
+# elif (__GNUC__ > 4)
+# define TR2_OPTIONAL_GCC_4_8_AND_HIGHER___
+# endif
+#
+# if (__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)
+# define TR2_OPTIONAL_GCC_4_7_AND_HIGHER___
+# elif (__GNUC__ > 4)
+# define TR2_OPTIONAL_GCC_4_7_AND_HIGHER___
+# endif
+#
+# if (__GNUC__ == 4) && (__GNUC_MINOR__ == 8) && (__GNUC_PATCHLEVEL__ >= 1)
+# define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___
+# elif (__GNUC__ == 4) && (__GNUC_MINOR__ >= 9)
+# define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___
+# elif (__GNUC__ > 4)
+# define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___
+# endif
+# endif
+#
+# if defined __clang_major__
+# if (__clang_major__ == 3 && __clang_minor__ >= 5)
+# define TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_
+# elif (__clang_major__ > 3)
+# define TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_
+# endif
+# if defined TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_
+# define TR2_OPTIONAL_CLANG_3_4_2_AND_HIGHER_
+# elif (__clang_major__ == 3 && __clang_minor__ == 4 && __clang_patchlevel__ >= 2)
+# define TR2_OPTIONAL_CLANG_3_4_2_AND_HIGHER_
+# endif
+# endif
+#
+# if defined _MSC_VER
+# if (_MSC_VER >= 1900)
+# define TR2_OPTIONAL_MSVC_2015_AND_HIGHER___
+# endif
+# endif
+
+# if defined __clang__
+# if (__clang_major__ > 2) || (__clang_major__ == 2) && (__clang_minor__ >= 9)
+# define OPTIONAL_HAS_THIS_RVALUE_REFS 1
+# else
+# define OPTIONAL_HAS_THIS_RVALUE_REFS 0
+# endif
+# elif defined TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___
+# define OPTIONAL_HAS_THIS_RVALUE_REFS 1
+# elif defined TR2_OPTIONAL_MSVC_2015_AND_HIGHER___
+# define OPTIONAL_HAS_THIS_RVALUE_REFS 1
+# else
+# define OPTIONAL_HAS_THIS_RVALUE_REFS 0
+# endif
+
+
+# if defined TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___
+# define OPTIONAL_HAS_CONSTEXPR_INIT_LIST 1
+# define OPTIONAL_CONSTEXPR_INIT_LIST constexpr
+# else
+# define OPTIONAL_HAS_CONSTEXPR_INIT_LIST 0
+# define OPTIONAL_CONSTEXPR_INIT_LIST
+# endif
+
+// FIXME: To make the result type of value() consistent across compilers, we intentionally disable move accessors.
+# define OPTIONAL_HAS_MOVE_ACCESSORS 0
+// # if defined TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_ && (defined __cplusplus) && (__cplusplus != 201103L)
+// # define OPTIONAL_HAS_MOVE_ACCESSORS 1
+// # else
+// # define OPTIONAL_HAS_MOVE_ACCESSORS 0
+// # endif
+
+# // In C++11 constexpr implies const, so we need to make non-const members also non-constexpr
+# if (defined __cplusplus) && (__cplusplus == 201103L)
+# define OPTIONAL_MUTABLE_CONSTEXPR
+# else
+# define OPTIONAL_MUTABLE_CONSTEXPR constexpr
+# endif
+
+#if COMPILER_SUPPORTS(EXCEPTIONS)
+#define __THROW_EXCEPTION(__exception) throw __exception;
+#define __NOEXCEPT noexcept
+#define __NOEXCEPT_(__exception) noexcept(__exception)
+#else
+#define __THROW_EXCEPTION(__exception) do { (void)__exception; CRASH(); } while (0);
+#define __NOEXCEPT
+#define __NOEXCEPT_(...)
+#endif
+
+namespace std {
+namespace detail_ {
+
+// NOTE: All our target compilers support is_trivially_destructible.
+// // BEGIN workaround for missing is_trivially_destructible
+// # if defined TR2_OPTIONAL_GCC_4_8_AND_HIGHER___
+// // leave it: it is already there
+// # elif defined TR2_OPTIONAL_CLANG_3_4_2_AND_HIGHER_
+// // leave it: it is already there
+// # elif defined TR2_OPTIONAL_MSVC_2015_AND_HIGHER___
+// // leave it: it is already there
+// # elif defined TR2_OPTIONAL_DISABLE_EMULATION_OF_TYPE_TRAITS
+// // leave it: the user doesn't want it
+// # else
+// template <typename T>
+// using is_trivially_destructible = std::has_trivial_destructor<T>;
+// # endif
+// // END workaround for missing is_trivially_destructible
+
+#if COMPILER_SUPPORTS(EXCEPTIONS)
+# if defined(TR2_OPTIONAL_GCC_4_7_AND_HIGHER___) || defined(TR2_OPTIONAL_CLANG_3_4_2_AND_HIGHER_) || defined(TR2_OPTIONAL_MSVC_2015_AND_HIGHER___)
+ // leave it; our metafunctions are already defined.
+ template <typename T>
+ using is_nothrow_move_constructible = std::is_nothrow_move_constructible<T>;
+ template <typename T>
+ using is_nothrow_move_assignable = std::is_nothrow_move_assignable<T>;
+# elif defined TR2_OPTIONAL_DISABLE_EMULATION_OF_TYPE_TRAITS
+ // leave it: the user doesn't want it
+# else
+
+
+// workaround for missing traits in GCC and CLANG
+template <class T>
+struct is_nothrow_move_constructible
+{
+ constexpr static bool value = std::is_nothrow_constructible<T, T&&>::value;
+};
+
+
+template <class T, class U>
+struct is_assignable
+{
+ template <class X, class Y>
+ constexpr static bool has_assign(...) { return false; }
+
+ template <class X, class Y, size_t S = sizeof((std::declval<X>() = std::declval<Y>(), true)) >
+ // the comma operator is necessary for the cases where operator= returns void
+ constexpr static bool has_assign(bool) { return true; }
+
+ constexpr static bool value = has_assign<T, U>(true);
+};
+
+
+template <class T>
+struct is_nothrow_move_assignable
+{
+ template <class X, bool has_any_move_assign>
+ struct has_nothrow_move_assign {
+ constexpr static bool value = false;
+ };
+
+ template <class X>
+ struct has_nothrow_move_assign<X, true> {
+ constexpr static bool value = __NOEXCEPT_( std::declval<X&>() = std::declval<X&&>() );
+ };
+
+ constexpr static bool value = has_nothrow_move_assign<T, is_assignable<T&, T&&>::value>::value;
+};
+// end workaround
+
+
+# endif
+#endif
+
+} // namespace detail_
+
+// 20.5.4, optional for object types
+template <class T> class optional;
+
+// 20.5.5, optional for lvalue reference types
+template <class T> class optional<T&>;
+
+namespace detail_ {
+
+// workaround: std utility functions aren't constexpr yet
+template <class T> inline constexpr T&& constexpr_forward(typename std::remove_reference<T>::type& t) __NOEXCEPT
+{
+ return static_cast<T&&>(t);
+}
+
+template <class T> inline constexpr T&& constexpr_forward(typename std::remove_reference<T>::type&& t) __NOEXCEPT
+{
+ static_assert(!std::is_lvalue_reference<T>::value, "!!");
+ return static_cast<T&&>(t);
+}
+
+template <class T> inline constexpr typename std::remove_reference<T>::type&& constexpr_move(T&& t) __NOEXCEPT
+{
+ return static_cast<typename std::remove_reference<T>::type&&>(t);
+}
+
+#if defined NDEBUG
+# define TR2_OPTIONAL_ASSERTED_EXPRESSION(CHECK, EXPR) (EXPR)
+#else
+# define TR2_OPTIONAL_ASSERTED_EXPRESSION(CHECK, EXPR) ((CHECK) ? (EXPR) : ([]{assert(!#CHECK);}(), (EXPR)))
+#endif
+
+
+// static_addressof: a constexpr version of addressof
+template <typename T>
+struct has_overloaded_addressof
+{
+ template <class X>
+ constexpr static bool has_overload(...) { return false; }
+
+ template <class X, size_t S = sizeof(std::declval<X&>().operator&()) >
+ constexpr static bool has_overload(bool) { return true; }
+
+ constexpr static bool value = has_overload<T>(true);
+};
+
+template <typename T, TR2_OPTIONAL_REQUIRES(!has_overloaded_addressof<T>)>
+constexpr T* static_addressof(T& ref)
+{
+ return &ref;
+}
+
+template <typename T, TR2_OPTIONAL_REQUIRES(has_overloaded_addressof<T>)>
+T* static_addressof(T& ref)
+{
+ return std::addressof(ref);
+}
+
+
+// the call to convert<A>(b) has return type A and converts b to type A iff decltype(b) is implicitly convertible to A
+template <class U>
+constexpr U convert(U v) { return v; }
+
+} // namespace detail_
+
+
+constexpr struct trivial_init_t{} trivial_init{};
+
+
+// 20.5.6, In-place construction
+constexpr struct in_place_t{} in_place{};
+
+
+// 20.5.7, Disengaged state indicator
+struct nullopt_t
+{
+ struct init{};
+ constexpr explicit nullopt_t(init){}
+};
+constexpr nullopt_t nullopt{nullopt_t::init()};
+
+
+// 20.5.8, class bad_optional_access
+class bad_optional_access : public std::logic_error {
+public:
+ explicit bad_optional_access(const std::string& what_arg) : std::logic_error{what_arg} {}
+ explicit bad_optional_access(const char* what_arg) : std::logic_error{what_arg} {}
+};
+
+
+template <class T>
+union storage_t
+{
+ unsigned char dummy_;
+ T value_;
+
+ constexpr storage_t( trivial_init_t ) __NOEXCEPT : dummy_() {};
+
+ template <class... Args>
+ constexpr storage_t( Args&&... args ) : value_(detail_::constexpr_forward<Args>(args)...) {}
+
+ ~storage_t(){}
+};
+
+
+template <class T>
+union constexpr_storage_t
+{
+ unsigned char dummy_;
+ T value_;
+
+ constexpr constexpr_storage_t( trivial_init_t ) __NOEXCEPT : dummy_() {};
+
+ template <class... Args>
+ constexpr constexpr_storage_t( Args&&... args ) : value_(detail_::constexpr_forward<Args>(args)...) {}
+
+ ~constexpr_storage_t() = default;
+};
+
+
+template <class T>
+struct optional_base
+{
+ bool init_;
+ storage_t<T> storage_;
+
+ constexpr optional_base() __NOEXCEPT : init_(false), storage_(trivial_init) {};
+
+ explicit constexpr optional_base(const T& v) : init_(true), storage_(v) {}
+
+ explicit constexpr optional_base(T&& v) : init_(true), storage_(detail_::constexpr_move(v)) {}
+
+ template <class... Args> explicit optional_base(in_place_t, Args&&... args)
+ : init_(true), storage_(detail_::constexpr_forward<Args>(args)...) {}
+
+ template <class U, class... Args, TR2_OPTIONAL_REQUIRES(std::is_constructible<T, std::initializer_list<U>>)>
+ explicit optional_base(in_place_t, std::initializer_list<U> il, Args&&... args)
+ : init_(true), storage_(il, std::forward<Args>(args)...) {}
+
+ ~optional_base() { if (init_) storage_.value_.T::~T(); }
+};
+
+
+template <class T>
+struct constexpr_optional_base
+{
+ bool init_;
+ constexpr_storage_t<T> storage_;
+
+ constexpr constexpr_optional_base() __NOEXCEPT : init_(false), storage_(trivial_init) {};
+
+ explicit constexpr constexpr_optional_base(const T& v) : init_(true), storage_(v) {}
+
+ explicit constexpr constexpr_optional_base(T&& v) : init_(true), storage_(detail_::constexpr_move(v)) {}
+
+ template <class... Args> explicit constexpr constexpr_optional_base(in_place_t, Args&&... args)
+ : init_(true), storage_(detail_::constexpr_forward<Args>(args)...) {}
+
+ template <class U, class... Args, TR2_OPTIONAL_REQUIRES(std::is_constructible<T, std::initializer_list<U>>)>
+ OPTIONAL_CONSTEXPR_INIT_LIST explicit constexpr_optional_base(in_place_t, std::initializer_list<U> il, Args&&... args)
+ : init_(true), storage_(il, std::forward<Args>(args)...) {}
+
+ ~constexpr_optional_base() = default;
+};
+
+template <class T>
+using OptionalBase = typename std::conditional<
+ std::is_trivially_destructible<T>::value, // if possible
+ constexpr_optional_base<typename std::remove_const<T>::type>, // use base with trivial destructor
+ optional_base<typename std::remove_const<T>::type>
+>::type;
+
+
+
+template <class T>
+class optional : private OptionalBase<T>
+{
+ static_assert( !std::is_same<typename std::decay<T>::type, nullopt_t>::value, "bad T" );
+ static_assert( !std::is_same<typename std::decay<T>::type, in_place_t>::value, "bad T" );
+
+
+ constexpr bool initialized() const __NOEXCEPT { return OptionalBase<T>::init_; }
+ typename std::remove_const<T>::type* dataptr() { return std::addressof(OptionalBase<T>::storage_.value_); }
+ constexpr const T* dataptr() const { return detail_::static_addressof(OptionalBase<T>::storage_.value_); }
+
+# if OPTIONAL_HAS_THIS_RVALUE_REFS == 1
+ constexpr const T& contained_val() const& { return OptionalBase<T>::storage_.value_; }
+# if OPTIONAL_HAS_MOVE_ACCESSORS == 1
+ OPTIONAL_MUTABLE_CONSTEXPR T&& contained_val() && { return std::move(OptionalBase<T>::storage_.value_); }
+ OPTIONAL_MUTABLE_CONSTEXPR T& contained_val() & { return OptionalBase<T>::storage_.value_; }
+# else
+ T& contained_val() & { return OptionalBase<T>::storage_.value_; }
+ T&& contained_val() && { return std::move(OptionalBase<T>::storage_.value_); }
+# endif
+# else
+ constexpr const T& contained_val() const { return OptionalBase<T>::storage_.value_; }
+ T& contained_val() { return OptionalBase<T>::storage_.value_; }
+# endif
+
+ void clear() __NOEXCEPT {
+ if (initialized()) dataptr()->T::~T();
+ OptionalBase<T>::init_ = false;
+ }
+
+ template <class... Args>
+ void initialize(Args&&... args) __NOEXCEPT_(__NOEXCEPT_(T(std::forward<Args>(args)...)))
+ {
+ ASSERT(!OptionalBase<T>::init_);
+ ::new (static_cast<void*>(dataptr())) T(std::forward<Args>(args)...);
+ OptionalBase<T>::init_ = true;
+ }
+
+ template <class U, class... Args>
+ void initialize(std::initializer_list<U> il, Args&&... args) __NOEXCEPT_(__NOEXCEPT_(T(il, std::forward<Args>(args)...)))
+ {
+ ASSERT(!OptionalBase<T>::init_);
+ ::new (static_cast<void*>(dataptr())) T(il, std::forward<Args>(args)...);
+ OptionalBase<T>::init_ = true;
+ }
+
+public:
+ typedef T value_type;
+
+ // 20.5.5.1, constructors
+ constexpr optional() __NOEXCEPT : OptionalBase<T>() {};
+ constexpr optional(nullopt_t) __NOEXCEPT : OptionalBase<T>() {};
+
+ optional(const optional& rhs)
+ : OptionalBase<T>()
+ {
+ if (rhs.initialized()) {
+ ::new (static_cast<void*>(dataptr())) T(*rhs);
+ OptionalBase<T>::init_ = true;
+ }
+ }
+
+ optional(optional&& rhs) __NOEXCEPT_(detail_::is_nothrow_move_constructible<T>::value)
+ : OptionalBase<T>()
+ {
+ if (rhs.initialized()) {
+ ::new (static_cast<void*>(dataptr())) T(std::move(*rhs));
+ OptionalBase<T>::init_ = true;
+ }
+ }
+
+ constexpr optional(const T& v) : OptionalBase<T>(v) {}
+
+ constexpr optional(T&& v) : OptionalBase<T>(detail_::constexpr_move(v)) {}
+
+ template <class... Args>
+ explicit constexpr optional(in_place_t, Args&&... args)
+ : OptionalBase<T>(in_place_t{}, detail_::constexpr_forward<Args>(args)...) {}
+
+ template <class U, class... Args, TR2_OPTIONAL_REQUIRES(std::is_constructible<T, std::initializer_list<U>>)>
+ OPTIONAL_CONSTEXPR_INIT_LIST explicit optional(in_place_t, std::initializer_list<U> il, Args&&... args)
+ : OptionalBase<T>(in_place_t{}, il, detail_::constexpr_forward<Args>(args)...) {}
+
+ // 20.5.4.2, Destructor
+ ~optional() = default;
+
+ // 20.5.4.3, assignment
+ optional& operator=(nullopt_t) __NOEXCEPT
+ {
+ clear();
+ return *this;
+ }
+
+ optional& operator=(const optional& rhs)
+ {
+ if (initialized() == true && rhs.initialized() == false) clear();
+ else if (initialized() == false && rhs.initialized() == true) initialize(*rhs);
+ else if (initialized() == true && rhs.initialized() == true) contained_val() = *rhs;
+ return *this;
+ }
+
+ optional& operator=(optional&& rhs)
+ __NOEXCEPT_(detail_::is_nothrow_move_assignable<T>::value && detail_::is_nothrow_move_constructible<T>::value)
+ {
+ if (initialized() == true && rhs.initialized() == false) clear();
+ else if (initialized() == false && rhs.initialized() == true) initialize(std::move(*rhs));
+ else if (initialized() == true && rhs.initialized() == true) contained_val() = std::move(*rhs);
+ return *this;
+ }
+
+ template <class U>
+ auto operator=(U&& v)
+ -> typename std::enable_if
+ <
+ std::is_same<typename std::decay<U>::type, T>::value,
+ optional&
+ >::type
+ {
+ if (initialized()) { contained_val() = std::forward<U>(v); }
+ else { initialize(std::forward<U>(v)); }
+ return *this;
+ }
+
+
+ template <class... Args>
+ void emplace(Args&&... args)
+ {
+ clear();
+ initialize(std::forward<Args>(args)...);
+ }
+
+ template <class U, class... Args>
+ void emplace(std::initializer_list<U> il, Args&&... args)
+ {
+ clear();
+ initialize<U, Args...>(il, std::forward<Args>(args)...);
+ }
+
+ // 20.5.4.4, Swap
+ void swap(optional<T>& rhs) __NOEXCEPT_(detail_::is_nothrow_move_constructible<T>::value && __NOEXCEPT_(swap(std::declval<T&>(), std::declval<T&>())))
+ {
+ if (initialized() == true && rhs.initialized() == false) { rhs.initialize(std::move(**this)); clear(); }
+ else if (initialized() == false && rhs.initialized() == true) { initialize(std::move(*rhs)); rhs.clear(); }
+ else if (initialized() == true && rhs.initialized() == true) { using std::swap; swap(**this, *rhs); }
+ }
+
+ // 20.5.4.5, Observers
+
+ explicit constexpr operator bool() const __NOEXCEPT { return initialized(); }
+
+ constexpr T const* operator ->() const {
+ return TR2_OPTIONAL_ASSERTED_EXPRESSION(initialized(), dataptr());
+ }
+
+# if OPTIONAL_HAS_MOVE_ACCESSORS == 1
+
+ OPTIONAL_MUTABLE_CONSTEXPR T* operator ->() {
+ // FIXME: We need to offer a special assert function that can be used in a constexpr context.
+ // CONSTEXPR_ASSERT(initialized());
+ return dataptr();
+ }
+
+ constexpr T const& operator *() const& {
+ return TR2_OPTIONAL_ASSERTED_EXPRESSION(initialized(), contained_val());
+ }
+
+ OPTIONAL_MUTABLE_CONSTEXPR T& operator *() & {
+ // FIXME: We need to offer a special assert function that can be used in a constexpr context.
+ // CONSTEXPR_ASSERT(initialized());
+ return contained_val();
+ }
+
+ OPTIONAL_MUTABLE_CONSTEXPR T&& operator *() && {
+ // FIXME: We need to offer a special assert function that can be used in a constexpr context.
+ // CONSTEXPR_ASSERT(initialized());
+ return detail_::constexpr_move(contained_val());
+ }
+
+ constexpr T const& value() const& {
+ // FIXME: We need to offer a special assert function that can be used in a constexpr context.
+ // return initialized() ? contained_val() : (throw bad_optional_access("bad optional access"), contained_val());
+ return contained_val();
+ }
+
+ OPTIONAL_MUTABLE_CONSTEXPR T& value() & {
+ // FIXME: We need to offer a special assert function that can be used in a constexpr context.
+ // return initialized() ? contained_val() : (throw bad_optional_access("bad optional access"), contained_val());
+ return contained_val();
+ }
+
+ OPTIONAL_MUTABLE_CONSTEXPR T&& value() && {
+ // FIXME: We need to offer a special assert function that can be used in a constexpr context.
+ // if (!initialized()) __THROW_EXCEPTION(bad_optional_access("bad optional access"));
+ return std::move(contained_val());
+ }
+
+# else
+
+ T* operator ->() {
+ assert (initialized());
+ return dataptr();
+ }
+
+ constexpr T const& operator *() const {
+ return TR2_OPTIONAL_ASSERTED_EXPRESSION(initialized(), contained_val());
+ }
+
+ T& operator *() {
+ assert (initialized());
+ return contained_val();
+ }
+
+ constexpr T const& value() const {
+ // FIXME: We need to offer a special assert function that can be used in a constexpr context.
+ // return initialized() ? contained_val() : (throw bad_optional_access("bad optional access"), contained_val());
+ return contained_val();
+ }
+
+ T& value() {
+ // FIXME: We need to offer a special assert function that can be used in a constexpr context.
+ // return initialized() ? contained_val() : (throw bad_optional_access("bad optional access"), contained_val());
+ return contained_val();
+ }
+
+# endif
+
+# if OPTIONAL_HAS_THIS_RVALUE_REFS == 1
+
+ template <class V>
+ constexpr T value_or(V&& v) const&
+ {
+ return *this ? **this : detail_::convert<T>(detail_::constexpr_forward<V>(v));
+ }
+
+# if OPTIONAL_HAS_MOVE_ACCESSORS == 1
+
+ template <class V>
+ OPTIONAL_MUTABLE_CONSTEXPR T value_or(V&& v) &&
+ {
+ return *this ? detail_::constexpr_move(const_cast<optional<T>&>(*this).contained_val()) : detail_::convert<T>(detail_::constexpr_forward<V>(v));
+ }
+
+# else
+
+ template <class V>
+ T value_or(V&& v) &&
+ {
+ return *this ? detail_::constexpr_move(const_cast<optional<T>&>(*this).contained_val()) : detail_::convert<T>(detail_::constexpr_forward<V>(v));
+ }
+
+# endif
+
+# else
+
+ template <class V>
+ constexpr T value_or(V&& v) const
+ {
+ return *this ? **this : detail_::convert<T>(detail_::constexpr_forward<V>(v));
+ }
+
+# endif
+
+};
+
+
+template <class T>
+class optional<T&>
+{
+ static_assert( !std::is_same<T, nullopt_t>::value, "bad T" );
+ static_assert( !std::is_same<T, in_place_t>::value, "bad T" );
+ T* ref;
+
+public:
+
+ // 20.5.5.1, construction/destruction
+ constexpr optional() __NOEXCEPT : ref(nullptr) {}
+
+ constexpr optional(nullopt_t) __NOEXCEPT : ref(nullptr) {}
+
+ constexpr optional(T& v) __NOEXCEPT : ref(detail_::static_addressof(v)) {}
+
+ optional(T&&) = delete;
+
+ constexpr optional(const optional& rhs) __NOEXCEPT : ref(rhs.ref) {}
+
+ explicit constexpr optional(in_place_t, T& v) __NOEXCEPT : ref(detail_::static_addressof(v)) {}
+
+ explicit optional(in_place_t, T&&) = delete;
+
+ ~optional() = default;
+
+ // 20.5.5.2, mutation
+ optional& operator=(nullopt_t) __NOEXCEPT {
+ ref = nullptr;
+ return *this;
+ }
+
+ // optional& operator=(const optional& rhs) __NOEXCEPT {
+ // ref = rhs.ref;
+ // return *this;
+ // }
+
+ // optional& operator=(optional&& rhs) __NOEXCEPT {
+ // ref = rhs.ref;
+ // return *this;
+ // }
+
+ template <typename U>
+ auto operator=(U&& rhs) __NOEXCEPT
+ -> typename std::enable_if
+ <
+ std::is_same<typename std::decay<U>::type, optional<T&>>::value,
+ optional&
+ >::type
+ {
+ ref = rhs.ref;
+ return *this;
+ }
+
+ template <typename U>
+ auto operator=(U&& rhs) __NOEXCEPT
+ -> typename std::enable_if
+ <
+ !std::is_same<typename std::decay<U>::type, optional<T&>>::value,
+ optional&
+ >::type
+ = delete;
+
+ void emplace(T& v) __NOEXCEPT {
+ ref = detail_::static_addressof(v);
+ }
+
+ void emplace(T&&) = delete;
+
+
+ void swap(optional<T&>& rhs) __NOEXCEPT
+ {
+ std::swap(ref, rhs.ref);
+ }
+
+ // 20.5.5.3, observers
+ constexpr T* operator->() const {
+ return TR2_OPTIONAL_ASSERTED_EXPRESSION(ref, ref);
+ }
+
+ constexpr T& operator*() const {
+ return TR2_OPTIONAL_ASSERTED_EXPRESSION(ref, *ref);
+ }
+
+ constexpr T& value() const {
+ // FIXME: We need to offer a special assert function that can be used in a constexpr context.
+ // return ref ? *ref : (throw bad_optional_access("bad optional access"), *ref);
+ return *ref;
+ }
+
+ explicit constexpr operator bool() const __NOEXCEPT {
+ return ref != nullptr;
+ }
+
+ template <class V>
+ constexpr typename std::decay<T>::type value_or(V&& v) const
+ {
+ return *this ? **this : detail_::convert<typename std::decay<T>::type>(detail_::constexpr_forward<V>(v));
+ }
+};
+
+
+template <class T>
+class optional<T&&>
+{
+ static_assert( sizeof(T) == 0, "optional rvalue references disallowed" );
+};
+
+
+// 20.5.8, Relational operators
+template <class T> constexpr bool operator==(const optional<T>& x, const optional<T>& y)
+{
+ return bool(x) != bool(y) ? false : bool(x) == false ? true : *x == *y;
+}
+
+template <class T> constexpr bool operator!=(const optional<T>& x, const optional<T>& y)
+{
+ return !(x == y);
+}
+
+template <class T> constexpr bool operator<(const optional<T>& x, const optional<T>& y)
+{
+ return (!y) ? false : (!x) ? true : *x < *y;
+}
+
+template <class T> constexpr bool operator>(const optional<T>& x, const optional<T>& y)
+{
+ return (y < x);
+}
+
+template <class T> constexpr bool operator<=(const optional<T>& x, const optional<T>& y)
+{
+ return !(y < x);
+}
+
+template <class T> constexpr bool operator>=(const optional<T>& x, const optional<T>& y)
+{
+ return !(x < y);
+}
+
+
+// 20.5.9, Comparison with nullopt
+template <class T> constexpr bool operator==(const optional<T>& x, nullopt_t) __NOEXCEPT
+{
+ return (!x);
+}
+
+template <class T> constexpr bool operator==(nullopt_t, const optional<T>& x) __NOEXCEPT
+{
+ return (!x);
+}
+
+template <class T> constexpr bool operator!=(const optional<T>& x, nullopt_t) __NOEXCEPT
+{
+ return bool(x);
+}
+
+template <class T> constexpr bool operator!=(nullopt_t, const optional<T>& x) __NOEXCEPT
+{
+ return bool(x);
+}
+
+template <class T> constexpr bool operator<(const optional<T>&, nullopt_t) __NOEXCEPT
+{
+ return false;
+}
+
+template <class T> constexpr bool operator<(nullopt_t, const optional<T>& x) __NOEXCEPT
+{
+ return bool(x);
+}
+
+template <class T> constexpr bool operator<=(const optional<T>& x, nullopt_t) __NOEXCEPT
+{
+ return (!x);
+}
+
+template <class T> constexpr bool operator<=(nullopt_t, const optional<T>&) __NOEXCEPT
+{
+ return true;
+}
+
+template <class T> constexpr bool operator>(const optional<T>& x, nullopt_t) __NOEXCEPT
+{
+ return bool(x);
+}
+
+template <class T> constexpr bool operator>(nullopt_t, const optional<T>&) __NOEXCEPT
+{
+ return false;
+}
+
+template <class T> constexpr bool operator>=(const optional<T>&, nullopt_t) __NOEXCEPT
+{
+ return true;
+}
+
+template <class T> constexpr bool operator>=(nullopt_t, const optional<T>& x) __NOEXCEPT
+{
+ return (!x);
+}
+
+
+
+// 20.5.10, Comparison with T
+template <class T> constexpr bool operator==(const optional<T>& x, const T& v)
+{
+ return bool(x) ? *x == v : false;
+}
+
+template <class T> constexpr bool operator==(const T& v, const optional<T>& x)
+{
+ return bool(x) ? v == *x : false;
+}
+
+template <class T> constexpr bool operator!=(const optional<T>& x, const T& v)
+{
+ return bool(x) ? *x != v : true;
+}
+
+template <class T> constexpr bool operator!=(const T& v, const optional<T>& x)
+{
+ return bool(x) ? v != *x : true;
+}
+
+template <class T> constexpr bool operator<(const optional<T>& x, const T& v)
+{
+ return bool(x) ? *x < v : true;
+}
+
+template <class T> constexpr bool operator>(const T& v, const optional<T>& x)
+{
+ return bool(x) ? v > *x : true;
+}
+
+template <class T> constexpr bool operator>(const optional<T>& x, const T& v)
+{
+ return bool(x) ? *x > v : false;
+}
+
+template <class T> constexpr bool operator<(const T& v, const optional<T>& x)
+{
+ return bool(x) ? v < *x : false;
+}
+
+template <class T> constexpr bool operator>=(const optional<T>& x, const T& v)
+{
+ return bool(x) ? *x >= v : false;
+}
+
+template <class T> constexpr bool operator<=(const T& v, const optional<T>& x)
+{
+ return bool(x) ? v <= *x : false;
+}
+
+template <class T> constexpr bool operator<=(const optional<T>& x, const T& v)
+{
+ return bool(x) ? *x <= v : true;
+}
+
+template <class T> constexpr bool operator>=(const T& v, const optional<T>& x)
+{
+ return bool(x) ? v >= *x : true;
+}
+
+
+// Comparison of optional<T&> with T
+template <class T> constexpr bool operator==(const optional<T&>& x, const T& v)
+{
+ return bool(x) ? *x == v : false;
+}
+
+template <class T> constexpr bool operator==(const T& v, const optional<T&>& x)
+{
+ return bool(x) ? v == *x : false;
+}
+
+template <class T> constexpr bool operator!=(const optional<T&>& x, const T& v)
+{
+ return bool(x) ? *x != v : true;
+}
+
+template <class T> constexpr bool operator!=(const T& v, const optional<T&>& x)
+{
+ return bool(x) ? v != *x : true;
+}
+
+template <class T> constexpr bool operator<(const optional<T&>& x, const T& v)
+{
+ return bool(x) ? *x < v : true;
+}
+
+template <class T> constexpr bool operator>(const T& v, const optional<T&>& x)
+{
+ return bool(x) ? v > *x : true;
+}
+
+template <class T> constexpr bool operator>(const optional<T&>& x, const T& v)
+{
+ return bool(x) ? *x > v : false;
+}
+
+template <class T> constexpr bool operator<(const T& v, const optional<T&>& x)
+{
+ return bool(x) ? v < *x : false;
+}
+
+template <class T> constexpr bool operator>=(const optional<T&>& x, const T& v)
+{
+ return bool(x) ? *x >= v : false;
+}
+
+template <class T> constexpr bool operator<=(const T& v, const optional<T&>& x)
+{
+ return bool(x) ? v <= *x : false;
+}
+
+template <class T> constexpr bool operator<=(const optional<T&>& x, const T& v)
+{
+ return bool(x) ? *x <= v : true;
+}
+
+template <class T> constexpr bool operator>=(const T& v, const optional<T&>& x)
+{
+ return bool(x) ? v >= *x : true;
+}
+
+// Comparison of optional<T const&> with T
+template <class T> constexpr bool operator==(const optional<const T&>& x, const T& v)
+{
+ return bool(x) ? *x == v : false;
+}
+
+template <class T> constexpr bool operator==(const T& v, const optional<const T&>& x)
+{
+ return bool(x) ? v == *x : false;
+}
+
+template <class T> constexpr bool operator!=(const optional<const T&>& x, const T& v)
+{
+ return bool(x) ? *x != v : true;
+}
+
+template <class T> constexpr bool operator!=(const T& v, const optional<const T&>& x)
+{
+ return bool(x) ? v != *x : true;
+}
+
+template <class T> constexpr bool operator<(const optional<const T&>& x, const T& v)
+{
+ return bool(x) ? *x < v : true;
+}
+
+template <class T> constexpr bool operator>(const T& v, const optional<const T&>& x)
+{
+ return bool(x) ? v > *x : true;
+}
+
+template <class T> constexpr bool operator>(const optional<const T&>& x, const T& v)
+{
+ return bool(x) ? *x > v : false;
+}
+
+template <class T> constexpr bool operator<(const T& v, const optional<const T&>& x)
+{
+ return bool(x) ? v < *x : false;
+}
+
+template <class T> constexpr bool operator>=(const optional<const T&>& x, const T& v)
+{
+ return bool(x) ? *x >= v : false;
+}
+
+template <class T> constexpr bool operator<=(const T& v, const optional<const T&>& x)
+{
+ return bool(x) ? v <= *x : false;
+}
+
+template <class T> constexpr bool operator<=(const optional<const T&>& x, const T& v)
+{
+ return bool(x) ? *x <= v : true;
+}
+
+template <class T> constexpr bool operator>=(const T& v, const optional<const T&>& x)
+{
+ return bool(x) ? v >= *x : true;
+}
+
+
+// 20.5.12, Specialized algorithms
+template <class T>
+void swap(optional<T>& x, optional<T>& y) __NOEXCEPT_(__NOEXCEPT_(x.swap(y)))
+{
+ x.swap(y);
+}
+
+
+template <class T>
+constexpr optional<typename std::decay<T>::type> make_optional(T&& v)
+{
+ return optional<typename std::decay<T>::type>(detail_::constexpr_forward<T>(v));
+}
+
+template <class X>
+constexpr optional<X&> make_optional(std::reference_wrapper<X> v)
+{
+ return optional<X&>(v.get());
+}
+
+} // namespace std
+
+namespace WTF {
+
+// -- WebKit Additions --
+template <class OptionalType, class Callback>
+ALWAYS_INLINE
+auto valueOrCompute(OptionalType optional, Callback callback) -> typename OptionalType::value_type
+{
+ if (optional)
+ return *optional;
+ return callback();
+}
+
+} // namespace WTF
+
+namespace std
+{
+ template <typename T>
+ struct hash<std::optional<T>>
+ {
+ typedef typename hash<T>::result_type result_type;
+ typedef std::optional<T> argument_type;
+
+ constexpr result_type operator()(argument_type const& arg) const {
+ return arg ? std::hash<T>{}(*arg) : result_type{};
+ }
+ };
+
+ template <typename T>
+ struct hash<std::optional<T&>>
+ {
+ typedef typename hash<T>::result_type result_type;
+ typedef std::optional<T&> argument_type;
+
+ constexpr result_type operator()(argument_type const& arg) const {
+ return arg ? std::hash<T>{}(*arg) : result_type{};
+ }
+ };
+}
+
+# undef TR2_OPTIONAL_REQUIRES
+# undef TR2_OPTIONAL_ASSERTED_EXPRESSION
+
+using WTF::valueOrCompute;
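A sketch of the optional shim together with the WebKit-only valueOrCompute() helper defined above (editorial aside, not part of the patch itself); cachedValue() is hypothetical.

#include <wtf/Optional.h>

static std::optional<int> cachedValue(bool haveCache)
{
    if (haveCache)
        return 42;
    return std::nullopt;
}

static int optionalExample(bool haveCache)
{
    std::optional<int> value = cachedValue(haveCache);
    // valueOrCompute() only invokes the lambda when the optional is disengaged.
    return valueOrCompute(value, [] { return -1; });
}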
diff --git a/Source/WTF/wtf/OrderMaker.h b/Source/WTF/wtf/OrderMaker.h
new file mode 100644
index 000000000..12c9de05f
--- /dev/null
+++ b/Source/WTF/wtf/OrderMaker.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_OrderMaker_h
+#define WTF_OrderMaker_h
+
+#include <wtf/Bag.h>
+#include <wtf/HashMap.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/SentinelLinkedList.h>
+
+namespace WTF {
+
+// This is a collection that is meant to be used for building up lists in a certain order. It's
+// not an efficient data structure for storing lists, but if you need to build a list by doing
+// operations like insertBefore(existingValue, newValue), then this class is a good intermediate
+// helper. Note that the type it operates on must be usable as a HashMap key.
+template<typename T>
+class OrderMaker {
+ WTF_MAKE_NONCOPYABLE(OrderMaker);
+
+ struct Node : BasicRawSentinelNode<Node> {
+ Node(SentinelTag)
+ {
+ }
+
+ Node()
+ {
+ }
+
+ T payload;
+ };
+
+public:
+ OrderMaker()
+ {
+ }
+
+ void prepend(T value)
+ {
+ m_list.push(newNode(value));
+ }
+
+ void append(T value)
+ {
+ m_list.append(newNode(value));
+ }
+
+ void insertBefore(T existingValue, T newValue)
+ {
+ Node* node = m_map.get(existingValue);
+ ASSERT(node);
+ node->prepend(newNode(newValue));
+ }
+
+ void insertAfter(T existingValue, T newValue)
+ {
+ Node* node = m_map.get(existingValue);
+ ASSERT(node);
+ node->append(newNode(newValue));
+ }
+
+ class iterator {
+ public:
+ iterator()
+ {
+ }
+
+ iterator(Node* node)
+ : m_node(node)
+ {
+ }
+
+ const T& operator*()
+ {
+ return m_node->payload;
+ }
+
+ iterator& operator++()
+ {
+ m_node = m_node->next();
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ return m_node == other.m_node;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ Node* m_node { nullptr };
+ };
+
+ iterator begin() const { return iterator(const_cast<SentinelLinkedList<Node>&>(m_list).begin()); }
+ iterator end() const { return iterator(const_cast<SentinelLinkedList<Node>&>(m_list).end()); }
+
+private:
+ Node* newNode(T value)
+ {
+ Node* result = m_nodes.add();
+ result->payload = value;
+ m_map.set(value, result);
+ return result;
+ }
+
+ HashMap<T, Node*> m_map;
+ Bag<Node> m_nodes; // FIXME: We could just manually free the contents of the linked list.
+ SentinelLinkedList<Node> m_list;
+};
+
+} // namespace WTF
+
+using WTF::OrderMaker;
+
+#endif // WTF_OrderMaker_h
+
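As a quick illustration of the OrderMaker API added above, a hedged usage sketch (the element type and values are illustrative; any type usable as a HashMap key works):

    #include <wtf/DataLog.h>
    #include <wtf/OrderMaker.h>

    void buildOrderExample()
    {
        OrderMaker<unsigned> order;
        // Keys of 0 are avoided here because default integer HashTraits reserve 0 as the empty value.
        order.append(2);          // 2
        order.append(4);          // 2, 4
        order.insertBefore(4, 3); // 2, 3, 4
        order.insertAfter(4, 5);  // 2, 3, 4, 5
        order.prepend(1);         // 1, 2, 3, 4, 5

        for (unsigned value : order)
            dataLog(value, " ");
        dataLog("\n");
    }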
diff --git a/Source/WTF/wtf/OwnPtr.h b/Source/WTF/wtf/OwnPtr.h
deleted file mode 100644
index 111375fbb..000000000
--- a/Source/WTF/wtf/OwnPtr.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (C) 2006, 2007, 2008, 2009, 2010 Apple Inc. All rights reserved.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public License
- * along with this library; see the file COPYING.LIB. If not, write to
- * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- *
- */
-
-#ifndef WTF_OwnPtr_h
-#define WTF_OwnPtr_h
-
-#include <wtf/Assertions.h>
-#include <wtf/Noncopyable.h>
-#include <wtf/OwnPtrCommon.h>
-#include <algorithm>
-#include <cstddef>
-#include <memory>
-
-namespace WTF {
-
- template<typename T> class PassOwnPtr;
- template<typename T> PassOwnPtr<T> adoptPtr(T*);
-
- template<typename T> class OwnPtr {
- public:
- typedef T ValueType;
- typedef ValueType* PtrType;
-
- OwnPtr() : m_ptr(0) { }
- OwnPtr(std::nullptr_t) : m_ptr(0) { }
-
- // See comment in PassOwnPtr.h for why this takes a const reference.
- template<typename U> OwnPtr(const PassOwnPtr<U>& o);
-
- ~OwnPtr() { deleteOwnedPtr(m_ptr); }
-
- PtrType get() const { return m_ptr; }
-
- void clear();
- PassOwnPtr<T> release();
- PtrType leakPtr() WARN_UNUSED_RETURN;
-
- ValueType& operator*() const { ASSERT(m_ptr); return *m_ptr; }
- PtrType operator->() const { ASSERT(m_ptr); return m_ptr; }
-
- bool operator!() const { return !m_ptr; }
-
- // This conversion operator allows implicit conversion to bool but not to other integer types.
- typedef PtrType OwnPtr::*UnspecifiedBoolType;
- operator UnspecifiedBoolType() const { return m_ptr ? &OwnPtr::m_ptr : 0; }
-
- OwnPtr& operator=(const PassOwnPtr<T>&);
- OwnPtr& operator=(std::nullptr_t) { clear(); return *this; }
- template<typename U> OwnPtr& operator=(const PassOwnPtr<U>&);
-
- OwnPtr(OwnPtr&&);
- template<typename U> OwnPtr(OwnPtr<U>&&);
-
- OwnPtr& operator=(OwnPtr&&);
- template<typename U> OwnPtr& operator=(OwnPtr<U>&&);
-
- void swap(OwnPtr& o) { std::swap(m_ptr, o.m_ptr); }
-
- private:
- explicit OwnPtr(PtrType ptr) : m_ptr(ptr) { }
-
- // We should never have two OwnPtrs for the same underlying object (otherwise we'll get
- // double-destruction), so these equality operators should never be needed.
- template<typename U> bool operator==(const OwnPtr<U>&) { COMPILE_ASSERT(!sizeof(U*), OwnPtrs_should_never_be_equal); return false; }
- template<typename U> bool operator!=(const OwnPtr<U>&) { COMPILE_ASSERT(!sizeof(U*), OwnPtrs_should_never_be_equal); return false; }
- template<typename U> bool operator==(const PassOwnPtr<U>&) { COMPILE_ASSERT(!sizeof(U*), OwnPtrs_should_never_be_equal); return false; }
- template<typename U> bool operator!=(const PassOwnPtr<U>&) { COMPILE_ASSERT(!sizeof(U*), OwnPtrs_should_never_be_equal); return false; }
-
- PtrType m_ptr;
- };
-
- template<typename T> template<typename U> inline OwnPtr<T>::OwnPtr(const PassOwnPtr<U>& o)
- : m_ptr(o.leakPtr())
- {
- }
-
- template<typename T> inline void OwnPtr<T>::clear()
- {
- PtrType ptr = m_ptr;
- m_ptr = 0;
- deleteOwnedPtr(ptr);
- }
-
- template<typename T> inline PassOwnPtr<T> OwnPtr<T>::release()
- {
- PtrType ptr = m_ptr;
- m_ptr = 0;
- return adoptPtr(ptr);
- }
-
- template<typename T> inline typename OwnPtr<T>::PtrType OwnPtr<T>::leakPtr()
- {
- PtrType ptr = m_ptr;
- m_ptr = 0;
- return ptr;
- }
-
- template<typename T> inline OwnPtr<T>& OwnPtr<T>::operator=(const PassOwnPtr<T>& o)
- {
- PtrType ptr = m_ptr;
- m_ptr = o.leakPtr();
- ASSERT(!ptr || m_ptr != ptr);
- deleteOwnedPtr(ptr);
- return *this;
- }
-
- template<typename T> template<typename U> inline OwnPtr<T>& OwnPtr<T>::operator=(const PassOwnPtr<U>& o)
- {
- PtrType ptr = m_ptr;
- m_ptr = o.leakPtr();
- ASSERT(!ptr || m_ptr != ptr);
- deleteOwnedPtr(ptr);
- return *this;
- }
-
- template<typename T> inline OwnPtr<T>::OwnPtr(OwnPtr<T>&& o)
- : m_ptr(o.leakPtr())
- {
- }
-
- template<typename T> template<typename U> inline OwnPtr<T>::OwnPtr(OwnPtr<U>&& o)
- : m_ptr(o.leakPtr())
- {
- }
-
- template<typename T> inline auto OwnPtr<T>::operator=(OwnPtr&& o) -> OwnPtr&
- {
- ASSERT(!o || o != m_ptr);
- OwnPtr ptr = std::move(o);
- swap(ptr);
- return *this;
- }
-
- template<typename T> template<typename U> inline auto OwnPtr<T>::operator=(OwnPtr<U>&& o) -> OwnPtr&
- {
- ASSERT(!o || o != m_ptr);
- OwnPtr ptr = std::move(o);
- swap(ptr);
- return *this;
- }
-
- template<typename T> inline void swap(OwnPtr<T>& a, OwnPtr<T>& b)
- {
- a.swap(b);
- }
-
- template<typename T, typename U> inline bool operator==(const OwnPtr<T>& a, U* b)
- {
- return a.get() == b;
- }
-
- template<typename T, typename U> inline bool operator==(T* a, const OwnPtr<U>& b)
- {
- return a == b.get();
- }
-
- template<typename T, typename U> inline bool operator!=(const OwnPtr<T>& a, U* b)
- {
- return a.get() != b;
- }
-
- template<typename T, typename U> inline bool operator!=(T* a, const OwnPtr<U>& b)
- {
- return a != b.get();
- }
-
- template<typename T> inline typename OwnPtr<T>::PtrType getPtr(const OwnPtr<T>& p)
- {
- return p.get();
- }
-
-} // namespace WTF
-
-using WTF::OwnPtr;
-
-#endif // WTF_OwnPtr_h
diff --git a/Source/WTF/wtf/OwnPtrCommon.h b/Source/WTF/wtf/OwnPtrCommon.h
deleted file mode 100644
index c31847b8b..000000000
--- a/Source/WTF/wtf/OwnPtrCommon.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (C) 2009, 2013 Apple Inc. All rights reserved.
- * Copyright (C) 2009 Torch Mobile, Inc.
- * Copyright (C) 2010 Company 100 Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef WTF_OwnPtrCommon_h
-#define WTF_OwnPtrCommon_h
-
-#if PLATFORM(EFL)
-typedef struct _Ecore_Evas Ecore_Evas;
-typedef struct _Ecore_IMF_Context Ecore_IMF_Context;
-typedef struct _Ecore_Pipe Ecore_Pipe;
-typedef struct _Eina_Hash Eina_Hash;
-typedef struct _Eina_Module Eina_Module;
-#if USE(EO)
-typedef struct _Eo_Opaque Evas_Object;
-#else
-typedef struct _Evas_Object Evas_Object;
-#endif
-#if USE(ACCELERATED_COMPOSITING)
-typedef struct _Evas_GL Evas_GL;
-#endif
-#endif
-
-namespace WTF {
-
- template <typename T> inline void deleteOwnedPtr(T* ptr)
- {
- typedef char known[sizeof(T) ? 1 : -1];
- if (sizeof(known))
- delete ptr;
- }
-
-#if PLATFORM(EFL)
- WTF_EXPORT_PRIVATE void deleteOwnedPtr(Ecore_Evas*);
- WTF_EXPORT_PRIVATE void deleteOwnedPtr(Ecore_IMF_Context*);
- WTF_EXPORT_PRIVATE void deleteOwnedPtr(Ecore_Pipe*);
- WTF_EXPORT_PRIVATE void deleteOwnedPtr(Eina_Hash*);
- WTF_EXPORT_PRIVATE void deleteOwnedPtr(Eina_Module*);
- WTF_EXPORT_PRIVATE void deleteOwnedPtr(Evas_Object*);
-#if USE(ACCELERATED_COMPOSITING)
- WTF_EXPORT_PRIVATE void deleteOwnedPtr(Evas_GL*);
-#endif
-#endif
-
-} // namespace WTF
-
-#endif // WTF_OwnPtrCommon_h
diff --git a/Source/WTF/wtf/PageAllocationAligned.cpp b/Source/WTF/wtf/PageAllocationAligned.cpp
deleted file mode 100644
index bdb976b1b..000000000
--- a/Source/WTF/wtf/PageAllocationAligned.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "PageAllocationAligned.h"
-
-namespace WTF {
-
-PageAllocationAligned PageAllocationAligned::allocate(size_t size, size_t alignment, OSAllocator::Usage usage, bool writable)
-{
- ASSERT(isPageAligned(size));
- ASSERT(isPageAligned(alignment));
- ASSERT(isPowerOfTwo(alignment));
- ASSERT(size >= alignment);
- size_t alignmentMask = alignment - 1;
-
-#if OS(DARWIN)
- int flags = VM_FLAGS_ANYWHERE;
- if (usage != OSAllocator::UnknownUsage)
- flags |= usage;
- int protection = PROT_READ;
- if (writable)
- protection |= PROT_WRITE;
-
- vm_address_t address = 0;
- vm_map(current_task(), &address, size, alignmentMask, flags, MEMORY_OBJECT_NULL, 0, FALSE, protection, PROT_READ | PROT_WRITE, VM_INHERIT_DEFAULT);
- return PageAllocationAligned(reinterpret_cast<void*>(address), size);
-#else
- size_t alignmentDelta = alignment - pageSize();
-
- // Resererve with suffcient additional VM to correctly align.
- size_t reservationSize = size + alignmentDelta;
- void* reservationBase = OSAllocator::reserveUncommitted(reservationSize, usage, writable, false);
-
- // Select an aligned region within the reservation and commit.
- void* alignedBase = reinterpret_cast<uintptr_t>(reservationBase) & alignmentMask
- ? reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(reservationBase) & ~alignmentMask) + alignment)
- : reservationBase;
- OSAllocator::commit(alignedBase, size, writable, false);
-
- return PageAllocationAligned(alignedBase, size, reservationBase, reservationSize);
-#endif
-}
-
-void PageAllocationAligned::deallocate()
-{
- // Clear base & size before calling release; if this is *inside* allocation
- // then we won't be able to clear then after deallocating the memory.
- PageAllocationAligned tmp;
- std::swap(tmp, *this);
-
- ASSERT(tmp);
- ASSERT(!*this);
-
-#if OS(DARWIN)
- vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(tmp.base()), tmp.size());
-#else
- ASSERT(tmp.m_reservation.contains(tmp.base(), tmp.size()));
- OSAllocator::decommitAndRelease(tmp.m_reservation.base(), tmp.m_reservation.size(), tmp.base(), tmp.size());
-#endif
-}
-
-} // namespace WTF
diff --git a/Source/WTF/wtf/PageBlock.h b/Source/WTF/wtf/PageBlock.h
index 56e557017..372c8b600 100644
--- a/Source/WTF/wtf/PageBlock.h
+++ b/Source/WTF/wtf/PageBlock.h
@@ -82,7 +82,6 @@ inline PageBlock::PageBlock(void* base, size_t size, bool hasGuardPages)
using WTF::pageSize;
using WTF::isPageAligned;
-using WTF::isPageAligned;
using WTF::isPowerOfTwo;
#endif // PageBlock_h
diff --git a/Source/WTF/wtf/PageReservation.h b/Source/WTF/wtf/PageReservation.h
index 77783ebcc..66f472565 100644
--- a/Source/WTF/wtf/PageReservation.h
+++ b/Source/WTF/wtf/PageReservation.h
@@ -65,14 +65,7 @@ public:
using PageBlock::base;
using PageBlock::size;
-
-#ifndef __clang__
using PageBlock::operator bool;
-#else
- // FIXME: This is a workaround for <rdar://problem/8876150>, wherein Clang incorrectly emits an access
- // control warning when a client tries to use operator bool exposed above via "using PageBlock::operator bool".
- operator bool() const { return PageBlock::operator bool(); }
-#endif
void commit(void* start, size_t size)
{
diff --git a/Source/WTF/wtf/ParallelHelperPool.cpp b/Source/WTF/wtf/ParallelHelperPool.cpp
new file mode 100644
index 000000000..8d0f9e4b3
--- /dev/null
+++ b/Source/WTF/wtf/ParallelHelperPool.cpp
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ParallelHelperPool.h"
+
+#include "AutomaticThread.h"
+#include "DataLog.h"
+#include "StringPrintStream.h"
+
+namespace WTF {
+
+ParallelHelperClient::ParallelHelperClient(RefPtr<ParallelHelperPool> pool)
+ : m_pool(pool)
+{
+ LockHolder locker(*m_pool->m_lock);
+ RELEASE_ASSERT(!m_pool->m_isDying);
+ m_pool->m_clients.append(this);
+}
+
+ParallelHelperClient::~ParallelHelperClient()
+{
+ LockHolder locker(*m_pool->m_lock);
+ finish(locker);
+
+ for (size_t i = 0; i < m_pool->m_clients.size(); ++i) {
+ if (m_pool->m_clients[i] == this) {
+ m_pool->m_clients[i] = m_pool->m_clients.last();
+ m_pool->m_clients.removeLast();
+ break;
+ }
+ }
+}
+
+void ParallelHelperClient::setTask(RefPtr<SharedTask<void ()>> task)
+{
+ LockHolder locker(*m_pool->m_lock);
+ RELEASE_ASSERT(!m_task);
+ m_task = task;
+ m_pool->didMakeWorkAvailable(locker);
+}
+
+void ParallelHelperClient::finish()
+{
+ LockHolder locker(*m_pool->m_lock);
+ finish(locker);
+}
+
+void ParallelHelperClient::doSomeHelping()
+{
+ RefPtr<SharedTask<void ()>> task;
+ {
+ LockHolder locker(*m_pool->m_lock);
+ task = claimTask(locker);
+ if (!task)
+ return;
+ }
+
+ runTask(task);
+}
+
+void ParallelHelperClient::runTaskInParallel(RefPtr<SharedTask<void ()>> task)
+{
+ setTask(task);
+ doSomeHelping();
+ finish();
+}
+
+void ParallelHelperClient::finish(const AbstractLocker&)
+{
+ m_task = nullptr;
+ while (m_numActive)
+ m_pool->m_workCompleteCondition.wait(*m_pool->m_lock);
+}
+
+RefPtr<SharedTask<void ()>> ParallelHelperClient::claimTask(const AbstractLocker&)
+{
+ if (!m_task)
+ return nullptr;
+
+ m_numActive++;
+ return m_task;
+}
+
+void ParallelHelperClient::runTask(RefPtr<SharedTask<void ()>> task)
+{
+ RELEASE_ASSERT(m_numActive);
+ RELEASE_ASSERT(task);
+
+ task->run();
+
+ {
+ LockHolder locker(*m_pool->m_lock);
+ RELEASE_ASSERT(m_numActive);
+ // No new task could have been installed, since we were still active.
+ RELEASE_ASSERT(!m_task || m_task == task);
+ m_task = nullptr;
+ m_numActive--;
+ if (!m_numActive)
+ m_pool->m_workCompleteCondition.notifyAll();
+ }
+}
+
+ParallelHelperPool::ParallelHelperPool()
+ : m_lock(Box<Lock>::create())
+ , m_workAvailableCondition(AutomaticThreadCondition::create())
+{
+}
+
+ParallelHelperPool::~ParallelHelperPool()
+{
+ RELEASE_ASSERT(m_clients.isEmpty());
+
+ {
+ LockHolder locker(*m_lock);
+ m_isDying = true;
+ m_workAvailableCondition->notifyAll(locker);
+ }
+
+ for (RefPtr<AutomaticThread>& thread : m_threads)
+ thread->join();
+}
+
+void ParallelHelperPool::ensureThreads(unsigned numThreads)
+{
+ LockHolder locker(*m_lock);
+ if (numThreads < m_numThreads)
+ return;
+ m_numThreads = numThreads;
+ if (getClientWithTask(locker))
+ didMakeWorkAvailable(locker);
+}
+
+void ParallelHelperPool::doSomeHelping()
+{
+ ParallelHelperClient* client;
+ RefPtr<SharedTask<void ()>> task;
+ {
+ LockHolder locker(*m_lock);
+ client = getClientWithTask(locker);
+ if (!client)
+ return;
+ task = client->claimTask(locker);
+ }
+
+ client->runTask(task);
+}
+
+class ParallelHelperPool::Thread : public AutomaticThread {
+public:
+ Thread(const AbstractLocker& locker, ParallelHelperPool& pool)
+ : AutomaticThread(locker, pool.m_lock, pool.m_workAvailableCondition)
+ , m_pool(pool)
+ {
+ }
+
+protected:
+ PollResult poll(const AbstractLocker& locker) override
+ {
+ if (m_pool.m_isDying)
+ return PollResult::Stop;
+ m_client = m_pool.getClientWithTask(locker);
+ if (m_client) {
+ m_task = m_client->claimTask(locker);
+ return PollResult::Work;
+ }
+ return PollResult::Wait;
+ }
+
+ WorkResult work() override
+ {
+ m_client->runTask(m_task);
+ m_client = nullptr;
+ m_task = nullptr;
+ return WorkResult::Continue;
+ }
+
+private:
+ ParallelHelperPool& m_pool;
+ ParallelHelperClient* m_client { nullptr };
+ RefPtr<SharedTask<void ()>> m_task;
+};
+
+void ParallelHelperPool::didMakeWorkAvailable(const AbstractLocker& locker)
+{
+ while (m_numThreads > m_threads.size())
+ m_threads.append(adoptRef(new Thread(locker, *this)));
+ m_workAvailableCondition->notifyAll(locker);
+}
+
+bool ParallelHelperPool::hasClientWithTask(const AbstractLocker& locker)
+{
+ return !!getClientWithTask(locker);
+}
+
+ParallelHelperClient* ParallelHelperPool::getClientWithTask(const AbstractLocker&)
+{
+ // We load-balance by being random.
+ unsigned startIndex = m_random.getUint32(m_clients.size());
+ for (unsigned index = startIndex; index < m_clients.size(); ++index) {
+ ParallelHelperClient* client = m_clients[index];
+ if (client->m_task)
+ return client;
+ }
+ for (unsigned index = 0; index < startIndex; ++index) {
+ ParallelHelperClient* client = m_clients[index];
+ if (client->m_task)
+ return client;
+ }
+
+ return nullptr;
+}
+
+} // namespace WTF
+
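getClientWithTask() above load-balances by scanning from a random starting index and wrapping around. A self-contained sketch of that idiom, with illustrative names (pickStartingFrom, items) that are not part of this patch:

    #include <wtf/Vector.h>
    #include <wtf/WeakRandom.h>

    // Returns the first element satisfying the predicate, scanning from a random
    // starting index and wrapping around, so repeated callers don't always favor
    // whichever elements happen to sit at the front of the vector.
    template<typename T, typename Predicate>
    T* pickStartingFrom(Vector<T*>& items, WeakRandom& random, const Predicate& predicate)
    {
        if (items.isEmpty())
            return nullptr;
        unsigned startIndex = random.getUint32(items.size());
        for (unsigned index = startIndex; index < items.size(); ++index) {
            if (predicate(items[index]))
                return items[index];
        }
        for (unsigned index = 0; index < startIndex; ++index) {
            if (predicate(items[index]))
                return items[index];
        }
        return nullptr;
    }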
diff --git a/Source/WTF/wtf/ParallelHelperPool.h b/Source/WTF/wtf/ParallelHelperPool.h
new file mode 100644
index 000000000..21859cb2b
--- /dev/null
+++ b/Source/WTF/wtf/ParallelHelperPool.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ParallelHelperPool_h
+#define ParallelHelperPool_h
+
+#include <wtf/Box.h>
+#include <wtf/Condition.h>
+#include <wtf/Lock.h>
+#include <wtf/RefPtr.h>
+#include <wtf/SharedTask.h>
+#include <wtf/ThreadSafeRefCounted.h>
+#include <wtf/Threading.h>
+#include <wtf/Vector.h>
+#include <wtf/WeakRandom.h>
+
+namespace WTF {
+
+class AutomaticThread;
+class AutomaticThreadCondition;
+
+// A ParallelHelperPool is a shared pool of threads that can be asked to help with some finite-time
+// parallel activity. It's designed to work well when there are multiple concurrent tasks that may
+// all want parallel help. In that case, we don't want each task to start its own thread pool. It's
+// also designed to work well for tasks that do their own load balancing and do not wish to
+// participate in microtask-style load balancing.
+//
+// A pool can have many clients, and each client may have zero or one task. The pool will have up
+// to some number of threads, configurable with ParallelHelperPool::ensureThreads(); usually you bound
+// this by the number of CPUs. Whenever a thread is idle and it notices that some client has a
+// task, it will run the task. A task may be run on anywhere between zero and N threads, where N is
+// the number of threads in the pool. Tasks run to completion. It's expected that a task will have
+// its own custom ideas about how to participate in some parallel activity's load balancing, and it
+// will return when the parallel activity is done. For example, a parallel marking task will return
+// when the mark phase is done.
+//
+// Threads may have a choice between many tasks, since there may be many clients and each client
+// may have a task. For the marking example, that may happen if there are multiple VM instances and
+// each instance decides to start parallel marking at the same time. In that case, threads choose
+// a task at random. So long as any client has a task, all threads in the pool will continue
+// running the available tasks. Threads go idle when no client has tasks to run.
+
+class ParallelHelperPool;
+
+// A client is a placeholder for a parallel algorithm. A parallel algorithm will have a task that
+// can be run concurrently. Whenever a client has a task set (you have called setTask() or
+// setFunction()), threads in the pool may run that task. If a task returns on any thread, the
+// client will assume that the task is done and will clear the task. If the task is cleared (the
+// task runs to completion on any thread or you call finish()), any threads in the pool already
+// running the last set task(s) will continue to run them. You can wait for all of them to finish
+// by calling finish(). That method will clear the task and wait for any threads running the last
+// set task to finish. There are two known-good patterns for using a client:
+//
+// 1) Tasks intrinsically know when the algorithm reaches termination, and simply return when
+// this happens. The main thread runs the task by doing:
+//
+// client->setFunction(
+// [=] () {
+// do things;
+// });
+// client->doSomeHelping();
+// client->finish();
+//
+// Calling doSomeHelping() ensures that the algorithm runs on at least one thread (this one).
+// Tasks will know when to complete, and will return when they are done. This will clear the
+// task to ensure that no new threads will run the task. Then, finish() clears the current task
+// and waits for any parallel tasks to finish after the main thread has finished. It's possible
+// for threads to still be running the last set task (i.e. the one set by setFunction()) even
+// after the task has been cleared. Waiting for idle ensures that no old tasks are running
+// anymore.
+//
+// You can do this more easily by using the runFunctionInParallel() helper:
+//
+// clients->runFunctionInParallel(
+// [=] () {
+// do things;
+// });
+//
+// 2) Tasks keep doing things until they are told to quit using some custom notification mechanism.
+// The main thread runs the task by doing:
+//
+// bool keepGoing = true;
+// client->setFunction(
+//        [&] () {
+// while (keepGoing) {
+// do things;
+// }
+// });
+//
+// When work runs out, the main thread will inform tasks that there is no more work, and then
+// wait until no more tasks are running:
+//
+// keepGoing = false;
+// client->finish();
+//
+// This works best when the main thread doesn't actually want to run the task that it set in the
+// client. This happens for example in parallel marking. The main thread uses a somewhat
+// different marking algorithm than the helpers. The main thread may provide work that the
+// helpers steal. The main thread knows when termination is reached, and simply tells the
+// helpers to stop upon termination.
+//
+// The known-good styles of using ParallelHelperClient all involve a parallel algorithm that has
+// its own work distribution and load balancing.
+//
+// Note that it is not valid to use the same ParallelHelperClient instance from multiple threads.
+// Each thread should have its own ParallelHelperClient in that case. Failure to follow this advice
+// will lead to RELEASE_ASSERTs or worse.
+class ParallelHelperClient {
+ WTF_MAKE_NONCOPYABLE(ParallelHelperClient);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ WTF_EXPORT_PRIVATE ParallelHelperClient(RefPtr<ParallelHelperPool>);
+ WTF_EXPORT_PRIVATE ~ParallelHelperClient();
+
+ WTF_EXPORT_PRIVATE void setTask(RefPtr<SharedTask<void ()>>);
+
+ template<typename Functor>
+ void setFunction(const Functor& functor)
+ {
+ setTask(createSharedTask<void ()>(functor));
+ }
+
+ WTF_EXPORT_PRIVATE void finish();
+
+ WTF_EXPORT_PRIVATE void doSomeHelping();
+
+ // Equivalent to:
+ // client->setTask(task);
+ // client->doSomeHelping();
+ // client->finish();
+ WTF_EXPORT_PRIVATE void runTaskInParallel(RefPtr<SharedTask<void ()>>);
+
+ // Equivalent to:
+ // client->setFunction(functor);
+ // client->doSomeHelping();
+ // client->finish();
+ template<typename Functor>
+ void runFunctionInParallel(const Functor& functor)
+ {
+ runTaskInParallel(createSharedTask<void ()>(functor));
+ }
+
+ ParallelHelperPool& pool() { return *m_pool; }
+ unsigned numberOfActiveThreads() const { return m_numActive; }
+
+private:
+ friend class ParallelHelperPool;
+
+ void finish(const AbstractLocker&);
+ RefPtr<SharedTask<void ()>> claimTask(const AbstractLocker&);
+ void runTask(RefPtr<SharedTask<void ()>>);
+
+ RefPtr<ParallelHelperPool> m_pool;
+ RefPtr<SharedTask<void ()>> m_task;
+ unsigned m_numActive { 0 };
+};
+
+class ParallelHelperPool : public ThreadSafeRefCounted<ParallelHelperPool> {
+public:
+ WTF_EXPORT_PRIVATE ParallelHelperPool();
+ WTF_EXPORT_PRIVATE ~ParallelHelperPool();
+
+ WTF_EXPORT_PRIVATE void ensureThreads(unsigned numThreads);
+
+ unsigned numberOfThreads() const { return m_numThreads; }
+
+ WTF_EXPORT_PRIVATE void doSomeHelping();
+
+private:
+ friend class ParallelHelperClient;
+ class Thread;
+ friend class Thread;
+
+ void didMakeWorkAvailable(const AbstractLocker&);
+
+ bool hasClientWithTask(const AbstractLocker&);
+ ParallelHelperClient* getClientWithTask(const AbstractLocker&);
+ ParallelHelperClient* waitForClientWithTask(const AbstractLocker&);
+
+ Box<Lock> m_lock; // AutomaticThread wants this in a box for safety.
+ RefPtr<AutomaticThreadCondition> m_workAvailableCondition;
+ Condition m_workCompleteCondition;
+
+ WeakRandom m_random;
+
+ Vector<ParallelHelperClient*> m_clients;
+ Vector<RefPtr<AutomaticThread>> m_threads;
+ unsigned m_numThreads { 0 }; // This can be larger than m_threads.size() because we start threads only once there is work.
+ bool m_isDying { false };
+};
+
+} // namespace WTF
+
+using WTF::ParallelHelperClient;
+using WTF::ParallelHelperPool;
+
+#endif // ParallelHelperPool_h
+
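A minimal end-to-end sketch of usage pattern 1 from the comment above, combining ensureThreads(), a client, and runFunctionInParallel(); the thread count, element count, and function name are illustrative, not part of the patch:

    #include <wtf/Atomics.h>
    #include <wtf/ParallelHelperPool.h>

    void sumInParallelExample()
    {
        RefPtr<ParallelHelperPool> pool = adoptRef(new ParallelHelperPool());
        pool->ensureThreads(4); // Usually bounded by the number of CPUs.

        ParallelHelperClient client(pool);

        Atomic<size_t> next { 0 };
        Atomic<size_t> sum { 0 };
        const size_t count = 1000;

        // Pattern 1: the task knows when it has run out of work and simply returns.
        // runFunctionInParallel() sets the task, helps on this thread, and then waits
        // for helper threads to drain, so capturing the locals by reference is safe.
        client.runFunctionInParallel(
            [&] () {
                for (;;) {
                    size_t index = next.exchangeAdd(1);
                    if (index >= count)
                        return;
                    sum.exchangeAdd(index);
                }
            });
    }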
diff --git a/Source/WTF/wtf/ParallelJobsGeneric.cpp b/Source/WTF/wtf/ParallelJobsGeneric.cpp
index 2cc0bc643..4023f43e6 100644
--- a/Source/WTF/wtf/ParallelJobsGeneric.cpp
+++ b/Source/WTF/wtf/ParallelJobsGeneric.cpp
@@ -105,17 +105,17 @@ bool ParallelEnvironment::ThreadPrivate::tryLockFor(ParallelEnvironment* parent)
void ParallelEnvironment::ThreadPrivate::execute(ThreadFunction threadFunction, void* parameters)
{
- MutexLocker lock(m_mutex);
+ LockHolder lock(m_mutex);
m_threadFunction = threadFunction;
m_parameters = parameters;
m_running = true;
- m_threadCondition.signal();
+ m_threadCondition.notifyOne();
}
void ParallelEnvironment::ThreadPrivate::waitForFinish()
{
- MutexLocker lock(m_mutex);
+ LockHolder lock(m_mutex);
while (m_running)
m_threadCondition.wait(m_mutex);
@@ -124,14 +124,14 @@ void ParallelEnvironment::ThreadPrivate::waitForFinish()
void ParallelEnvironment::ThreadPrivate::workerThread(void* threadData)
{
ThreadPrivate* sharedThread = reinterpret_cast<ThreadPrivate*>(threadData);
- MutexLocker lock(sharedThread->m_mutex);
+ LockHolder lock(sharedThread->m_mutex);
while (sharedThread->m_threadID) {
if (sharedThread->m_running) {
(*sharedThread->m_threadFunction)(sharedThread->m_parameters);
sharedThread->m_running = false;
sharedThread->m_parent = 0;
- sharedThread->m_threadCondition.signal();
+ sharedThread->m_threadCondition.notifyOne();
}
sharedThread->m_threadCondition.wait(sharedThread->m_mutex);
diff --git a/Source/WTF/wtf/ParallelJobsGeneric.h b/Source/WTF/wtf/ParallelJobsGeneric.h
index 731b9604a..52f4799f1 100644
--- a/Source/WTF/wtf/ParallelJobsGeneric.h
+++ b/Source/WTF/wtf/ParallelJobsGeneric.h
@@ -30,6 +30,8 @@
#if ENABLE(THREADING_GENERIC)
+#include <wtf/Condition.h>
+#include <wtf/Lock.h>
#include <wtf/RefCounted.h>
#include <wtf/Threading.h>
@@ -64,9 +66,9 @@ public:
void waitForFinish();
- static PassRefPtr<ThreadPrivate> create()
+ static Ref<ThreadPrivate> create()
{
- return adoptRef(new ThreadPrivate());
+ return adoptRef(*new ThreadPrivate());
}
static void workerThread(void*);
@@ -76,8 +78,8 @@ public:
bool m_running;
ParallelEnvironment* m_parent;
- mutable Mutex m_mutex;
- ThreadCondition m_threadCondition;
+ mutable Lock m_mutex;
+ Condition m_threadCondition;
ThreadFunction m_threadFunction;
void* m_parameters;
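The ParallelJobs changes above migrate from Mutex/ThreadCondition to WTF::Lock and WTF::Condition. A minimal sketch of the resulting wait/notify handshake, using only the LockHolder, Condition::wait(), and notifyOne() calls that appear in the diff (the PendingWork type and function names are illustrative):

    #include <wtf/Condition.h>
    #include <wtf/Lock.h>

    struct PendingWork {
        Lock lock;
        Condition condition;
        bool ready { false };
    };

    void produce(PendingWork& work)
    {
        LockHolder locker(work.lock);
        work.ready = true;
        work.condition.notifyOne(); // Was ThreadCondition::signal() before the migration.
    }

    void consume(PendingWork& work)
    {
        LockHolder locker(work.lock);
        while (!work.ready)
            work.condition.wait(work.lock); // Condition::wait() takes the Lock, like ThreadCondition::wait(Mutex&) did.
        work.ready = false;
    }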
diff --git a/Source/WTF/wtf/ParallelVectorIterator.h b/Source/WTF/wtf/ParallelVectorIterator.h
new file mode 100644
index 000000000..9a4f6ac4f
--- /dev/null
+++ b/Source/WTF/wtf/ParallelVectorIterator.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ParallelVectorIterator_h
+#define ParallelVectorIterator_h
+
+#include <wtf/FastMalloc.h>
+#include <wtf/Lock.h>
+#include <wtf/Noncopyable.h>
+
+namespace WTF {
+
+template<typename VectorType>
+class ParallelVectorIterator {
+ WTF_MAKE_NONCOPYABLE(ParallelVectorIterator);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ ParallelVectorIterator(VectorType& vector, size_t shardSize)
+ : m_vector(vector)
+ , m_shardSize(shardSize)
+ {
+ }
+
+ template<typename Functor>
+ ALWAYS_INLINE void iterate(const Functor& functor)
+ {
+ for (;;) {
+ size_t begin;
+ size_t end;
+ {
+ LockHolder locker(m_lock);
+ begin = m_next;
+ if (begin == m_vector.size())
+ return;
+ if (m_vector.size() - begin < m_shardSize)
+ end = m_vector.size();
+ else
+ end = begin + m_shardSize;
+ RELEASE_ASSERT(end <= m_vector.size());
+ RELEASE_ASSERT(begin < end);
+ m_next = end;
+ }
+
+ for (size_t i = begin; i < end; ++i)
+ functor(m_vector[i]);
+ }
+ }
+private:
+ VectorType& m_vector;
+ Lock m_lock;
+ size_t m_shardSize;
+ size_t m_next { 0 };
+};
+
+} // namespace WTF
+
+using WTF::ParallelVectorIterator;
+
+#endif // ParallelVectorIterator_h
+
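A hedged sketch of how ParallelVectorIterator might be driven by the ParallelHelperClient added earlier in this patch; the shard size, element type, and function name are illustrative:

    #include <wtf/ParallelHelperPool.h>
    #include <wtf/ParallelVectorIterator.h>
    #include <wtf/Vector.h>

    void processVectorInParallelExample(ParallelHelperClient& client, Vector<int>& values)
    {
        // Each participating thread repeatedly claims a shard of up to 128 elements
        // under the iterator's lock, then processes that shard without holding it.
        ParallelVectorIterator<Vector<int>> iterator(values, 128);

        client.runFunctionInParallel(
            [&] () {
                iterator.iterate(
                    [] (int& value) {
                        value *= 2;
                    });
            });
    }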
diff --git a/Source/WTF/wtf/ParkingLot.cpp b/Source/WTF/wtf/ParkingLot.cpp
new file mode 100644
index 000000000..e6c678bef
--- /dev/null
+++ b/Source/WTF/wtf/ParkingLot.cpp
@@ -0,0 +1,813 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ParkingLot.h"
+
+#include "CurrentTime.h"
+#include "DataLog.h"
+#include "HashFunctions.h"
+#include "StringPrintStream.h"
+#include "ThreadSpecific.h"
+#include "ThreadingPrimitives.h"
+#include "Vector.h"
+#include "WeakRandom.h"
+#include "WordLock.h"
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+namespace WTF {
+
+namespace {
+
+const bool verbose = false;
+
+struct ThreadData : public ThreadSafeRefCounted<ThreadData> {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+
+ ThreadData();
+ ~ThreadData();
+
+ ThreadIdentifier threadIdentifier;
+
+ Mutex parkingLock;
+ ThreadCondition parkingCondition;
+
+ const void* address { nullptr };
+
+ ThreadData* nextInQueue { nullptr };
+
+ intptr_t token { 0 };
+};
+
+enum class DequeueResult {
+ Ignore,
+ RemoveAndContinue,
+ RemoveAndStop
+};
+
+struct Bucket {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ Bucket()
+ : random(static_cast<unsigned>(bitwise_cast<intptr_t>(this))) // Cannot use default seed since that recurses into Lock.
+ {
+ }
+
+ void enqueue(ThreadData* data)
+ {
+ if (verbose)
+ dataLog(toString(currentThread(), ": enqueueing ", RawPointer(data), " with address = ", RawPointer(data->address), " onto ", RawPointer(this), "\n"));
+ ASSERT(data->address);
+ ASSERT(!data->nextInQueue);
+
+ if (queueTail) {
+ queueTail->nextInQueue = data;
+ queueTail = data;
+ return;
+ }
+
+ queueHead = data;
+ queueTail = data;
+ }
+
+ template<typename Functor>
+ void genericDequeue(const Functor& functor)
+ {
+ if (verbose)
+ dataLog(toString(currentThread(), ": dequeueing from bucket at ", RawPointer(this), "\n"));
+
+ if (!queueHead) {
+ if (verbose)
+ dataLog(toString(currentThread(), ": empty.\n"));
+ return;
+ }
+
+ // This loop is a very clever abomination. The induction variables are the pointer to the
+ // pointer to the current node, and the pointer to the previous node. This gives us everything
+ // we need to both proceed forward to the next node, and to remove nodes while maintaining the
+ // queueHead/queueTail and all of the nextInQueue links. For example, when we are at the head
+ // element, then removal means rewiring queueHead, and if it was also equal to queueTail, then
+ // we'd want queueTail to be set to nullptr. This works because:
+ //
+ // currentPtr == &queueHead
+ // previous == nullptr
+ //
+ // We remove by setting *currentPtr = (*currentPtr)->nextInQueue, i.e. changing the pointer
+ // that used to point to this node to instead point to this node's successor. Another example:
+ // if we were at the second node in the queue, then we'd have:
+ //
+ // currentPtr == &queueHead->nextInQueue
+ // previous == queueHead
+ //
+ // If this node is not equal to queueTail, then removing it simply means making
+ // queueHead->nextInQueue point to queueHead->nextInQueue->nextInQueue (which the algorithm
+ // achieves by mutating *currentPtr). If this node is equal to queueTail, then we want to set
+ // queueTail to previous, which in this case is queueHead - thus making the queue look like a
+ // proper one-element queue with queueHead == queueTail.
+ bool shouldContinue = true;
+ ThreadData** currentPtr = &queueHead;
+ ThreadData* previous = nullptr;
+
+ double time = monotonicallyIncreasingTimeMS();
+ bool timeToBeFair = false;
+ if (time > nextFairTime)
+ timeToBeFair = true;
+
+ bool didDequeue = false;
+
+ while (shouldContinue) {
+ ThreadData* current = *currentPtr;
+ if (verbose)
+ dataLog(toString(currentThread(), ": got thread ", RawPointer(current), "\n"));
+ if (!current)
+ break;
+ DequeueResult result = functor(current, timeToBeFair);
+ switch (result) {
+ case DequeueResult::Ignore:
+ if (verbose)
+ dataLog(toString(currentThread(), ": currentPtr = ", RawPointer(currentPtr), ", *currentPtr = ", RawPointer(*currentPtr), "\n"));
+ previous = current;
+ currentPtr = &(*currentPtr)->nextInQueue;
+ break;
+ case DequeueResult::RemoveAndStop:
+ shouldContinue = false;
+ FALLTHROUGH;
+ case DequeueResult::RemoveAndContinue:
+ if (verbose)
+ dataLog(toString(currentThread(), ": dequeueing ", RawPointer(current), " from ", RawPointer(this), "\n"));
+ if (current == queueTail)
+ queueTail = previous;
+ didDequeue = true;
+ *currentPtr = current->nextInQueue;
+ current->nextInQueue = nullptr;
+ break;
+ }
+ }
+
+ if (timeToBeFair && didDequeue)
+ nextFairTime = time + random.get();
+
+ ASSERT(!!queueHead == !!queueTail);
+ }
+
+ ThreadData* dequeue()
+ {
+ ThreadData* result = nullptr;
+ genericDequeue(
+ [&] (ThreadData* element, bool) -> DequeueResult {
+ result = element;
+ return DequeueResult::RemoveAndStop;
+ });
+ return result;
+ }
+
+ ThreadData* queueHead { nullptr };
+ ThreadData* queueTail { nullptr };
+
+ // This lock protects the entire bucket. Thou shall not make changes to Bucket without holding
+ // this lock.
+ WordLock lock;
+
+ double nextFairTime { 0 };
+
+ WeakRandom random;
+
+    // Put some distance between buckets in memory. This is one of several mitigations against false
+ // sharing.
+ char padding[64];
+};
+
+struct Hashtable;
+
+// We track all allocated hashtables so that hashtable resizing doesn't anger leak detectors.
+Vector<Hashtable*>* hashtables;
+StaticWordLock hashtablesLock;
+
+struct Hashtable {
+ unsigned size;
+ Atomic<Bucket*> data[1];
+
+ static Hashtable* create(unsigned size)
+ {
+ ASSERT(size >= 1);
+
+ Hashtable* result = static_cast<Hashtable*>(
+ fastZeroedMalloc(sizeof(Hashtable) + sizeof(Atomic<Bucket*>) * (size - 1)));
+ result->size = size;
+
+ {
+ // This is not fast and it's not data-access parallel, but that's fine, because
+ // hashtable resizing is guaranteed to be rare and it will never happen in steady
+ // state.
+ WordLockHolder locker(hashtablesLock);
+ if (!hashtables)
+ hashtables = new Vector<Hashtable*>();
+ hashtables->append(result);
+ }
+
+ return result;
+ }
+
+ static void destroy(Hashtable* hashtable)
+ {
+ {
+ // This is not fast, but that's OK. See comment in create().
+ WordLockHolder locker(hashtablesLock);
+ hashtables->removeFirst(hashtable);
+ }
+
+ fastFree(hashtable);
+ }
+};
+
+Atomic<Hashtable*> hashtable;
+Atomic<unsigned> numThreads;
+
+// With 64 bytes of padding per bucket, assuming a hashtable is fully populated with buckets, the
+// memory usage per thread will still be less than 1KB.
+const unsigned maxLoadFactor = 3;
+
+const unsigned growthFactor = 2;
+
+unsigned hashAddress(const void* address)
+{
+ return WTF::PtrHash<const void*>::hash(address);
+}
+
+Hashtable* ensureHashtable()
+{
+ for (;;) {
+ Hashtable* currentHashtable = hashtable.load();
+
+ if (currentHashtable)
+ return currentHashtable;
+
+ if (!currentHashtable) {
+ currentHashtable = Hashtable::create(maxLoadFactor);
+ if (hashtable.compareExchangeWeak(nullptr, currentHashtable)) {
+ if (verbose)
+ dataLog(toString(currentThread(), ": created initial hashtable ", RawPointer(currentHashtable), "\n"));
+ return currentHashtable;
+ }
+
+ Hashtable::destroy(currentHashtable);
+ }
+ }
+}
+
+// Locks the hashtable. This reloops in case of rehashing, so the current hashtable may be different
+// after this returns than when you called it. Guarantees that there is a hashtable. This is pretty
+// slow and not scalable, so it's only used during thread creation and for debugging/testing.
+Vector<Bucket*> lockHashtable()
+{
+ for (;;) {
+ Hashtable* currentHashtable = ensureHashtable();
+
+ ASSERT(currentHashtable);
+
+ // Now find all of the buckets. This makes sure that the hashtable is full of buckets so that
+ // we can lock all of the buckets, not just the ones that are materialized.
+ Vector<Bucket*> buckets;
+ for (unsigned i = currentHashtable->size; i--;) {
+ Atomic<Bucket*>& bucketPointer = currentHashtable->data[i];
+
+ for (;;) {
+ Bucket* bucket = bucketPointer.load();
+
+ if (!bucket) {
+ bucket = new Bucket();
+ if (!bucketPointer.compareExchangeWeak(nullptr, bucket)) {
+ delete bucket;
+ continue;
+ }
+ }
+
+ buckets.append(bucket);
+ break;
+ }
+ }
+
+ // Now lock the buckets in the right order.
+ std::sort(buckets.begin(), buckets.end());
+ for (Bucket* bucket : buckets)
+ bucket->lock.lock();
+
+ // If the hashtable didn't change (wasn't rehashed) while we were locking it, then we own it
+ // now.
+ if (hashtable.load() == currentHashtable)
+ return buckets;
+
+ // The hashtable rehashed. Unlock everything and try again.
+ for (Bucket* bucket : buckets)
+ bucket->lock.unlock();
+ }
+}
+
+void unlockHashtable(const Vector<Bucket*>& buckets)
+{
+ for (Bucket* bucket : buckets)
+ bucket->lock.unlock();
+}
+
+// Rehash the hashtable to handle numThreads threads.
+void ensureHashtableSize(unsigned numThreads)
+{
+ // We try to ensure that the size of the hashtable used for thread queues is always large enough
+ // to avoid collisions. So, since we started a new thread, we may need to increase the size of the
+ // hashtable. This does just that. Note that we never free the old spine, since we never lock
+ // around spine accesses (i.e. the "hashtable" global variable).
+
+ // First do a fast check to see if rehashing is needed.
+ Hashtable* oldHashtable = hashtable.load();
+ if (oldHashtable && static_cast<double>(oldHashtable->size) / static_cast<double>(numThreads) >= maxLoadFactor) {
+ if (verbose)
+ dataLog(toString(currentThread(), ": no need to rehash because ", oldHashtable->size, " / ", numThreads, " >= ", maxLoadFactor, "\n"));
+ return;
+ }
+
+ // Seems like we *might* have to rehash, so lock the hashtable and try again.
+ Vector<Bucket*> bucketsToUnlock = lockHashtable();
+
+ // Check again, since the hashtable could have rehashed while we were locking it. Also,
+ // lockHashtable() creates an initial hashtable for us.
+ oldHashtable = hashtable.load();
+ if (oldHashtable && static_cast<double>(oldHashtable->size) / static_cast<double>(numThreads) >= maxLoadFactor) {
+ if (verbose)
+ dataLog(toString(currentThread(), ": after locking, no need to rehash because ", oldHashtable->size, " / ", numThreads, " >= ", maxLoadFactor, "\n"));
+ unlockHashtable(bucketsToUnlock);
+ return;
+ }
+
+ Vector<Bucket*> reusableBuckets = bucketsToUnlock;
+
+ // OK, now we resize. First we gather all thread datas from the old hashtable. These thread datas
+ // are placed into the vector in queue order.
+ Vector<ThreadData*> threadDatas;
+ for (Bucket* bucket : reusableBuckets) {
+ while (ThreadData* threadData = bucket->dequeue())
+ threadDatas.append(threadData);
+ }
+
+ unsigned newSize = numThreads * growthFactor * maxLoadFactor;
+ RELEASE_ASSERT(newSize > oldHashtable->size);
+
+ Hashtable* newHashtable = Hashtable::create(newSize);
+ if (verbose)
+ dataLog(toString(currentThread(), ": created new hashtable: ", RawPointer(newHashtable), "\n"));
+ for (ThreadData* threadData : threadDatas) {
+ if (verbose)
+ dataLog(toString(currentThread(), ": rehashing thread data ", RawPointer(threadData), " with address = ", RawPointer(threadData->address), "\n"));
+ unsigned hash = hashAddress(threadData->address);
+ unsigned index = hash % newHashtable->size;
+ if (verbose)
+ dataLog(toString(currentThread(), ": index = ", index, "\n"));
+ Bucket* bucket = newHashtable->data[index].load();
+ if (!bucket) {
+ if (reusableBuckets.isEmpty())
+ bucket = new Bucket();
+ else
+ bucket = reusableBuckets.takeLast();
+ newHashtable->data[index].store(bucket);
+ }
+
+ bucket->enqueue(threadData);
+ }
+
+ // At this point there may be some buckets left unreused. This could easily happen if the
+ // number of enqueued threads right now is low but the high watermark of the number of threads
+ // enqueued was high. We place these buckets into the hashtable basically at random, just to
+ // make sure we don't leak them.
+ for (unsigned i = 0; i < newHashtable->size && !reusableBuckets.isEmpty(); ++i) {
+ Atomic<Bucket*>& bucketPtr = newHashtable->data[i];
+ if (bucketPtr.load())
+ continue;
+ bucketPtr.store(reusableBuckets.takeLast());
+ }
+
+ // Since we increased the size of the hashtable, we should have exhausted our preallocated
+ // buckets by now.
+ ASSERT(reusableBuckets.isEmpty());
+
+ // OK, right now the old hashtable is locked up and the new hashtable is ready to rock and
+ // roll. After we install the new hashtable, we can release all bucket locks.
+
+ bool result = hashtable.compareExchangeStrong(oldHashtable, newHashtable) == oldHashtable;
+ RELEASE_ASSERT(result);
+
+ unlockHashtable(bucketsToUnlock);
+}
+
+ThreadData::ThreadData()
+ : threadIdentifier(currentThread())
+{
+ unsigned currentNumThreads;
+ for (;;) {
+ unsigned oldNumThreads = numThreads.load();
+ currentNumThreads = oldNumThreads + 1;
+ if (numThreads.compareExchangeWeak(oldNumThreads, currentNumThreads))
+ break;
+ }
+
+ ensureHashtableSize(currentNumThreads);
+}
+
+ThreadData::~ThreadData()
+{
+ for (;;) {
+ unsigned oldNumThreads = numThreads.load();
+ if (numThreads.compareExchangeWeak(oldNumThreads, oldNumThreads - 1))
+ break;
+ }
+}
+
+ThreadData* myThreadData()
+{
+ static ThreadSpecific<RefPtr<ThreadData>, CanBeGCThread::True>* threadData;
+ static std::once_flag initializeOnce;
+ std::call_once(
+ initializeOnce,
+ [] {
+ threadData = new ThreadSpecific<RefPtr<ThreadData>, CanBeGCThread::True>();
+ });
+
+ RefPtr<ThreadData>& result = **threadData;
+
+ if (!result)
+ result = adoptRef(new ThreadData());
+
+ return result.get();
+}
+
+template<typename Functor>
+bool enqueue(const void* address, const Functor& functor)
+{
+ unsigned hash = hashAddress(address);
+
+ for (;;) {
+ Hashtable* myHashtable = ensureHashtable();
+ unsigned index = hash % myHashtable->size;
+ Atomic<Bucket*>& bucketPointer = myHashtable->data[index];
+ Bucket* bucket;
+ for (;;) {
+ bucket = bucketPointer.load();
+ if (!bucket) {
+ bucket = new Bucket();
+ if (!bucketPointer.compareExchangeWeak(nullptr, bucket)) {
+ delete bucket;
+ continue;
+ }
+ }
+ break;
+ }
+ if (verbose)
+ dataLog(toString(currentThread(), ": enqueueing onto bucket ", RawPointer(bucket), " with index ", index, " for address ", RawPointer(address), " with hash ", hash, "\n"));
+ bucket->lock.lock();
+
+ // At this point the hashtable could have rehashed under us.
+ if (hashtable.load() != myHashtable) {
+ bucket->lock.unlock();
+ continue;
+ }
+
+ ThreadData* threadData = functor();
+ bool result;
+ if (threadData) {
+ if (verbose)
+ dataLog(toString(currentThread(), ": proceeding to enqueue ", RawPointer(threadData), "\n"));
+ bucket->enqueue(threadData);
+ result = true;
+ } else
+ result = false;
+ bucket->lock.unlock();
+ return result;
+ }
+}
+
+enum class BucketMode {
+ EnsureNonEmpty,
+ IgnoreEmpty
+};
+
+template<typename DequeueFunctor, typename FinishFunctor>
+bool dequeue(
+ const void* address, BucketMode bucketMode, const DequeueFunctor& dequeueFunctor,
+ const FinishFunctor& finishFunctor)
+{
+ unsigned hash = hashAddress(address);
+
+ for (;;) {
+ Hashtable* myHashtable = ensureHashtable();
+ unsigned index = hash % myHashtable->size;
+ Atomic<Bucket*>& bucketPointer = myHashtable->data[index];
+ Bucket* bucket = bucketPointer.load();
+ if (!bucket) {
+ if (bucketMode == BucketMode::IgnoreEmpty)
+ return false;
+
+ for (;;) {
+ bucket = bucketPointer.load();
+ if (!bucket) {
+ bucket = new Bucket();
+ if (!bucketPointer.compareExchangeWeak(nullptr, bucket)) {
+ delete bucket;
+ continue;
+ }
+ }
+ break;
+ }
+ }
+
+ bucket->lock.lock();
+
+ // At this point the hashtable could have rehashed under us.
+ if (hashtable.load() != myHashtable) {
+ bucket->lock.unlock();
+ continue;
+ }
+
+ bucket->genericDequeue(dequeueFunctor);
+ bool result = !!bucket->queueHead;
+ finishFunctor(result);
+ bucket->lock.unlock();
+ return result;
+ }
+}
+
+} // anonymous namespace
+
+NEVER_INLINE ParkingLot::ParkResult ParkingLot::parkConditionallyImpl(
+ const void* address,
+ const ScopedLambda<bool()>& validation,
+ const ScopedLambda<void()>& beforeSleep,
+ const TimeWithDynamicClockType& timeout)
+{
+ if (verbose)
+ dataLog(toString(currentThread(), ": parking.\n"));
+
+ ThreadData* me = myThreadData();
+ me->token = 0;
+
+ // Guard against someone calling parkConditionally() recursively from beforeSleep().
+ RELEASE_ASSERT(!me->address);
+
+ bool enqueueResult = enqueue(
+ address,
+ [&] () -> ThreadData* {
+ if (!validation())
+ return nullptr;
+
+ me->address = address;
+ return me;
+ });
+
+ if (!enqueueResult)
+ return ParkResult();
+
+ beforeSleep();
+
+ bool didGetDequeued;
+ {
+ MutexLocker locker(me->parkingLock);
+ while (me->address && timeout.nowWithSameClock() < timeout) {
+ me->parkingCondition.timedWait(
+ me->parkingLock, timeout.approximateWallTime().secondsSinceEpoch().value());
+
+ // It's possible for the OS to decide not to wait. If it does that then it will also
+ // decide not to release the lock. If there's a bug in the time math, then this could
+ // result in a deadlock. Flashing the lock means that at worst it's just a CPU-eating
+ // spin.
+ me->parkingLock.unlock();
+ me->parkingLock.lock();
+ }
+ ASSERT(!me->address || me->address == address);
+ didGetDequeued = !me->address;
+ }
+
+ if (didGetDequeued) {
+ // Great! We actually got dequeued rather than the timeout expiring.
+ ParkResult result;
+ result.wasUnparked = true;
+ result.token = me->token;
+ return result;
+ }
+
+ // Have to remove ourselves from the queue since we timed out and nobody has dequeued us yet.
+
+ bool didDequeue = false;
+ dequeue(
+ address, BucketMode::IgnoreEmpty,
+ [&] (ThreadData* element, bool) {
+ if (element == me) {
+ didDequeue = true;
+ return DequeueResult::RemoveAndStop;
+ }
+ return DequeueResult::Ignore;
+ },
+ [] (bool) { });
+
+ // If didDequeue is true, then we dequeued ourselves. This means that we were not unparked.
+ // If didDequeue is false, then someone unparked us.
+
+ RELEASE_ASSERT(!me->nextInQueue);
+
+ // Make sure that no matter what, me->address is null after this point.
+ {
+ MutexLocker locker(me->parkingLock);
+ if (!didDequeue) {
+ // If we did not dequeue ourselves, then someone else did. They will set our address to
+ // null. We don't want to proceed until they do this, because otherwise, they may set
+ // our address to null in some distant future when we're already trying to wait for
+ // other things.
+ while (me->address)
+ me->parkingCondition.wait(me->parkingLock);
+ }
+ me->address = nullptr;
+ }
+
+ ParkResult result;
+ result.wasUnparked = !didDequeue;
+ if (!didDequeue) {
+ // If we were unparked then there should be a token.
+ result.token = me->token;
+ }
+ return result;
+}
+
+NEVER_INLINE ParkingLot::UnparkResult ParkingLot::unparkOne(const void* address)
+{
+ if (verbose)
+ dataLog(toString(currentThread(), ": unparking one.\n"));
+
+ UnparkResult result;
+
+ RefPtr<ThreadData> threadData;
+ result.mayHaveMoreThreads = dequeue(
+ address,
+ // Why is this here?
+ // FIXME: It seems like this could be IgnoreEmpty, but I switched this to EnsureNonEmpty
+ // without explanation in r199760. We need it to use EnsureNonEmpty if we need to perform
+ // some operation while holding the bucket lock, which usually goes into the finish func.
+ // But if that operation is a no-op, then it's not clear why we need this.
+ BucketMode::EnsureNonEmpty,
+ [&] (ThreadData* element, bool) {
+ if (element->address != address)
+ return DequeueResult::Ignore;
+ threadData = element;
+ result.didUnparkThread = true;
+ return DequeueResult::RemoveAndStop;
+ },
+ [] (bool) { });
+
+ if (!threadData) {
+ ASSERT(!result.didUnparkThread);
+ result.mayHaveMoreThreads = false;
+ return result;
+ }
+
+ ASSERT(threadData->address);
+
+ {
+ MutexLocker locker(threadData->parkingLock);
+ threadData->address = nullptr;
+ threadData->token = 0;
+ }
+ threadData->parkingCondition.signal();
+
+ return result;
+}
+
+NEVER_INLINE void ParkingLot::unparkOneImpl(
+ const void* address,
+ const ScopedLambda<intptr_t(ParkingLot::UnparkResult)>& callback)
+{
+ if (verbose)
+ dataLog(toString(currentThread(), ": unparking one the hard way.\n"));
+
+ RefPtr<ThreadData> threadData;
+ bool timeToBeFair = false;
+ dequeue(
+ address,
+ BucketMode::EnsureNonEmpty,
+ [&] (ThreadData* element, bool passedTimeToBeFair) {
+ if (element->address != address)
+ return DequeueResult::Ignore;
+ threadData = element;
+ timeToBeFair = passedTimeToBeFair;
+ return DequeueResult::RemoveAndStop;
+ },
+ [&] (bool mayHaveMoreThreads) {
+ UnparkResult result;
+ result.didUnparkThread = !!threadData;
+ result.mayHaveMoreThreads = result.didUnparkThread && mayHaveMoreThreads;
+ if (timeToBeFair)
+ RELEASE_ASSERT(threadData);
+ result.timeToBeFair = timeToBeFair;
+ intptr_t token = callback(result);
+ if (threadData)
+ threadData->token = token;
+ });
+
+ if (!threadData)
+ return;
+
+ ASSERT(threadData->address);
+
+ {
+ MutexLocker locker(threadData->parkingLock);
+ threadData->address = nullptr;
+ }
+ // At this point, the threadData may die. Good thing we have a RefPtr<> on it.
+ threadData->parkingCondition.signal();
+}
+
+NEVER_INLINE unsigned ParkingLot::unparkCount(const void* address, unsigned count)
+{
+ if (!count)
+ return 0;
+
+ if (verbose)
+ dataLog(toString(currentThread(), ": unparking count = ", count, " from ", RawPointer(address), ".\n"));
+
+ Vector<RefPtr<ThreadData>, 8> threadDatas;
+ dequeue(
+ address,
+ // FIXME: It seems like this ought to be EnsureNonEmpty if we follow what unparkOne() does,
+ // but that seems wrong.
+ BucketMode::IgnoreEmpty,
+ [&] (ThreadData* element, bool) {
+ if (verbose)
+ dataLog(toString(currentThread(), ": Observing element with address = ", RawPointer(element->address), "\n"));
+ if (element->address != address)
+ return DequeueResult::Ignore;
+ threadDatas.append(element);
+ if (threadDatas.size() == count)
+ return DequeueResult::RemoveAndStop;
+ return DequeueResult::RemoveAndContinue;
+ },
+ [] (bool) { });
+
+ for (RefPtr<ThreadData>& threadData : threadDatas) {
+ if (verbose)
+ dataLog(toString(currentThread(), ": unparking ", RawPointer(threadData.get()), " with address ", RawPointer(threadData->address), "\n"));
+ ASSERT(threadData->address);
+ {
+ MutexLocker locker(threadData->parkingLock);
+ threadData->address = nullptr;
+ }
+ threadData->parkingCondition.signal();
+ }
+
+ if (verbose)
+ dataLog(toString(currentThread(), ": done unparking.\n"));
+
+ return threadDatas.size();
+}
+
+NEVER_INLINE void ParkingLot::unparkAll(const void* address)
+{
+ unparkCount(address, UINT_MAX);
+}
+
+NEVER_INLINE void ParkingLot::forEachImpl(const ScopedLambda<void(ThreadIdentifier, const void*)>& callback)
+{
+ Vector<Bucket*> bucketsToUnlock = lockHashtable();
+
+ Hashtable* currentHashtable = hashtable.load();
+ for (unsigned i = currentHashtable->size; i--;) {
+ Bucket* bucket = currentHashtable->data[i].load();
+ if (!bucket)
+ continue;
+ for (ThreadData* currentThreadData = bucket->queueHead; currentThreadData; currentThreadData = currentThreadData->nextInQueue)
+ callback(currentThreadData->threadIdentifier, currentThreadData->address);
+ }
+
+ unlockHashtable(bucketsToUnlock);
+}
+
+} // namespace WTF
+
diff --git a/Source/WTF/wtf/ParkingLot.h b/Source/WTF/wtf/ParkingLot.h
new file mode 100644
index 000000000..7d5fb17ef
--- /dev/null
+++ b/Source/WTF/wtf/ParkingLot.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_ParkingLot_h
+#define WTF_ParkingLot_h
+
+#include <functional>
+#include <wtf/Atomics.h>
+#include <wtf/ScopedLambda.h>
+#include <wtf/Threading.h>
+#include <wtf/TimeWithDynamicClockType.h>
+
+namespace WTF {
+
+class ParkingLot {
+ ParkingLot() = delete;
+ ParkingLot(const ParkingLot&) = delete;
+
+public:
+ // ParkingLot will accept any kind of time and convert it internally, but this typedef tells
+ // you what kind of time ParkingLot would be able to use without conversions. It's sad that
+ // this is WallTime not MonotonicTime, but that's just how OS wait functions work. However,
+ // because ParkingLot evaluates whether it should wait by checking if your time has passed
+ // using whatever clock you used, specifying timeouts in MonotonicTime is semantically better.
+    // For example, if the user sets their computer's clock back while you are waiting for one
+    // second, and you specified the timeout using MonotonicTime, then ParkingLot will still know
+    // when your one second has elapsed.
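+    // Illustrative usage (assumes the existing WTF MonotonicTime and Seconds types; not part of
+    // this interface): a caller who wants a one second timeout measured on the monotonic clock
+    // can simply pass
+    //
+    //     ParkingLot::parkConditionally(address, validation, beforeSleep,
+    //         MonotonicTime::now() + Seconds(1));
+    //
+    // and ParkingLot performs whatever clock conversion is needed internally.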
+ typedef WallTime Time;
+
+ // Parks the thread in a queue associated with the given address, which cannot be null. The
+ // parking only succeeds if the validation function returns true while the queue lock is held.
+ //
+ // If validation returns false, it will unlock the internal parking queue and then it will
+ // return a null ParkResult (wasUnparked = false, token = 0) without doing anything else.
+ //
+ // If validation returns true, it will enqueue the thread, unlock the parking queue lock, call
+ // the beforeSleep function, and then it will sleep so long as the thread continues to be on the
+ // queue and the timeout hasn't fired. Finally, this returns wasUnparked = true if we actually
+ // got unparked or wasUnparked = false if the timeout was hit. When wasUnparked = true, the
+ // token will contain whatever token was returned from the callback to unparkOne(), or 0 if the
+ // thread was unparked using unparkAll() or the form of unparkOne() that doesn't take a
+ // callback.
+ //
+ // Note that beforeSleep is called with no locks held, so it's OK to do pretty much anything so
+ // long as you don't recursively call parkConditionally(). You can call unparkOne()/unparkAll()
+ // though. It's useful to use beforeSleep() to unlock some mutex in the implementation of
+ // Condition::wait().
+ struct ParkResult {
+ bool wasUnparked { false };
+ intptr_t token { 0 };
+ };
+ template<typename ValidationFunctor, typename BeforeSleepFunctor>
+ static ParkResult parkConditionally(
+ const void* address,
+ const ValidationFunctor& validation,
+ const BeforeSleepFunctor& beforeSleep,
+ const TimeWithDynamicClockType& timeout)
+ {
+ return parkConditionallyImpl(
+ address,
+ scopedLambdaRef<bool()>(validation),
+ scopedLambdaRef<void()>(beforeSleep),
+ timeout);
+ }
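+
+    // A minimal sketch (illustrative only; the real WTF::Condition is more involved) of how
+    // beforeSleep can be used to release a mutex atomically with parking, in the spirit of
+    // Condition::wait(). Here m_hasWaiters is a hypothetical Atomic<bool> member used only as
+    // the parking address:
+    //
+    //     bool waitUntil(Lock& lock, const TimeWithDynamicClockType& timeout)
+    //     {
+    //         ParkResult result = ParkingLot::parkConditionally(
+    //             &m_hasWaiters,
+    //             [this] { m_hasWaiters.store(true); return true; }, // Runs under the queue lock.
+    //             [&lock] { lock.unlock(); },                        // Runs with no locks held.
+    //             timeout);
+    //         lock.lock();
+    //         return result.wasUnparked;
+    //     }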
+
+ // Simple version of parkConditionally() that covers the most common case: you want to park
+ // indefinitely so long as the value at the given address hasn't changed.
+ template<typename T, typename U>
+ static ParkResult compareAndPark(const Atomic<T>* address, U expected)
+ {
+ return parkConditionally(
+ address,
+ [address, expected] () -> bool {
+ U value = address->load();
+ return value == expected;
+ },
+ [] () { },
+ Time::infinity());
+ }
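+
+    // As a sketch of the intended pairing (not the actual WTF::Lock algorithm, which also tracks
+    // a "parked" bit so that the common-case unlock can skip ParkingLot entirely), a minimal lock
+    // could be built on compareAndPark(), assuming a member Atomic<unsigned> m_word where 0 means
+    // unlocked and 1 means locked:
+    //
+    //     void lock()
+    //     {
+    //         while (!m_word.compareExchangeWeak(0u, 1u))
+    //             ParkingLot::compareAndPark(&m_word, 1u);
+    //     }
+    //
+    //     void unlock()
+    //     {
+    //         m_word.store(0);
+    //         ParkingLot::unparkOne(&m_word);
+    //     }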
+
+ // Unparking status given to you anytime you unparkOne().
+ struct UnparkResult {
+ // True if some thread was unparked.
+ bool didUnparkThread { false };
+ // True if there may be more threads on this address. This may be conservatively true.
+ bool mayHaveMoreThreads { false };
+        // This bit is randomly set to true, indicating that it may be profitable to unlock the lock
+ // using a fair unlocking protocol. This is most useful when used in conjunction with
+ // unparkOne(address, callback).
+ bool timeToBeFair { false };
+ };
+
+ // Unparks one thread from the queue associated with the given address, which cannot be null.
+    // The returned UnparkResult reports whether a thread was actually unparked and whether there
+    // may still be other threads on that queue.
+ WTF_EXPORT_PRIVATE static UnparkResult unparkOne(const void* address);
+
+ // This is an expert-mode version of unparkOne() that allows for really good thundering herd
+ // avoidance and eventual stochastic fairness in adaptive mutexes.
+ //
+ // Unparks one thread from the queue associated with the given address, and calls the given
+ // callback while the address is locked. Reports to the callback whether any thread got
+ // unparked, whether there may be any other threads still on the queue, and whether this may be
+ // a good time to do fair unlocking. The callback returns an intptr_t token, which is returned
+ // to the unparked thread via ParkResult::token.
+ //
+ // WTF::Lock and WTF::Condition both use this form of unparkOne() because it allows them to use
+ // the ParkingLot's internal queue lock to serialize some decision-making. For example, if
+ // UnparkResult::mayHaveMoreThreads is false inside the callback, then we know that at that
+ // moment nobody can add any threads to the queue because the queue lock is still held. Also,
+ // WTF::Lock uses the timeToBeFair and token mechanism to implement eventual fairness.
+ template<typename Callback>
+ static void unparkOne(const void* address, const Callback& callback)
+ {
+ unparkOneImpl(address, scopedLambdaRef<intptr_t(UnparkResult)>(callback));
+ }
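+
+    // Illustrative callback (a simplification of what a lock's slow unlock path might do; the
+    // names state, isLockedBit, FairHandoffToken and BargingToken are hypothetical): the
+    // unlocking thread decides, while the queue lock is still held, whether to hand the lock
+    // straight to the woken thread:
+    //
+    //     ParkingLot::unparkOne(
+    //         &state,
+    //         [&] (ParkingLot::UnparkResult result) -> intptr_t {
+    //             if (result.didUnparkThread && result.timeToBeFair) {
+    //                 // Fair path: leave the lock held on behalf of the unparked thread.
+    //                 state.store(isLockedBit);
+    //                 return FairHandoffToken;
+    //             }
+    //             // Barging path: release the lock so any thread may grab it first.
+    //             state.store(0);
+    //             return BargingToken;
+    //         });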
+
+ WTF_EXPORT_PRIVATE static unsigned unparkCount(const void* address, unsigned count);
+
+ // Unparks every thread from the queue associated with the given address, which cannot be null.
+ WTF_EXPORT_PRIVATE static void unparkAll(const void* address);
+
+ // Locks the parking lot and walks all of the parked threads and the addresses they are waiting
+ // on. Threads that are on the same queue are guaranteed to be walked from first to last, but the
+ // queues may be randomly interleaved. For example, if the queue for address A1 has T1 and T2 and
+ // the queue for address A2 has T3 and T4, then you might see iteration orders like:
+ //
+ // A1,T1 A1,T2 A2,T3 A2,T4
+ // A2,T3 A2,T4 A1,T1 A1,T2
+ // A1,T1 A2,T3 A1,T2 A2,T4
+ // A1,T1 A2,T3 A2,T4 A1,T2
+ //
+ // As well as many other possible interleavings that all have T1 before T2 and T3 before T4 but are
+ // otherwise unconstrained. This method is useful primarily for debugging. It's also used by unit
+ // tests.
+ template<typename Func>
+ static void forEach(const Func& func)
+ {
+ forEachImpl(scopedLambdaRef<void(ThreadIdentifier, const void*)>(func));
+ }
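+
+    // For example (a debugging aid only, using the existing dataLog() and RawPointer() helpers),
+    // this dumps every parked thread together with the address it is waiting on:
+    //
+    //     ParkingLot::forEach([] (ThreadIdentifier thread, const void* address) {
+    //         dataLog("thread ", thread, " is parked on ", RawPointer(address), "\n");
+    //     });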
+
+private:
+ WTF_EXPORT_PRIVATE static ParkResult parkConditionallyImpl(
+ const void* address,
+ const ScopedLambda<bool()>& validation,
+ const ScopedLambda<void()>& beforeSleep,
+ const TimeWithDynamicClockType& timeout);
+
+ WTF_EXPORT_PRIVATE static void unparkOneImpl(
+ const void* address, const ScopedLambda<intptr_t(UnparkResult)>& callback);
+
+ WTF_EXPORT_PRIVATE static void forEachImpl(const ScopedLambda<void(ThreadIdentifier, const void*)>&);
+};
+
+} // namespace WTF
+
+using WTF::ParkingLot;
+
+#endif // WTF_ParkingLot_h
+
diff --git a/Source/WTF/wtf/PassOwnPtr.h b/Source/WTF/wtf/PassOwnPtr.h
deleted file mode 100644
index 2c1f9d452..000000000
--- a/Source/WTF/wtf/PassOwnPtr.h
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef WTF_PassOwnPtr_h
-#define WTF_PassOwnPtr_h
-
-#include <cstddef>
-#include <wtf/Assertions.h>
-#include <wtf/OwnPtrCommon.h>
-#include <type_traits>
-
-namespace WTF {
-
- template<typename T> class OwnPtr;
- template<typename T> class PassOwnPtr;
- template<typename T> PassOwnPtr<T> adoptPtr(T*);
-
- class RefCountedBase;
- class ThreadSafeRefCountedBase;
-
- template<typename T> class PassOwnPtr {
- public:
- typedef T ValueType;
- typedef ValueType* PtrType;
-
- PassOwnPtr() : m_ptr(0) { }
- PassOwnPtr(std::nullptr_t) : m_ptr(0) { }
-
- // It somewhat breaks the type system to allow transfer of ownership out of
- // a const PassOwnPtr. However, it makes it much easier to work with PassOwnPtr
- // temporaries, and we don't have a need to use real const PassOwnPtrs anyway.
- PassOwnPtr(const PassOwnPtr& o) : m_ptr(o.leakPtr()) { }
- template<typename U> PassOwnPtr(const PassOwnPtr<U>& o) : m_ptr(o.leakPtr()) { }
-
- ~PassOwnPtr() { deleteOwnedPtr(m_ptr); }
-
- PtrType get() const { return m_ptr; }
-
- PtrType leakPtr() const WARN_UNUSED_RETURN;
-
- ValueType& operator*() const { ASSERT(m_ptr); return *m_ptr; }
- PtrType operator->() const { ASSERT(m_ptr); return m_ptr; }
-
- bool operator!() const { return !m_ptr; }
-
- // This conversion operator allows implicit conversion to bool but not to other integer types.
- typedef PtrType PassOwnPtr::*UnspecifiedBoolType;
- operator UnspecifiedBoolType() const { return m_ptr ? &PassOwnPtr::m_ptr : 0; }
-
- PassOwnPtr& operator=(const PassOwnPtr&) { COMPILE_ASSERT(!sizeof(T*), PassOwnPtr_should_never_be_assigned_to); return *this; }
-
- template<typename U> friend PassOwnPtr<U> adoptPtr(U*);
-
- private:
- explicit PassOwnPtr(PtrType ptr) : m_ptr(ptr) { }
-
- // We should never have two OwnPtrs for the same underlying object (otherwise we'll get
- // double-destruction), so these equality operators should never be needed.
- template<typename U> bool operator==(const PassOwnPtr<U>&) { COMPILE_ASSERT(!sizeof(U*), OwnPtrs_should_never_be_equal); return false; }
- template<typename U> bool operator!=(const PassOwnPtr<U>&) { COMPILE_ASSERT(!sizeof(U*), OwnPtrs_should_never_be_equal); return false; }
- template<typename U> bool operator==(const OwnPtr<U>&) { COMPILE_ASSERT(!sizeof(U*), OwnPtrs_should_never_be_equal); return false; }
- template<typename U> bool operator!=(const OwnPtr<U>&) { COMPILE_ASSERT(!sizeof(U*), OwnPtrs_should_never_be_equal); return false; }
-
- mutable PtrType m_ptr;
- };
-
- template<typename T> inline typename PassOwnPtr<T>::PtrType PassOwnPtr<T>::leakPtr() const
- {
- PtrType ptr = m_ptr;
- m_ptr = 0;
- return ptr;
- }
-
- template<typename T, typename U> inline bool operator==(const PassOwnPtr<T>& a, const PassOwnPtr<U>& b)
- {
- return a.get() == b.get();
- }
-
- template<typename T, typename U> inline bool operator==(const PassOwnPtr<T>& a, const OwnPtr<U>& b)
- {
- return a.get() == b.get();
- }
-
- template<typename T, typename U> inline bool operator==(const OwnPtr<T>& a, const PassOwnPtr<U>& b)
- {
- return a.get() == b.get();
- }
-
- template<typename T, typename U> inline bool operator==(const PassOwnPtr<T>& a, U* b)
- {
- return a.get() == b;
- }
-
- template<typename T, typename U> inline bool operator==(T* a, const PassOwnPtr<U>& b)
- {
- return a == b.get();
- }
-
- template<typename T, typename U> inline bool operator!=(const PassOwnPtr<T>& a, const PassOwnPtr<U>& b)
- {
- return a.get() != b.get();
- }
-
- template<typename T, typename U> inline bool operator!=(const PassOwnPtr<T>& a, const OwnPtr<U>& b)
- {
- return a.get() != b.get();
- }
-
- template<typename T, typename U> inline bool operator!=(const OwnPtr<T>& a, const PassOwnPtr<U>& b)
- {
- return a.get() != b.get();
- }
-
- template<typename T, typename U> inline bool operator!=(const PassOwnPtr<T>& a, U* b)
- {
- return a.get() != b;
- }
-
- template<typename T, typename U> inline bool operator!=(T* a, const PassOwnPtr<U>& b)
- {
- return a != b.get();
- }
-
- template<typename T> inline PassOwnPtr<T> adoptPtr(T* ptr)
- {
- static_assert(!std::is_convertible<T*, RefCountedBase*>::value, "Do not use adoptPtr with RefCounted, use adoptRef!");
- static_assert(!std::is_convertible<T*, ThreadSafeRefCountedBase*>::value, "Do not use adoptPtr with ThreadSafeRefCounted, use adoptRef!");
-
- return PassOwnPtr<T>(ptr);
- }
-
- template<typename T, typename U> inline PassOwnPtr<T> static_pointer_cast(const PassOwnPtr<U>& p)
- {
- return adoptPtr(static_cast<T*>(p.leakPtr()));
- }
-
- template<typename T> inline T* getPtr(const PassOwnPtr<T>& p)
- {
- return p.get();
- }
-
-} // namespace WTF
-
-using WTF::PassOwnPtr;
-using WTF::adoptPtr;
-using WTF::static_pointer_cast;
-
-#endif // WTF_PassOwnPtr_h
diff --git a/Source/WTF/wtf/PassRef.h b/Source/WTF/wtf/PassRef.h
deleted file mode 100644
index 587374c6c..000000000
--- a/Source/WTF/wtf/PassRef.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public License
- * along with this library; see the file COPYING.LIB. If not, write to
- * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- *
- */
-
-#ifndef WTF_PassRef_h
-#define WTF_PassRef_h
-
-#include "Assertions.h"
-#include <cstddef>
-#include <utility>
-
-namespace WTF {
-
-template<typename T> class PassRef;
-template<typename T> class PassRefPtr;
-template<typename T> class Ref;
-template<typename T> class RefPtr;
-
-template<typename T> PassRef<T> adoptRef(T&);
-
-inline void adopted(const void*) { }
-
-template<typename T> class PassRef {
-public:
- PassRef(T&);
- PassRef(PassRef&&);
- template<typename U> PassRef(PassRef<U>);
-
- const T& get() const;
- T& get();
-
- void dropRef();
- T& leakRef() WARN_UNUSED_RETURN;
-
-#ifndef NDEBUG
- ~PassRef();
-#endif
-
-private:
- friend PassRef adoptRef<T>(T&);
-
- template<typename U> friend class PassRef;
- template<typename U> friend class PassRefPtr;
- template<typename U> friend class Ref;
- template<typename U> friend class RefPtr;
-
- enum AdoptTag { Adopt };
- PassRef(T&, AdoptTag);
-
- T& m_reference;
-
-#ifndef NDEBUG
- bool m_gaveUpReference;
-#endif
-};
-
-template<typename T> inline PassRef<T>::PassRef(T& reference)
- : m_reference(reference)
-#ifndef NDEBUG
- , m_gaveUpReference(false)
-#endif
-{
- reference.ref();
-}
-
-template<typename T> inline PassRef<T>::PassRef(PassRef&& other)
- : m_reference(other.leakRef())
-#ifndef NDEBUG
- , m_gaveUpReference(false)
-#endif
-{
-}
-
-template<typename T> template<typename U> inline PassRef<T>::PassRef(PassRef<U> other)
- : m_reference(other.leakRef())
-#ifndef NDEBUG
- , m_gaveUpReference(false)
-#endif
-{
-}
-
-#ifndef NDEBUG
-
-template<typename T> PassRef<T>::~PassRef()
-{
- ASSERT(m_gaveUpReference);
-}
-
-#endif
-
-template<typename T> inline void PassRef<T>::dropRef()
-{
- ASSERT(!m_gaveUpReference);
- m_reference.deref();
-#ifndef NDEBUG
- m_gaveUpReference = true;
-#endif
-}
-
-template<typename T> inline const T& PassRef<T>::get() const
-{
- ASSERT(!m_gaveUpReference);
- return m_reference;
-}
-
-template<typename T> inline T& PassRef<T>::get()
-{
- ASSERT(!m_gaveUpReference);
- return m_reference;
-}
-
-template<typename T> inline T& PassRef<T>::leakRef()
-{
-#ifndef NDEBUG
- ASSERT(!m_gaveUpReference);
- m_gaveUpReference = true;
-#endif
- return m_reference;
-}
-
-template<typename T> inline PassRef<T>::PassRef(T& reference, AdoptTag)
- : m_reference(reference)
-#ifndef NDEBUG
- , m_gaveUpReference(false)
-#endif
-{
-}
-
-template<typename T> inline PassRef<T> adoptRef(T& reference)
-{
- adopted(&reference);
- return PassRef<T>(reference, PassRef<T>::Adopt);
-}
-
-template<typename T, typename... Args> inline PassRef<T> createRefCounted(Args&&... args)
-{
- return adoptRef(*new T(std::forward<Args>(args)...));
-}
-
-} // namespace WTF
-
-using WTF::PassRef;
-using WTF::adoptRef;
-using WTF::createRefCounted;
-
-#endif // WTF_PassRef_h
diff --git a/Source/WTF/wtf/PassRefPtr.h b/Source/WTF/wtf/PassRefPtr.h
index 62924464c..089192b0b 100644
--- a/Source/WTF/wtf/PassRefPtr.h
+++ b/Source/WTF/wtf/PassRefPtr.h
@@ -21,11 +21,12 @@
#ifndef WTF_PassRefPtr_h
#define WTF_PassRefPtr_h
-#include "PassRef.h"
+#include <wtf/Ref.h>
namespace WTF {
- template<typename T> PassRefPtr<T> adoptRef(T*);
+ template<typename T> class RefPtr;
+ template<typename T> class PassRefPtr;
template<typename T> ALWAYS_INLINE void refIfNotNull(T* ptr)
{
@@ -41,6 +42,9 @@ namespace WTF {
template<typename T> class PassRefPtr {
public:
+ typedef T ValueType;
+ typedef ValueType* PtrType;
+
PassRefPtr() : m_ptr(nullptr) { }
PassRefPtr(T* ptr) : m_ptr(ptr) { refIfNotNull(ptr); }
// It somewhat breaks the type system to allow transfer of ownership out of
@@ -49,10 +53,11 @@ namespace WTF {
PassRefPtr(const PassRefPtr& o) : m_ptr(o.leakRef()) { }
template<typename U> PassRefPtr(const PassRefPtr<U>& o) : m_ptr(o.leakRef()) { }
- ALWAYS_INLINE ~PassRefPtr() { derefIfNotNull(m_ptr); }
+ ALWAYS_INLINE ~PassRefPtr() { derefIfNotNull(std::exchange(m_ptr, nullptr)); }
template<typename U> PassRefPtr(const RefPtr<U>&);
- template<typename U> PassRefPtr(PassRef<U> reference) : m_ptr(&reference.leakRef()) { }
+ template<typename U> PassRefPtr(Ref<U>&& reference) : m_ptr(&reference.leakRef()) { }
+ template<typename U> PassRefPtr(RefPtr<U>&& reference) : m_ptr(reference.leakRef()) { }
T* get() const { return m_ptr; }
@@ -67,7 +72,7 @@ namespace WTF {
typedef T* (PassRefPtr::*UnspecifiedBoolType);
operator UnspecifiedBoolType() const { return m_ptr ? &PassRefPtr::m_ptr : nullptr; }
- friend PassRefPtr adoptRef<T>(T*);
+ template<typename V, typename U> friend PassRefPtr<V> static_pointer_cast(const PassRefPtr<U>&);
private:
PassRefPtr& operator=(const PassRefPtr&) = delete;
@@ -87,9 +92,7 @@ namespace WTF {
template<typename T> inline T* PassRefPtr<T>::leakRef() const
{
- T* ptr = m_ptr;
- m_ptr = nullptr;
- return ptr;
+ return std::exchange(m_ptr, nullptr);
}
template<typename T, typename U> inline bool operator==(const PassRefPtr<T>& a, const PassRefPtr<U>& b)
@@ -141,27 +144,19 @@ namespace WTF {
{
return a != b.get();
}
-
- template<typename T> inline PassRefPtr<T> adoptRef(T* p)
- {
- adopted(p);
- return PassRefPtr<T>(p, PassRefPtr<T>::Adopt);
- }
- template<typename T, typename U> inline PassRefPtr<T> static_pointer_cast(const PassRefPtr<U>& p)
- {
- return adoptRef(static_cast<T*>(p.leakRef()));
- }
-
- template<typename T> inline T* getPtr(const PassRefPtr<T>& p)
+ template<typename V, typename U> inline PassRefPtr<V> static_pointer_cast(const PassRefPtr<U>& p)
{
- return p.get();
+ return PassRefPtr<V>(static_cast<V*>(p.leakRef()), PassRefPtr<V>::Adopt);
}
+ template <typename T> struct IsSmartPtr<PassRefPtr<T>> {
+ static const bool value = true;
+ };
+
} // namespace WTF
using WTF::PassRefPtr;
-using WTF::adoptRef;
using WTF::static_pointer_cast;
#endif // WTF_PassRefPtr_h
diff --git a/Source/WTF/wtf/Platform.h b/Source/WTF/wtf/Platform.h
index 318c9dd14..44d929c33 100644
--- a/Source/WTF/wtf/Platform.h
+++ b/Source/WTF/wtf/Platform.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006, 2007, 2008, 2009, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2006-2009, 2013-2015 Apple Inc. All rights reserved.
* Copyright (C) 2007-2009 Torch Mobile, Inc.
* Copyright (C) 2010, 2011 Research In Motion Limited. All rights reserved.
*
@@ -12,10 +12,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -51,7 +51,7 @@
/* ==== Policy decision macros: these define policy choices for a particular port. ==== */
/* USE() - use a particular third-party library or optional OS service */
-#define USE(WTF_FEATURE) (defined WTF_USE_##WTF_FEATURE && WTF_USE_##WTF_FEATURE)
+#define USE(WTF_FEATURE) (defined USE_##WTF_FEATURE && USE_##WTF_FEATURE)
/* ENABLE() - turn on a specific feature of WebKit */
#define ENABLE(WTF_FEATURE) (defined ENABLE_##WTF_FEATURE && ENABLE_##WTF_FEATURE)
@@ -100,23 +100,9 @@
#define WTF_MIPS_DOUBLE_FLOAT (defined __mips_hard_float && !defined __mips_single_float)
#define WTF_MIPS_FP64 (defined __mips_fpr && __mips_fpr == 64)
/* MIPS requires allocators to use aligned memory */
-#define WTF_USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
+#define USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
#endif /* MIPS */
-/* CPU(PPC) - PowerPC 32-bit */
-#if ( defined(__ppc__) \
- || defined(__PPC__) \
- || defined(__powerpc__) \
- || defined(__powerpc) \
- || defined(__POWERPC__) \
- || defined(_M_PPC) \
- || defined(__PPC)) \
- && defined(__BYTE_ORDER__) \
- && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
-#define WTF_CPU_PPC 1
-#define WTF_CPU_BIG_ENDIAN 1
-#endif
-
/* CPU(PPC64) - PowerPC 64-bit Big Endian */
#if ( defined(__ppc64__) \
|| defined(__PPC64__)) \
@@ -136,6 +122,21 @@
#define WTF_CPU_PPC64LE 1
#endif
+/* CPU(PPC) - PowerPC 32-bit */
+#if ( defined(__ppc__) \
+ || defined(__PPC__) \
+ || defined(__powerpc__) \
+ || defined(__powerpc) \
+ || defined(__POWERPC__) \
+ || defined(_M_PPC) \
+ || defined(__PPC)) \
+ && !CPU(PPC64) \
+ && defined(__BYTE_ORDER__) \
+ && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+#define WTF_CPU_PPC 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
/* CPU(SH4) - SuperH SH-4 */
#if defined(__SH4__)
#define WTF_CPU_SH4 1
@@ -148,7 +149,8 @@
#endif
/* CPU(S390) - S390 32-bit */
-#if defined(__s390__)
+#if ( defined(__s390__) \
+ && !CPU(S390X))
#define WTF_CPU_S390 1
#define WTF_CPU_BIG_ENDIAN 1
#endif
@@ -160,12 +162,18 @@
|| defined(_X86_) \
|| defined(__THW_INTEL)
#define WTF_CPU_X86 1
+
+#if defined(__SSE2__) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)
+#define WTF_CPU_X86_SSE2 1
+#endif
+
#endif
/* CPU(X86_64) - AMD64 / Intel64 / x86_64 64-bit */
#if defined(__x86_64__) \
|| defined(_M_X64)
#define WTF_CPU_X86_64 1
+#define WTF_CPU_X86_SSE2 1
#endif
/* CPU(ARM64) - Apple */
@@ -213,7 +221,7 @@
|| defined(__ARM_ARCH_5TEJ__)
#define WTF_ARM_ARCH_VERSION 5
/*ARMv5TE requires allocators to use aligned memory*/
-#define WTF_USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
+#define USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
#elif defined(__ARM_ARCH_6__) \
|| defined(__ARM_ARCH_6J__) \
@@ -225,6 +233,7 @@
#define WTF_ARM_ARCH_VERSION 6
#elif defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7K__) \
|| defined(__ARM_ARCH_7R__) \
|| defined(__ARM_ARCH_7S__)
#define WTF_ARM_ARCH_VERSION 7
@@ -244,7 +253,7 @@
|| defined(__TARGET_ARCH_5TE) \
|| defined(__TARGET_ARCH_5TEJ)
/*ARMv5TE requires allocators to use aligned memory*/
-#define WTF_USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
+#define USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
#endif
#else
@@ -271,6 +280,7 @@
#elif defined(__ARM_ARCH_6T2__) \
|| defined(__ARM_ARCH_7__) \
|| defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7K__) \
|| defined(__ARM_ARCH_7M__) \
|| defined(__ARM_ARCH_7R__) \
|| defined(__ARM_ARCH_7S__)
@@ -315,8 +325,8 @@
#define WTF_CPU_ARM_NEON 1
#endif
-#if CPU(ARM_NEON) && (!COMPILER(GCC) || GCC_VERSION_AT_LEAST(4, 7, 0))
-// All NEON intrinsics usage can be disabled by this macro.
+#if CPU(ARM_NEON)
+/* All NEON intrinsics usage can be disabled by this macro. */
#define HAVE_ARM_NEON_INTRINSICS 1
#endif
@@ -324,10 +334,18 @@
#define WTF_CPU_ARM_VFP 1
#endif
+#if defined(__ARM_ARCH_7K__)
+#define WTF_CPU_APPLE_ARMV7K 1
+#endif
+
#if defined(__ARM_ARCH_7S__)
#define WTF_CPU_APPLE_ARMV7S 1
#endif
+#if defined(__ARM_ARCH_EXT_IDIV__) || CPU(APPLE_ARMV7S)
+#define HAVE_ARM_IDIV_INSTRUCTIONS 1
+#endif
+
#endif /* ARM */
#if CPU(ARM) || CPU(MIPS) || CPU(SH4)
@@ -359,19 +377,6 @@
#define WTF_OS_IOS 1
#elif OS(DARWIN) && defined(TARGET_OS_MAC) && TARGET_OS_MAC
#define WTF_OS_MAC_OS_X 1
-
-/* FIXME: These can be removed after sufficient time has passed since the removal of BUILDING_ON / TARGETING macros. */
-
-#define ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MIN_REQUIRED 0 / 0
-#define ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MAX_ALLOWED 0 / 0
-
-#define BUILDING_ON_LEOPARD ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MIN_REQUIRED
-#define BUILDING_ON_SNOW_LEOPARD ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MIN_REQUIRED
-#define BUILDING_ON_LION ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MIN_REQUIRED
-
-#define TARGETING_LEOPARD ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MAX_ALLOWED
-#define TARGETING_SNOW_LEOPARD ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MAX_ALLOWED
-#define TARGETING_LION ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MAX_ALLOWED
#endif
/* OS(FREEBSD) - FreeBSD */
@@ -404,11 +409,6 @@
#define WTF_OS_SOLARIS 1
#endif
-/* OS(WINCE) - Windows CE; note that for this platform OS(WINDOWS) is also defined */
-#if defined(_WIN32_WCE)
-#define WTF_OS_WINCE 1
-#endif
-
/* OS(WINDOWS) - Any version of Windows */
#if defined(WIN32) || defined(_WIN32)
#define WTF_OS_WINDOWS 1
@@ -434,79 +434,128 @@
/* Operating environments */
+/* Standard libraries */
+#if defined(HAVE_FEATURES_H) && HAVE_FEATURES_H
+/* If the included features.h is glibc's one, __GLIBC__ is defined. */
+#include <features.h>
+#endif
+
/* FIXME: these are all mixes of OS, operating environment and policy choices. */
-/* PLATFORM(EFL) */
/* PLATFORM(GTK) */
/* PLATFORM(MAC) */
+/* PLATFORM(IOS) */
+/* PLATFORM(IOS_SIMULATOR) */
/* PLATFORM(WIN) */
-#if defined(BUILDING_EFL__)
-#define WTF_PLATFORM_EFL 1
-#elif defined(BUILDING_GTK__)
+#if defined(BUILDING_GTK__)
#define WTF_PLATFORM_GTK 1
-#elif OS(DARWIN)
-#define WTF_PLATFORM_COCOA 1
+#elif defined(BUILDING_JSCONLY__)
+/* JSCOnly does not provide PLATFORM() macro */
+#elif OS(MAC_OS_X)
#define WTF_PLATFORM_MAC 1
+#elif OS(IOS)
+#define WTF_PLATFORM_IOS 1
+#if defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR
+#define WTF_PLATFORM_IOS_SIMULATOR 1
+#endif
#elif OS(WINDOWS)
#define WTF_PLATFORM_WIN 1
#endif
-/* PLATFORM(IOS) */
-/* FIXME: this is sometimes used as an OS switch and sometimes for higher-level things */
-#if (defined(TARGET_OS_EMBEDDED) && TARGET_OS_EMBEDDED) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE)
-#define WTF_PLATFORM_IOS 1
+/* PLATFORM(COCOA) */
+#if PLATFORM(MAC) || PLATFORM(IOS)
+#define WTF_PLATFORM_COCOA 1
#endif
-/* PLATFORM(IOS_SIMULATOR) */
-#if defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR
-#define WTF_PLATFORM_IOS 1
-#define WTF_PLATFORM_IOS_SIMULATOR 1
+#if PLATFORM(COCOA)
+#if defined __has_include && __has_include(<CoreFoundation/CFPriv.h>)
+#define USE_APPLE_INTERNAL_SDK 1
+#endif
+#endif
+
+/* PLATFORM(APPLETV) */
+#if defined(TARGET_OS_TV) && TARGET_OS_TV
+#define WTF_PLATFORM_APPLETV 1
+#endif
+
+/* PLATFORM(WATCHOS) */
+#if defined(TARGET_OS_WATCH) && TARGET_OS_WATCH
+#define WTF_PLATFORM_WATCHOS 1
#endif
/* Graphics engines */
/* USE(CG) and PLATFORM(CI) */
-#if PLATFORM(MAC) || PLATFORM(IOS) || (PLATFORM(WIN) && !USE(WINGDI) && !PLATFORM(WIN_CAIRO))
-#define WTF_USE_CG 1
+#if PLATFORM(COCOA) || (PLATFORM(WIN) && !USE(WINGDI) && !PLATFORM(WIN_CAIRO) && !USE(DIRECT2D))
+#define USE_CG 1
#endif
-#if PLATFORM(MAC) || PLATFORM(IOS) || (PLATFORM(WIN) && USE(CG))
-#define WTF_USE_CA 1
+#if PLATFORM(COCOA) || (PLATFORM(WIN) && USE(CG) && !USE(DIRECT2D))
+#define USE_CA 1
+#endif
+
+#if PLATFORM(GTK)
+#define USE_CAIRO 1
+#define USE_GLIB 1
+#define USE_FREETYPE 1
+#define USE_HARFBUZZ 1
+#define USE_SOUP 1
+#define USE_WEBP 1
#endif
#if PLATFORM(GTK)
-#define WTF_USE_CAIRO 1
-#define WTF_USE_GLIB 1
-#define WTF_USE_FREETYPE 1
-#define WTF_USE_HARFBUZZ 1
-#define WTF_USE_SOUP 1
-#define WTF_USE_WEBP 1
-#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
+#define GLIB_VERSION_MIN_REQUIRED GLIB_VERSION_2_36
+#endif
+
+#if PLATFORM(GTK) && !defined(GTK_API_VERSION_2)
+#define GDK_VERSION_MIN_REQUIRED GDK_VERSION_3_6
+#endif
+
+#if USE(SOUP)
+#define SOUP_VERSION_MIN_REQUIRED SOUP_VERSION_2_42
#endif
/* On Windows, use QueryPerformanceCounter by default */
#if OS(WINDOWS)
-#define WTF_USE_QUERY_PERFORMANCE_COUNTER 1
+#define USE_QUERY_PERFORMANCE_COUNTER 1
#endif
#if PLATFORM(COCOA)
-#define WTF_USE_CF 1
-#define WTF_USE_FOUNDATION 1
+#define USE_CF 1
+#define USE_FOUNDATION 1
+#define USE_NETWORK_CFDATA_ARRAY_CALLBACK 1
+#define ENABLE_USER_MESSAGE_HANDLERS 1
+#define HAVE_OUT_OF_PROCESS_LAYER_HOSTING 1
+#define HAVE_DTRACE 0
+
+#if !PLATFORM(WATCHOS) && !PLATFORM(APPLETV)
+#define HAVE_AVKIT 1
+#define HAVE_PARENTAL_CONTROLS 1
+#endif
#endif
-#if PLATFORM(MAC) && !PLATFORM(IOS)
+#if PLATFORM(MAC)
+
+#if __MAC_OS_X_VERSION_MIN_REQUIRED < 101200
+#define USE_QTKIT 1
+#else
+#define USE_QTKIT 0
+#endif
-#define WTF_USE_APPKIT 1
+#define USE_APPKIT 1
#define HAVE_RUNLOOP_TIMER 1
#define HAVE_SEC_IDENTITY 1
#define HAVE_SEC_KEYCHAIN 1
#if CPU(X86_64)
-#define WTF_USE_PLUGIN_HOST_PROCESS 1
+#define HAVE_NETWORK_EXTENSION 1
+#define USE_PLUGIN_HOST_PROCESS 1
#endif
-#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
-#define HAVE_LAYER_HOSTING_IN_WINDOW_SERVER 1
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 101100
+#define HAVE_NSSCROLLING_FILTERS 1
+#else
+#define HAVE_NSSCROLLING_FILTERS 0
#endif
/* OS X defines a series of platform macros for debugging. */
@@ -515,106 +564,97 @@
#undef __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES
#define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0
-#endif /* PLATFORM(MAC) && !PLATFORM(IOS) */
-
-#if OS(DARWIN) && !PLATFORM(GTK)
-#define ENABLE_PURGEABLE_MEMORY 1
-#endif
+#endif /* PLATFORM(MAC) */
#if PLATFORM(IOS)
-#define DONT_FINALIZE_ON_MAIN_THREAD 1
+#if USE(APPLE_INTERNAL_SDK) \
+ && ((TARGET_OS_IOS && __IPHONE_OS_VERSION_MAX_ALLOWED < 100000) \
+ || (PLATFORM(APPLETV) && __TV_OS_VERSION_MAX_ALLOWED < 100000) \
+ || (PLATFORM(WATCHOS) && __WATCH_OS_VERSION_MAX_ALLOWED < 30000))
+#define USE_CFURLCONNECTION 1
+#endif
+
+#define HAVE_NETWORK_EXTENSION 1
#define HAVE_READLINE 1
-#define WTF_USE_CFNETWORK 1
-#define WTF_USE_NETWORK_CFDATA_ARRAY_CALLBACK 1
-#define WTF_USE_UIKIT_EDITING 1
-#define WTF_USE_WEB_THREAD 1
-#define WTF_USE_QUICK_LOOK 1
+#define USE_UIKIT_EDITING 1
+#define USE_WEB_THREAD 1
+
+#if !PLATFORM(WATCHOS) && !PLATFORM(APPLETV)
+#define USE_QUICK_LOOK 1
+#endif
+
+#if TARGET_OS_IOS
+#define HAVE_APP_LINKS 1
+#endif
#if CPU(ARM64)
#define ENABLE_JIT_CONSTANT_BLINDING 0
#endif
+#if CPU(ARM_NEON)
+#undef HAVE_ARM_NEON_INTRINSICS
+#define HAVE_ARM_NEON_INTRINSICS 0
+#endif
+
#endif /* PLATFORM(IOS) */
#if PLATFORM(WIN) && !USE(WINGDI)
-#define WTF_USE_CF 1
+#define USE_CF 1
#endif
#if PLATFORM(WIN) && !USE(WINGDI) && !PLATFORM(WIN_CAIRO)
-#define WTF_USE_CFNETWORK 1
-#endif
-
-#if USE(CFNETWORK) || PLATFORM(MAC) || PLATFORM(IOS)
-#define WTF_USE_CFURLCACHE 1
+#define USE_CFURLCONNECTION 1
#endif
#if !defined(HAVE_ACCESSIBILITY)
-#if PLATFORM(IOS) || PLATFORM(MAC) || PLATFORM(WIN) || PLATFORM(GTK) || PLATFORM(EFL)
+#if PLATFORM(COCOA) || PLATFORM(WIN) || PLATFORM(GTK)
#define HAVE_ACCESSIBILITY 1
#endif
#endif /* !defined(HAVE_ACCESSIBILITY) */
-#if OS(UNIX)
+/* FIXME: Remove after CMake build enabled on Darwin */
+#if OS(DARWIN)
#define HAVE_ERRNO_H 1
#define HAVE_LANGINFO_H 1
+#define HAVE_LOCALTIME_R 1
#define HAVE_MMAP 1
#define HAVE_SIGNAL_H 1
+#define HAVE_STAT_BIRTHTIME 1
#define HAVE_STRINGS_H 1
+#define HAVE_STRNSTR 1
#define HAVE_SYS_PARAM_H 1
#define HAVE_SYS_TIME_H 1
-#define WTF_USE_PTHREADS 1
-#endif /* OS(UNIX) */
-
-#if (OS(FREEBSD) || OS(OPENBSD)) && !defined(__GLIBC__)
-#define HAVE_PTHREAD_NP_H 1
-#endif
-
-#if !defined(HAVE_VASPRINTF)
-#if !COMPILER(MSVC) && !COMPILER(MINGW)
-#define HAVE_VASPRINTF 1
-#endif
-#endif
-
-#if !defined(HAVE_STRNSTR)
-#if OS(DARWIN) || (OS(FREEBSD) && !defined(__GLIBC__))
-#define HAVE_STRNSTR 1
-#endif
-#endif
-
-#if !OS(WINDOWS) && !OS(SOLARIS)
#define HAVE_TM_GMTOFF 1
#define HAVE_TM_ZONE 1
#define HAVE_TIMEGM 1
-#endif
+#endif /* OS(DARWIN) */
-#if OS(DARWIN)
+#if OS(UNIX)
+#define USE_PTHREADS 1
+#endif /* OS(UNIX) */
+#if OS(DARWIN)
#define HAVE_DISPATCH_H 1
#define HAVE_MADV_FREE 1
#define HAVE_MADV_FREE_REUSE 1
+#define HAVE_MADV_DONTNEED 1
#define HAVE_MERGESORT 1
#define HAVE_PTHREAD_SETNAME_NP 1
#define HAVE_READLINE 1
#define HAVE_SYS_TIMEB_H 1
-#define WTF_USE_ACCELERATE 1
+#if !PLATFORM(GTK)
+#define USE_ACCELERATE 1
+#endif
#if !PLATFORM(IOS)
#define HAVE_HOSTED_CORE_ANIMATION 1
#endif
#endif /* OS(DARWIN) */
-#if OS(WINDOWS) && !OS(WINCE)
-
-#define HAVE_SYS_TIMEB_H 1
-#define HAVE_ALIGNED_MALLOC 1
-#define HAVE_ISDEBUGGERPRESENT 1
-
-#endif
-
-#if OS(WINDOWS)
-#define HAVE_VIRTUALALLOC 1
+#if (PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101200) || (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000)
+#define HAVE_CFNETWORK_STORAGE_PARTITIONING 1
#endif
/* ENABLE macro defaults */
@@ -624,34 +664,16 @@
/* Include feature macros */
#include <wtf/FeatureDefines.h>
-#if PLATFORM(EFL)
-#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
+#if USE(APPLE_INTERNAL_SDK) && __has_include(<WebKitAdditions/AdditionalFeatureDefines.h>)
+#include <WebKitAdditions/AdditionalFeatureDefines.h>
#endif
#if OS(WINDOWS)
-#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
#define USE_SYSTEM_MALLOC 1
#endif
-#if !defined(ENABLE_GLOBAL_FASTMALLOC_NEW)
-#define ENABLE_GLOBAL_FASTMALLOC_NEW 1
-#endif
-
-#define ENABLE_DEBUG_WITH_BREAKPOINT 0
-#define ENABLE_SAMPLING_COUNTERS 0
-#define ENABLE_SAMPLING_FLAGS 0
-#define ENABLE_SAMPLING_REGIONS 0
-#define ENABLE_OPCODE_SAMPLING 0
-#define ENABLE_CODEBLOCK_SAMPLING 0
-#if ENABLE(CODEBLOCK_SAMPLING) && !ENABLE(OPCODE_SAMPLING)
-#error "CODEBLOCK_SAMPLING requires OPCODE_SAMPLING"
-#endif
-#if ENABLE(OPCODE_SAMPLING) || ENABLE(SAMPLING_FLAGS) || ENABLE(SAMPLING_REGIONS)
-#define ENABLE_SAMPLING_THREAD 1
-#endif
-
-#if !defined(WTF_USE_JSVALUE64) && !defined(WTF_USE_JSVALUE32_64)
-#if (CPU(X86_64) && (OS(UNIX) || OS(WINDOWS))) \
+#if !defined(USE_JSVALUE64) && !defined(USE_JSVALUE32_64)
+#if (CPU(X86_64) && !defined(__ILP32__) && (OS(UNIX) || OS(WINDOWS))) \
|| (CPU(IA64) && !CPU(IA64_32)) \
|| CPU(ALPHA) \
|| CPU(ARM64) \
@@ -659,120 +681,107 @@
|| CPU(MIPS64) \
|| CPU(PPC64) \
|| CPU(PPC64LE)
-#define WTF_USE_JSVALUE64 1
+#define USE_JSVALUE64 1
#else
-#define WTF_USE_JSVALUE32_64 1
-#endif
-#endif /* !defined(WTF_USE_JSVALUE64) && !defined(WTF_USE_JSVALUE32_64) */
-
-/* Disable the JITs if we're forcing the cloop to be enabled */
-#if defined(ENABLE_LLINT_C_LOOP) && ENABLE_LLINT_C_LOOP
-#define ENABLE_JIT 0
-#define ENABLE_DFG_JIT 0
-#define ENABLE_FTL_JIT 0
-#endif
-
-/* Disable the JIT on versions of GCC prior to 4.1 */
-#if !defined(ENABLE_JIT) && COMPILER(GCC) && !GCC_VERSION_AT_LEAST(4, 1, 0)
-#define ENABLE_JIT 0
+#define USE_JSVALUE32_64 1
#endif
+#endif /* !defined(USE_JSVALUE64) && !defined(USE_JSVALUE32_64) */
-/* The JIT is enabled by default on all x86, x86-64, ARM & MIPS platforms except Win64. */
+/* The JIT is enabled by default on all x86, x86-64, ARM & MIPS platforms except ARMv7k. */
#if !defined(ENABLE_JIT) \
&& (CPU(X86) || CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS)) \
- && (OS(DARWIN) || !COMPILER(GCC) || GCC_VERSION_AT_LEAST(4, 1, 0)) \
- && !OS(WINCE) \
- && !(OS(WINDOWS) && CPU(X86_64))
+ && !CPU(APPLE_ARMV7K)
#define ENABLE_JIT 1
#endif
-/* Do we have LLVM? */
-#if !defined(HAVE_LLVM) && PLATFORM(MAC) && ENABLE(FTL_JIT) && CPU(X86_64)
-#define HAVE_LLVM 1
-#endif
-
-#if PLATFORM(GTK) && HAVE(LLVM) && ENABLE(JIT) && !defined(ENABLE_FTL_JIT) && CPU(X86_64)
-#define ENABLE_FTL_JIT 1
+/* The FTL *does not* work on 32-bit platforms. Disable it even if someone asked us to enable it. */
+#if USE(JSVALUE32_64)
+#undef ENABLE_FTL_JIT
+#define ENABLE_FTL_JIT 0
#endif
-/* If possible, try to enable the LLVM disassembler. This is optional and we can
- fall back on UDis86 if necessary. */
-#if !defined(WTF_USE_LLVM_DISASSEMBLER) && HAVE(LLVM) && (CPU(X86_64) || CPU(X86))
-#define WTF_USE_LLVM_DISASSEMBLER 1
+/* The FTL is disabled on the iOS simulator, mostly for simplicity. */
+#if PLATFORM(IOS_SIMULATOR)
+#undef ENABLE_FTL_JIT
+#define ENABLE_FTL_JIT 0
#endif
/* If possible, try to enable a disassembler. This is optional. We proceed in two
steps: first we try to find some disassembler that we can use, and then we
decide if the high-level disassembler API can be enabled. */
-#if !defined(WTF_USE_UDIS86) && ENABLE(JIT) && (PLATFORM(MAC) || (PLATFORM(EFL) && OS(LINUX))) \
+#if !defined(USE_UDIS86) && ENABLE(JIT) && ((OS(DARWIN) && !PLATFORM(GTK)) || (OS(LINUX) && PLATFORM(GTK))) \
&& (CPU(X86) || CPU(X86_64))
-#define WTF_USE_UDIS86 1
+#define USE_UDIS86 1
#endif
-#if !defined(ENABLE_DISASSEMBLER) && (USE(UDIS86) || USE(LLVM_DISASSEMBLER))
+#if !defined(ENABLE_DISASSEMBLER) && USE(UDIS86)
#define ENABLE_DISASSEMBLER 1
#endif
-#if !defined(WTF_USE_ARM64_DISASSEMBLER) && ENABLE(JIT) && PLATFORM(IOS) && CPU(ARM64)
-#define WTF_USE_ARM64_DISASSEMBLER 1
+#if !defined(USE_ARM64_DISASSEMBLER) && ENABLE(JIT) && CPU(ARM64)
+#define USE_ARM64_DISASSEMBLER 1
#endif
-#if !defined(ENABLE_DISASSEMBLER) && (USE(UDIS86) || USE(ARMV7_DISASSEMBLER) || USE(ARM64_DISASSEMBLER))
-#define ENABLE_DISASSEMBLER 1
+#if !defined(USE_ARMV7_DISASSEMBLER) && ENABLE(JIT) && CPU(ARM_THUMB2)
+#define USE_ARMV7_DISASSEMBLER 1
+#endif
+
+#if !defined(USE_ARM_LLVM_DISASSEMBLER) && ENABLE(JIT) && CPU(ARM_TRADITIONAL) && HAVE(LLVM)
+#define USE_ARM_LLVM_DISASSEMBLER 1
#endif
-/* On some of the platforms where we have a JIT, we want to also have the
- low-level interpreter. */
-#if !defined(ENABLE_LLINT) \
- && ENABLE(JIT) \
- && (OS(DARWIN) || OS(LINUX) || OS(FREEBSD) || OS(HURD)) \
- && (PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(GTK)) \
- && (CPU(X86) || CPU(X86_64) || CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL) || CPU(ARM64) || CPU(MIPS) || CPU(SH4))
-#define ENABLE_LLINT 1
+#if !defined(ENABLE_DISASSEMBLER) && (USE(UDIS86) || USE(ARMV7_DISASSEMBLER) || USE(ARM64_DISASSEMBLER) || USE(ARM_LLVM_DISASSEMBLER))
+#define ENABLE_DISASSEMBLER 1
#endif
-#if !defined(ENABLE_DFG_JIT) && ENABLE(JIT) && !COMPILER(MSVC)
+#if !defined(ENABLE_DFG_JIT) && ENABLE(JIT)
/* Enable the DFG JIT on X86 and X86_64. */
-#if (CPU(X86) || CPU(X86_64)) && (OS(DARWIN) || OS(LINUX) || OS(FREEBSD) || OS(HURD))
+#if (CPU(X86) || CPU(X86_64)) && (OS(DARWIN) || OS(LINUX) || OS(FREEBSD) || OS(WINDOWS) || OS(HURD))
#define ENABLE_DFG_JIT 1
#endif
/* Enable the DFG JIT on ARMv7. Only tested on iOS and Qt/GTK+ Linux. */
-#if (CPU(ARM_THUMB2) || CPU(ARM64)) && (PLATFORM(IOS) || PLATFORM(GTK) || PLATFORM(EFL))
+#if (CPU(ARM_THUMB2) || CPU(ARM64)) && (PLATFORM(IOS) || PLATFORM(GTK))
#define ENABLE_DFG_JIT 1
#endif
-/* Enable the DFG JIT on ARM, MIPS and SH4. */
-#if CPU(ARM_TRADITIONAL) || CPU(MIPS) || CPU(SH4)
+/* Enable the DFG JIT on ARM and MIPS. */
+#if CPU(ARM_TRADITIONAL) || CPU(MIPS)
#define ENABLE_DFG_JIT 1
#endif
#endif
-/* Concurrent JIT only works on 64-bit platforms because it requires that
+/* Concurrent JS only works on 64-bit platforms because it requires that
values get stored to atomically. This is trivially true on 64-bit platforms,
but not true at all on 32-bit platforms where values are composed of two
separate sub-values. */
-#if PLATFORM(MAC) && ENABLE(DFG_JIT) && USE(JSVALUE64)
-#define ENABLE_CONCURRENT_JIT 1
+#if ENABLE(DFG_JIT) && USE(JSVALUE64)
+#define ENABLE_CONCURRENT_JS 1
+#endif
+
+/* This controls whether B3 is built. B3 is needed for FTL JIT and WebAssembly */
+#if ENABLE(FTL_JIT) || ENABLE(WEBASSEMBLY)
+#define ENABLE_B3_JIT 1
#endif
-/* If the jit is not available, enable the LLInt C Loop: */
+/* If the baseline jit is not available, then disable upper tiers as well: */
#if !ENABLE(JIT)
-#undef ENABLE_LLINT /* Undef so that we can redefine it. */
-#undef ENABLE_LLINT_C_LOOP /* Undef so that we can redefine it. */
-#undef ENABLE_DFG_JIT /* Undef so that we can redefine it. */
-#define ENABLE_LLINT 1
-#define ENABLE_LLINT_C_LOOP 1
+#undef ENABLE_DFG_JIT
+#undef ENABLE_FTL_JIT
+#undef ENABLE_B3_JIT
#define ENABLE_DFG_JIT 0
+#define ENABLE_FTL_JIT 0
+#define ENABLE_B3_JIT 0
#endif
-/* Do a sanity check to make sure that we at least have one execution engine in
- use: */
-#if !(ENABLE(JIT) || ENABLE(LLINT))
-#error You have to have at least one execution model enabled to build JSC
+/* The SamplingProfiler is the probabilistic and low-overhead profiler used by
+ * JSC to measure where time is spent inside a JavaScript program.
+ * In configurations other than Windows and Darwin, because layout of mcontext_t depends on standard libraries (like glibc),
+ * sampling profiler is enabled if WebKit uses pthreads and glibc. */
+#if !defined(ENABLE_SAMPLING_PROFILER)
+#if (OS(DARWIN) || OS(WINDOWS) || PLATFORM(GTK)) && ENABLE(JIT)
+#define ENABLE_SAMPLING_PROFILER 1
+#else
+#define ENABLE_SAMPLING_PROFILER 0
#endif
-
-/* Generational collector for JSC */
-#if !defined(ENABLE_GGC)
-#define ENABLE_GGC 0
#endif
/* Counts uses of write barriers using sampling counters. Be sure to also
@@ -804,19 +813,51 @@
/* Configure the JIT */
#if CPU(X86) && COMPILER(MSVC)
#define JSC_HOST_CALL __fastcall
-#elif CPU(X86) && COMPILER(GCC)
+#elif CPU(X86) && COMPILER(GCC_OR_CLANG)
#define JSC_HOST_CALL __attribute__ ((fastcall))
#else
#define JSC_HOST_CALL
#endif
+#if CPU(X86) && OS(WINDOWS)
+#define CALLING_CONVENTION_IS_STDCALL 1
+#ifndef CDECL
+#if COMPILER(MSVC)
+#define CDECL __cdecl
+#else
+#define CDECL __attribute__ ((__cdecl))
+#endif
+#endif
+#else
+#define CALLING_CONVENTION_IS_STDCALL 0
+#endif
+
+#if CPU(X86)
+#define WTF_COMPILER_SUPPORTS_FASTCALL_CALLING_CONVENTION 1
+#ifndef FASTCALL
+#if COMPILER(MSVC)
+#define FASTCALL __fastcall
+#else
+#define FASTCALL __attribute__ ((fastcall))
+#endif
+#endif
+#else
+#define WTF_COMPILER_SUPPORTS_FASTCALL_CALLING_CONVENTION 0
+#endif
+
+#if ENABLE(JIT) && CALLING_CONVENTION_IS_STDCALL
+#define JIT_OPERATION CDECL
+#else
+#define JIT_OPERATION
+#endif
+
/* Configure the interpreter */
-#if COMPILER(GCC)
+#if COMPILER(GCC_OR_CLANG)
#define HAVE_COMPUTED_GOTO 1
#endif
/* Determine if we need to enable Computed Goto Opcodes or not: */
-#if HAVE(COMPUTED_GOTO) && ENABLE(LLINT)
+#if HAVE(COMPUTED_GOTO) || ENABLE(JIT)
#define ENABLE_COMPUTED_GOTO_OPCODES 1
#endif
@@ -824,7 +865,7 @@
#define ENABLE_REGEXP_TRACING 0
/* Yet Another Regex Runtime - turned on by default for JIT enabled ports. */
-#if !defined(ENABLE_YARR_JIT) && (ENABLE(JIT) || ENABLE(LLINT_C_LOOP))
+#if !defined(ENABLE_YARR_JIT) && ENABLE(JIT)
#define ENABLE_YARR_JIT 1
/* Setting this flag compares JIT results with interpreter results. */
@@ -852,72 +893,61 @@
#endif
#endif
-/* FIXME: We currently unconditionally use spearate stacks. When we switch to using the
- C stack for JS frames, we'll need to make the following conditional on ENABLE(LLINT_CLOOP)
- only.
-*/
-#if ENABLE(LLINT_CLOOP) || 1
-#define WTF_USE_SEPARATE_C_AND_JS_STACK 1
+/* Enable the following if you want to use the MacroAssembler::probe() facility
+ to do JIT debugging. */
+#if (CPU(X86) || CPU(X86_64) || CPU(ARM64) || (CPU(ARM_THUMB2) && PLATFORM(IOS))) && ENABLE(JIT) && OS(DARWIN) && !defined(NDEBUG)
+#define ENABLE_MASM_PROBE 1
+#else
+#define ENABLE_MASM_PROBE 0
#endif
-/* Pick which allocator to use; we only need an executable allocator if the assembler is compiled in.
- On x86-64 we use a single fixed mmap, on other platforms we mmap on demand. */
-#if ENABLE(ASSEMBLER)
-#if CPU(X86_64) && !OS(WINDOWS) || PLATFORM(IOS)
-#define ENABLE_EXECUTABLE_ALLOCATOR_FIXED 1
+#ifndef ENABLE_EXCEPTION_SCOPE_VERIFICATION
+#ifdef NDEBUG
+#define ENABLE_EXCEPTION_SCOPE_VERIFICATION 0
#else
-#define ENABLE_EXECUTABLE_ALLOCATOR_DEMAND 1
+#define ENABLE_EXCEPTION_SCOPE_VERIFICATION 1
#endif
#endif
/* CSS Selector JIT Compiler */
#if !defined(ENABLE_CSS_SELECTOR_JIT)
-#if CPU(X86_64) && ENABLE(JIT) && PLATFORM(MAC)
+#if (CPU(X86_64) || CPU(ARM64) || (CPU(ARM_THUMB2) && PLATFORM(IOS))) && ENABLE(JIT) && (OS(DARWIN) || PLATFORM(GTK))
#define ENABLE_CSS_SELECTOR_JIT 1
#else
#define ENABLE_CSS_SELECTOR_JIT 0
#endif
#endif
-/* Accelerated compositing */
-#if PLATFORM(MAC) || PLATFORM(IOS) || (PLATFORM(WIN) && !USE(WINGDI) && !PLATFORM(WIN_CAIRO))
-#define WTF_USE_ACCELERATED_COMPOSITING 1
-#endif
-
-#if ENABLE(WEBGL) && !defined(WTF_USE_3D_GRAPHICS)
-#define WTF_USE_3D_GRAPHICS 1
-#endif
-
#if ENABLE(WEBGL) && PLATFORM(WIN)
-#define WTF_USE_OPENGL 1
-#define WTF_USE_OPENGL_ES_2 1
-#define WTF_USE_EGL 1
+#define USE_OPENGL 1
+#define USE_OPENGL_ES_2 1
+#define USE_EGL 1
#endif
#if ENABLE(VIDEO) && PLATFORM(WIN_CAIRO)
-#define WTF_USE_GLIB 1
-#define WTF_USE_GSTREAMER 1
-#define GST_API_VERSION_1 1
+#if ENABLE(GSTREAMER_WINCAIRO)
+#define USE_MEDIA_FOUNDATION 0
+#define USE_GLIB 1
+#define USE_GSTREAMER 1
+#else
+#define USE_MEDIA_FOUNDATION 1
#endif
-
-#if USE(TEXTURE_MAPPER) && USE(3D_GRAPHICS) && !defined(WTF_USE_TEXTURE_MAPPER_GL)
-#define WTF_USE_TEXTURE_MAPPER_GL 1
#endif
-/* Compositing on the UI-process in WebKit2 */
-#if PLATFORM(MAC) || PLATFORM(IOS)
-#define WTF_USE_PROTECTION_SPACE_AUTH_CALLBACK 1
+#if PLATFORM(WIN_CAIRO)
+#define USE_TEXTURE_MAPPER 1
#endif
-/* Set up a define for a common error that is intended to cause a build error -- thus the space after Error. */
-#define WTF_PLATFORM_CFNETWORK Error USE_macro_should_be_used_with_CFNETWORK
+#if USE(TEXTURE_MAPPER) && ENABLE(GRAPHICS_CONTEXT_3D) && !defined(USE_TEXTURE_MAPPER_GL)
+#define USE_TEXTURE_MAPPER_GL 1
+#endif
-#if PLATFORM(WIN)
-#define WTF_USE_CROSS_PLATFORM_CONTEXT_MENUS 1
+#if PLATFORM(COCOA)
+#define USE_PROTECTION_SPACE_AUTH_CALLBACK 1
#endif
-#if PLATFORM(MAC) && HAVE(ACCESSIBILITY)
-#define WTF_USE_ACCESSIBILITY_CONTEXT_MENUS 1
+#if PLATFORM(COCOA) && HAVE(ACCESSIBILITY)
+#define USE_ACCESSIBILITY_CONTEXT_MENUS 1
#endif
#if CPU(ARM_THUMB2) || CPU(ARM64)
@@ -933,37 +963,27 @@
#endif
#if USE(GLIB)
-#include <wtf/gobject/GTypedefs.h>
+#include <wtf/glib/GTypedefs.h>
#endif
-/* FIXME: This define won't be needed once #27551 is fully landed. However,
+/* FIXME: This define won't be needed once #27551 is fully landed. However,
since most ports try to support sub-project independence, adding new headers
to WTF causes many ports to break, and so this way we can address the build
breakages one port at a time. */
-#if !defined(WTF_USE_EXPORT_MACROS) && (PLATFORM(MAC) || PLATFORM(WIN))
-#define WTF_USE_EXPORT_MACROS 1
+#if !defined(USE_EXPORT_MACROS) && (PLATFORM(COCOA) || PLATFORM(WIN))
+#define USE_EXPORT_MACROS 1
#endif
-#if !defined(WTF_USE_EXPORT_MACROS_FOR_TESTING) && (PLATFORM(GTK) || PLATFORM(WIN))
-#define WTF_USE_EXPORT_MACROS_FOR_TESTING 1
+#if !defined(USE_EXPORT_MACROS_FOR_TESTING) && (PLATFORM(GTK) || PLATFORM(WIN))
+#define USE_EXPORT_MACROS_FOR_TESTING 1
#endif
-#if PLATFORM(GTK) || PLATFORM(EFL)
-#define WTF_USE_UNIX_DOMAIN_SOCKETS 1
-#endif
-
-#if !defined(WTF_USE_IMLANG_FONT_LINK2) && !OS(WINCE)
-#define WTF_USE_IMLANG_FONT_LINK2 1
-#endif
-
-#if !defined(ENABLE_COMPARE_AND_SWAP) && (OS(WINDOWS) || (COMPILER(GCC) && (CPU(X86) || CPU(X86_64) || CPU(ARM_THUMB2) || CPU(ARM64))))
-#define ENABLE_COMPARE_AND_SWAP 1
+#if PLATFORM(GTK)
+#define USE_UNIX_DOMAIN_SOCKETS 1
#endif
-#define ENABLE_OBJECT_MARK_LOGGING 0
-
-#if !defined(ENABLE_PARALLEL_GC) && !ENABLE(OBJECT_MARK_LOGGING) && (PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(GTK)) && ENABLE(COMPARE_AND_SWAP)
-#define ENABLE_PARALLEL_GC 1
+#if !defined(USE_IMLANG_FONT_LINK2)
+#define USE_IMLANG_FONT_LINK2 1
#endif
#if !defined(ENABLE_GC_VALIDATION) && !defined(NDEBUG)
@@ -974,102 +994,208 @@
#define ENABLE_BINDING_INTEGRITY 1
#endif
-#if PLATFORM(IOS) || PLATFORM(MAC)
-#define WTF_USE_AVFOUNDATION 1
+#if PLATFORM(COCOA)
+#define USE_AVFOUNDATION 1
#endif
-#if (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 60000) || ((PLATFORM(MAC) && !PLATFORM(IOS)) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080)
-#define WTF_USE_COREMEDIA 1
+#if !defined(ENABLE_TREE_DEBUGGING)
+#if !defined(NDEBUG)
+#define ENABLE_TREE_DEBUGGING 1
+#else
+#define ENABLE_TREE_DEBUGGING 0
+#endif
+#endif
+
+#if PLATFORM(IOS) || PLATFORM(MAC)
+#define USE_COREMEDIA 1
#define HAVE_AVFOUNDATION_VIDEO_OUTPUT 1
#endif
-#if (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 50000) || ((PLATFORM(MAC) && !PLATFORM(IOS)) || (OS(WINDOWS) && USE(CG)) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080)
+#if PLATFORM(IOS) || PLATFORM(MAC) || (OS(WINDOWS) && USE(CG))
#define HAVE_AVFOUNDATION_MEDIA_SELECTION_GROUP 1
#endif
-#if (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 70000) || (((PLATFORM(MAC) && !PLATFORM(IOS)) || (OS(WINDOWS) && USE(CG))) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+#if PLATFORM(IOS) || PLATFORM(MAC) || (OS(WINDOWS) && USE(CG))
#define HAVE_AVFOUNDATION_LEGIBLE_OUTPUT_SUPPORT 1
#define HAVE_MEDIA_ACCESSIBILITY_FRAMEWORK 1
#endif
-#if (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 60000) || ((PLATFORM(MAC) && !PLATFORM(IOS)) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+#if PLATFORM(IOS) || PLATFORM(MAC)
#define HAVE_AVFOUNDATION_LOADER_DELEGATE 1
#endif
-#if (PLATFORM(MAC) && !PLATFORM(IOS)) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
-#define WTF_USE_VIDEOTOOLBOX 1
+#if PLATFORM(MAC)
+#define USE_VIDEOTOOLBOX 1
#endif
-#if PLATFORM(MAC) || PLATFORM(GTK) || (PLATFORM(WIN) && !USE(WINGDI) && !PLATFORM(WIN_CAIRO))
-#define WTF_USE_REQUEST_ANIMATION_FRAME_TIMER 1
+#if PLATFORM(COCOA) || PLATFORM(GTK) || (PLATFORM(WIN) && !USE(WINGDI))
+#define USE_REQUEST_ANIMATION_FRAME_TIMER 1
#endif
-#if PLATFORM(MAC)
-#define WTF_USE_REQUEST_ANIMATION_FRAME_DISPLAY_MONITOR 1
+#if PLATFORM(COCOA)
+#define USE_REQUEST_ANIMATION_FRAME_DISPLAY_MONITOR 1
#endif
-#if PLATFORM(MAC) && !PLATFORM(IOS)
-#define WTF_USE_COREAUDIO 1
+#if PLATFORM(MAC)
+#define USE_COREAUDIO 1
#endif
-#if !defined(WTF_USE_ZLIB)
-#define WTF_USE_ZLIB 1
+#if !defined(USE_ZLIB)
+#define USE_ZLIB 1
#endif
-#if PLATFORM(IOS) || (PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080)
-#define WTF_USE_CONTENT_FILTERING 1
+#ifndef HAVE_QOS_CLASSES
+#if PLATFORM(COCOA)
+#define HAVE_QOS_CLASSES 1
+#endif
#endif
+#ifndef HAVE_VOUCHERS
+#if PLATFORM(COCOA)
+#define HAVE_VOUCHERS 1
+#endif
+#endif
-#define WTF_USE_GRAMMAR_CHECKING 1
+#define USE_GRAMMAR_CHECKING 1
-#if PLATFORM(IOS) || PLATFORM(MAC) || PLATFORM(EFL)
-#define WTF_USE_UNIFIED_TEXT_CHECKING 1
+#if PLATFORM(COCOA) || PLATFORM(GTK)
+#define USE_UNIFIED_TEXT_CHECKING 1
#endif
-#if !PLATFORM(IOS) && PLATFORM(MAC)
-#define WTF_USE_AUTOMATIC_TEXT_REPLACEMENT 1
+#if PLATFORM(MAC)
+#define USE_AUTOMATIC_TEXT_REPLACEMENT 1
#endif
-#if PLATFORM(MAC) && !PLATFORM(IOS)
+#if PLATFORM(MAC)
/* Some platforms provide UI for suggesting autocorrection. */
-#define WTF_USE_AUTOCORRECTION_PANEL 1
+#define USE_AUTOCORRECTION_PANEL 1
#endif
-#if PLATFORM(MAC)
+#if PLATFORM(COCOA)
/* Some platforms use spelling and autocorrection markers to provide visual cue. On such platform, if word with marker is edited, we need to remove the marker. */
-#define WTF_USE_MARKER_REMOVAL_UPON_EDITING 1
+#define USE_MARKER_REMOVAL_UPON_EDITING 1
#endif
-#if PLATFORM(IOS)
-#define WTF_USE_PLATFORM_TEXT_TRACK_MENU 1
+#if PLATFORM(MAC)
+#define USE_INSERTION_UNDO_GROUPING 1
#endif
-#if PLATFORM(MAC) || PLATFORM(IOS)
-#define WTF_USE_AUDIO_SESSION 1
+#if (PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101100) || PLATFORM(IOS)
+#define HAVE_TIMINGDATAOPTIONS 1
#endif
-#if PLATFORM(MAC) && !PLATFORM(IOS_SIMULATOR)
-#define WTF_USE_IOSURFACE 1
+#if PLATFORM(COCOA)
+#define USE_AUDIO_SESSION 1
#endif
-#if PLATFORM(GTK) || PLATFORM(EFL)
+#if PLATFORM(COCOA) && !PLATFORM(IOS_SIMULATOR)
+#define USE_IOSURFACE 1
+#endif
+
+#if PLATFORM(COCOA)
+#define ENABLE_RESOURCE_USAGE 1
+#endif
+
+#if PLATFORM(GTK)
#undef ENABLE_OPENTYPE_VERTICAL
#define ENABLE_OPENTYPE_VERTICAL 1
+#define ENABLE_CSS3_TEXT_DECORATION_SKIP_INK 1
#endif
-#if PLATFORM(MAC)
+#if PLATFORM(GTK)
+#define USE_WOFF2 1
+#endif
+
+#if PLATFORM(COCOA)
#define ENABLE_CSS3_TEXT_DECORATION_SKIP_INK 1
#endif
+#if PLATFORM(IOS) || (PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED > 101000)
+#define ENABLE_PLATFORM_FONT_LOOKUP 1
+#endif
+
#if COMPILER(MSVC)
+#undef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
#undef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
-#undef _HAS_EXCEPTIONS
-#define _HAS_EXCEPTIONS 1
#endif
-#if PLATFORM(MAC) && !PLATFORM(IOS) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090
+#if PLATFORM(MAC)
#define HAVE_NS_ACTIVITY 1
#endif
+#if (OS(DARWIN) && USE(CG)) || (USE(FREETYPE) && !PLATFORM(GTK)) || (PLATFORM(WIN) && (USE(CG) || USE(CAIRO)))
+#undef ENABLE_OPENTYPE_MATH
+#define ENABLE_OPENTYPE_MATH 1
+#endif
+
+/* Set TARGET_OS_IPHONE to 0 by default to allow using it as a guard
+ * in cross-platform code the same way as it is used in OS(DARWIN) code. */
+#if !defined(TARGET_OS_IPHONE) && !OS(DARWIN)
+#define TARGET_OS_IPHONE 0
+#endif
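+/* For illustration (a hypothetical sketch, not part of this header): with the
+ * default above, cross-platform code can write
+ *
+ *     #if TARGET_OS_IPHONE
+ *     // iOS-specific path
+ *     #else
+ *     // all other platforms, including non-Darwin ones
+ *     #endif
+ *
+ * and non-Darwin builds simply take the #else branch instead of relying on the
+ * preprocessor treating an undefined macro as 0. */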
+
+#if PLATFORM(COCOA)
+#define USE_MEDIATOOLBOX 1
+#endif
+
+/* While 10.10 has support for fences, it is missing some API important for our integration of them. */
+#if PLATFORM(IOS) || (PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
+#define HAVE_COREANIMATION_FENCES 1
+#endif
+
+/* FIXME: Enable USE_OS_LOG when building with the public iOS 10 SDK once we fix <rdar://problem/27758343>. */
+#if (PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101200) || (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000 && USE(APPLE_INTERNAL_SDK))
+#define USE_OS_LOG 1
+#if USE(APPLE_INTERNAL_SDK)
+#define USE_OS_STATE 1
+#endif
+#endif
+
+#if (PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101200) || (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000)
+#define HAVE_SEC_TRUST_SERIALIZATION 1
+#endif
+
+#if !defined(WTF_DEFAULT_EVENT_LOOP)
+#define WTF_DEFAULT_EVENT_LOOP 1
+#endif
+
+#if WTF_DEFAULT_EVENT_LOOP
+#if PLATFORM(WIN)
+/* Use the Windows message pump abstraction.
+ * Even for the AppleWin port we use the Windows message pump for the event loop,
+ * so that USE(WINDOWS_EVENT_LOOP) && USE(CF) can both be true.
+ * PLATFORM(WIN) and PLATFORM(GTK) are mutually exclusive: if the port is GTK,
+ * PLATFORM(WIN) is false and GLib's event loop is used instead.
+ */
+#define USE_WINDOWS_EVENT_LOOP 1
+#elif PLATFORM(COCOA)
+/* OS X and iOS. Use the CoreFoundation & GCD abstraction. */
+#define USE_COCOA_EVENT_LOOP 1
+#elif USE(GLIB)
+/* Use GLib's event loop abstraction. Primarily GTK port uses it. */
+#define USE_GLIB_EVENT_LOOP 1
+#else
+#define USE_GENERIC_EVENT_LOOP 1
+#endif
+#endif
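+/* A minimal sketch (hypothetical, not part of this header) of how port code can
+ * branch on whichever event loop abstraction was selected above:
+ *
+ *     #if USE(WINDOWS_EVENT_LOOP)
+ *     // drive the loop with the Windows message pump
+ *     #elif USE(COCOA_EVENT_LOOP)
+ *     // schedule work on CFRunLoop / GCD
+ *     #elif USE(GLIB_EVENT_LOOP)
+ *     // attach sources to the GLib main context
+ *     #else
+ *     // USE(GENERIC_EVENT_LOOP): WTF's own generic run loop
+ *     #endif
+ */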
+
+#if PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101200
+#define USE_MEDIAREMOTE 1
+#endif
+
+#if COMPILER(MSVC)
+/* Enable strict runtime stack buffer checks. */
+#pragma strict_gs_check(on)
+#endif
+
+#if PLATFORM(MAC) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 101201 && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101200
+#define HAVE_TOUCH_BAR 1
+#define HAVE_ADVANCED_SPELL_CHECKING 1
+
+#if defined(__LP64__)
+#define ENABLE_WEB_PLAYBACK_CONTROLS_MANAGER 1
+#endif
+#endif /* PLATFORM(MAC) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 101201 && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101200 */
+
#endif /* WTF_Platform_h */
diff --git a/Source/WTF/wtf/PlatformGTK.cmake b/Source/WTF/wtf/PlatformGTK.cmake
new file mode 100644
index 000000000..c32f7ebbf
--- /dev/null
+++ b/Source/WTF/wtf/PlatformGTK.cmake
@@ -0,0 +1,26 @@
+set(WTF_LIBRARY_TYPE STATIC)
+set(WTF_OUTPUT_NAME WTFGTK)
+
+list(APPEND WTF_SOURCES
+ generic/WorkQueueGeneric.cpp
+ glib/GLibUtilities.cpp
+ glib/GRefPtr.cpp
+ glib/MainThreadGLib.cpp
+ glib/RunLoopGLib.cpp
+ PlatformUserPreferredLanguagesUnix.cpp
+ UniStdExtras.cpp
+
+ text/unix/TextBreakIteratorInternalICUUnix.cpp
+)
+
+list(APPEND WTF_LIBRARIES
+ ${GLIB_GIO_LIBRARIES}
+ ${GLIB_GOBJECT_LIBRARIES}
+ ${GLIB_LIBRARIES}
+ pthread
+ ${ZLIB_LIBRARIES}
+)
+
+list(APPEND WTF_SYSTEM_INCLUDE_DIRECTORIES
+ ${GLIB_INCLUDE_DIRS}
+)
diff --git a/Source/WTF/wtf/PlatformJSCOnly.cmake b/Source/WTF/wtf/PlatformJSCOnly.cmake
new file mode 100644
index 000000000..679fb2b7a
--- /dev/null
+++ b/Source/WTF/wtf/PlatformJSCOnly.cmake
@@ -0,0 +1,32 @@
+list(APPEND WTF_SOURCES
+ PlatformUserPreferredLanguagesUnix.cpp
+
+ text/jsconly/TextBreakIteratorInternalICUJSCOnly.cpp
+)
+
+if (LOWERCASE_EVENT_LOOP_TYPE STREQUAL "glib")
+ list(APPEND WTF_SOURCES
+ glib/GRefPtr.cpp
+ glib/MainThreadGLib.cpp
+ glib/RunLoopGLib.cpp
+ glib/WorkQueueGLib.cpp
+ )
+ list(APPEND WTF_SYSTEM_INCLUDE_DIRECTORIES
+ ${GLIB_INCLUDE_DIRS}
+ )
+ list(APPEND WTF_LIBRARIES
+ ${GLIB_GIO_LIBRARIES}
+ ${GLIB_GOBJECT_LIBRARIES}
+ ${GLIB_LIBRARIES}
+ )
+else ()
+ list(APPEND WTF_SOURCES
+ generic/MainThreadGeneric.cpp
+ generic/RunLoopGeneric.cpp
+ generic/WorkQueueGeneric.cpp
+ )
+endif ()
+
+list(APPEND WTF_LIBRARIES
+ ${CMAKE_THREAD_LIBS_INIT}
+)
diff --git a/Source/WTF/wtf/PlatformUserPreferredLanguages.h b/Source/WTF/wtf/PlatformUserPreferredLanguages.h
new file mode 100644
index 000000000..93580fc76
--- /dev/null
+++ b/Source/WTF/wtf/PlatformUserPreferredLanguages.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PlatformUserPreferredLanguages_h
+#define PlatformUserPreferredLanguages_h
+
+#include <wtf/Vector.h>
+#include <wtf/text/WTFString.h>
+
+namespace WTF {
+
+WTF_EXPORT_PRIVATE void setPlatformUserPreferredLanguagesChangedCallback(void (*)());
+
+// This is thread-safe.
+WTF_EXPORT_PRIVATE Vector<String> platformUserPreferredLanguages();
+
+} // namespace WTF
+
+using WTF::setPlatformUserPreferredLanguagesChangedCallback;
+using WTF::platformUserPreferredLanguages;
+
+#endif // PlatformUserPreferredLanguages_h
+
diff --git a/Source/WTF/wtf/PlatformUserPreferredLanguagesUnix.cpp b/Source/WTF/wtf/PlatformUserPreferredLanguagesUnix.cpp
new file mode 100644
index 000000000..a1872d89a
--- /dev/null
+++ b/Source/WTF/wtf/PlatformUserPreferredLanguagesUnix.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2007 Alp Toker <alp@atoker.com>
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#include "config.h"
+#include "PlatformUserPreferredLanguages.h"
+
+#include <locale.h>
+#include <wtf/Vector.h>
+#include <wtf/text/WTFString.h>
+
+namespace WTF {
+
+void setPlatformUserPreferredLanguagesChangedCallback(void (*)()) { }
+
+// Using pango_language_get_default() here is not an option, because
+// it doesn't support changing the locale at runtime, so it always
+// returns the same value.
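+// As a worked example of the normalization below (assuming setlocale() returns
+// "en_US.UTF-8"): '_' is replaced with '-', everything from the '.' onwards is
+// dropped, and the result is "en-US".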
+static String platformLanguage()
+{
+ String localeDefault(setlocale(LC_CTYPE, nullptr));
+ if (localeDefault.isEmpty() || equalIgnoringASCIICase(localeDefault, "C") || equalIgnoringASCIICase(localeDefault, "POSIX"))
+ return ASCIILiteral("en-US");
+
+ String normalizedDefault = localeDefault;
+ normalizedDefault.replace('_', '-');
+ normalizedDefault.truncate(normalizedDefault.find('.'));
+ return normalizedDefault;
+}
+
+Vector<String> platformUserPreferredLanguages()
+{
+ return { platformLanguage() };
+}
+
+} // namespace WTF
diff --git a/Source/WTF/wtf/PlatformUserPreferredLanguagesWin.cpp b/Source/WTF/wtf/PlatformUserPreferredLanguagesWin.cpp
new file mode 100644
index 000000000..79ad23c54
--- /dev/null
+++ b/Source/WTF/wtf/PlatformUserPreferredLanguagesWin.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2007, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PlatformUserPreferredLanguages.h"
+
+#include <mutex>
+#include <windows.h>
+#include <wtf/Lock.h>
+#include <wtf/Vector.h>
+#include <wtf/text/WTFString.h>
+
+namespace WTF {
+
+static StaticLock platformLanguageMutex;
+
+void setPlatformUserPreferredLanguagesChangedCallback(void (*)()) { }
+
+static String localeInfo(LCTYPE localeType, const String& fallback)
+{
+ LANGID langID = GetUserDefaultUILanguage();
+ int localeChars = GetLocaleInfo(langID, localeType, 0, 0);
+ if (!localeChars)
+ return fallback;
+ UChar* localeNameBuf;
+ String localeName = String::createUninitialized(localeChars, localeNameBuf);
+ localeChars = GetLocaleInfo(langID, localeType, localeNameBuf, localeChars);
+ if (!localeChars)
+ return fallback;
+ if (localeName.isEmpty())
+ return fallback;
+
+ localeName.truncate(localeName.length() - 1);
+ return localeName;
+}
+
+static String platformLanguage()
+{
+ std::lock_guard<StaticLock> lock(platformLanguageMutex);
+
+ static String computedDefaultLanguage;
+ if (!computedDefaultLanguage.isEmpty())
+ return computedDefaultLanguage.isolatedCopy();
+
+ String languageName = localeInfo(LOCALE_SISO639LANGNAME, "en");
+ String countryName = localeInfo(LOCALE_SISO3166CTRYNAME, String());
+
+ if (countryName.isEmpty())
+ computedDefaultLanguage = languageName;
+ else
+ computedDefaultLanguage = languageName + '-' + countryName;
+
+ return computedDefaultLanguage;
+}
+
+Vector<String> platformUserPreferredLanguages()
+{
+ return { platformLanguage() };
+}
+
+} // namespace WTF
diff --git a/Source/WTF/wtf/PointerComparison.h b/Source/WTF/wtf/PointerComparison.h
new file mode 100644
index 000000000..fe64297df
--- /dev/null
+++ b/Source/WTF/wtf/PointerComparison.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_PointerComparison_h
+#define WTF_PointerComparison_h
+
+namespace WTF {
+
+template<typename T> inline bool arePointingToEqualData(const T& a, const T& b)
+{
+ return a == b || (a && b && *a == *b);
+}
+
+} // namespace WTF
+
+using WTF::arePointingToEqualData;
+
+#endif // WTF_PointerComparison_h
diff --git a/Source/WTF/wtf/PrintStream.cpp b/Source/WTF/wtf/PrintStream.cpp
index bb7d39db9..97faf3031 100644
--- a/Source/WTF/wtf/PrintStream.cpp
+++ b/Source/WTF/wtf/PrintStream.cpp
@@ -28,6 +28,7 @@
#include <stdio.h>
#include <wtf/text/CString.h>
+#include <wtf/text/UniquedStringImpl.h>
#include <wtf/text/WTFString.h>
namespace WTF {
@@ -43,15 +44,49 @@ void PrintStream::printf(const char* format, ...)
va_end(argList);
}
+void PrintStream::printfVariableFormat(const char* format, ...)
+{
+#if COMPILER(CLANG)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wformat-nonliteral"
+#elif COMPILER(GCC)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
+#endif
+ va_list argList;
+ va_start(argList, format);
+ vprintf(format, argList);
+ va_end(argList);
+#if COMPILER(CLANG)
+#pragma clang diagnostic pop
+#elif COMPILER(GCC)
+#pragma GCC diagnostic pop
+#endif
+}
+
void PrintStream::flush()
{
}
+PrintStream& PrintStream::begin()
+{
+ return *this;
+}
+
+void PrintStream::end()
+{
+}
+
void printInternal(PrintStream& out, const char* string)
{
out.printf("%s", string);
}
+void printInternal(PrintStream& out, const StringView& string)
+{
+ out.print(string.utf8());
+}
+
void printInternal(PrintStream& out, const CString& string)
{
out.print(string.data());
diff --git a/Source/WTF/wtf/PrintStream.h b/Source/WTF/wtf/PrintStream.h
index 329ad0782..6a8060b1d 100644
--- a/Source/WTF/wtf/PrintStream.h
+++ b/Source/WTF/wtf/PrintStream.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,18 +26,23 @@
#ifndef PrintStream_h
#define PrintStream_h
+#include <memory>
#include <stdarg.h>
#include <wtf/FastMalloc.h>
#include <wtf/Noncopyable.h>
-#include <wtf/Platform.h>
+#include <wtf/Optional.h>
#include <wtf/RawPointer.h>
+#include <wtf/RefPtr.h>
#include <wtf/StdLibExtras.h>
namespace WTF {
+class AtomicStringImpl;
class CString;
class String;
class StringImpl;
+class StringView;
+class UniquedStringImpl;
class PrintStream {
WTF_MAKE_FAST_ALLOCATED; WTF_MAKE_NONCOPYABLE(PrintStream);
@@ -46,216 +51,64 @@ public:
virtual ~PrintStream();
WTF_EXPORT_PRIVATE void printf(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+ WTF_EXPORT_PRIVATE void printfVariableFormat(const char* format, ...);
virtual void vprintf(const char* format, va_list) WTF_ATTRIBUTE_PRINTF(2, 0) = 0;
// Typically a no-op for many subclasses of PrintStream, this is a hint that
// the implementation should flush its buffers if it had not done so already.
virtual void flush();
- template<typename T>
- void print(const T& value)
+ template<typename Func>
+ void atomically(const Func& func)
{
- printInternal(*this, value);
- }
-
- template<typename T1, typename T2>
- void print(const T1& value1, const T2& value2)
- {
- print(value1);
- print(value2);
- }
-
- template<typename T1, typename T2, typename T3>
- void print(const T1& value1, const T2& value2, const T3& value3)
- {
- print(value1);
- print(value2);
- print(value3);
- }
-
- template<typename T1, typename T2, typename T3, typename T4>
- void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4)
- {
- print(value1);
- print(value2);
- print(value3);
- print(value4);
- }
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5>
- void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5)
- {
- print(value1);
- print(value2);
- print(value3);
- print(value4);
- print(value5);
- }
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
- void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6)
- {
- print(value1);
- print(value2);
- print(value3);
- print(value4);
- print(value5);
- print(value6);
- }
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
- void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7)
- {
- print(value1);
- print(value2);
- print(value3);
- print(value4);
- print(value5);
- print(value6);
- print(value7);
- }
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
- void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8)
- {
- print(value1);
- print(value2);
- print(value3);
- print(value4);
- print(value5);
- print(value6);
- print(value7);
- print(value8);
- }
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
- void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9)
- {
- print(value1);
- print(value2);
- print(value3);
- print(value4);
- print(value5);
- print(value6);
- print(value7);
- print(value8);
- print(value9);
- }
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10>
- void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10)
- {
- print(value1);
- print(value2);
- print(value3);
- print(value4);
- print(value5);
- print(value6);
- print(value7);
- print(value8);
- print(value9);
- print(value10);
- }
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11>
- void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11)
- {
- print(value1);
- print(value2);
- print(value3);
- print(value4);
- print(value5);
- print(value6);
- print(value7);
- print(value8);
- print(value9);
- print(value10);
- print(value11);
+ func(begin());
+ end();
}
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12>
- void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12)
+ template<typename... Types>
+ void print(const Types&... values)
{
- print(value1);
- print(value2);
- print(value3);
- print(value4);
- print(value5);
- print(value6);
- print(value7);
- print(value8);
- print(value9);
- print(value10);
- print(value11);
- print(value12);
+ atomically(
+ [&] (PrintStream& out) {
+ out.printImpl(values...);
+ });
}
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13>
- void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12, const T13& value13)
+ template<typename... Types>
+ void println(const Types&... values)
{
- print(value1);
- print(value2);
- print(value3);
- print(value4);
- print(value5);
- print(value6);
- print(value7);
- print(value8);
- print(value9);
- print(value10);
- print(value11);
- print(value12);
- print(value13);
+ print(values..., "\n");
}
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13, typename T14>
- void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12, const T13& value13, const T14& value14)
+
+protected:
+ void printImpl() { }
+
+ template<typename T, typename... Types>
+ void printImpl(const T& value, const Types&... remainingValues)
{
- print(value1);
- print(value2);
- print(value3);
- print(value4);
- print(value5);
- print(value6);
- print(value7);
- print(value8);
- print(value9);
- print(value10);
- print(value11);
- print(value12);
- print(value13);
- print(value14);
+ printInternal(*this, value);
+ printImpl(remainingValues...);
}
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13, typename T14, typename T15>
- void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12, const T13& value13, const T14& value14, const T15& value15)
- {
- print(value1);
- print(value2);
- print(value3);
- print(value4);
- print(value5);
- print(value6);
- print(value7);
- print(value8);
- print(value9);
- print(value10);
- print(value11);
- print(value12);
- print(value13);
- print(value14);
- print(value15);
- }
+ virtual PrintStream& begin();
+ virtual void end();
};
WTF_EXPORT_PRIVATE void printInternal(PrintStream&, const char*);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, const StringView&);
WTF_EXPORT_PRIVATE void printInternal(PrintStream&, const CString&);
WTF_EXPORT_PRIVATE void printInternal(PrintStream&, const String&);
WTF_EXPORT_PRIVATE void printInternal(PrintStream&, const StringImpl*);
+inline void printInternal(PrintStream& out, const AtomicStringImpl* value) { printInternal(out, bitwise_cast<const StringImpl*>(value)); }
+inline void printInternal(PrintStream& out, const UniquedStringImpl* value) { printInternal(out, bitwise_cast<const StringImpl*>(value)); }
+inline void printInternal(PrintStream& out, const UniquedStringImpl& value) { printInternal(out, &value); }
inline void printInternal(PrintStream& out, char* value) { printInternal(out, static_cast<const char*>(value)); }
inline void printInternal(PrintStream& out, CString& value) { printInternal(out, static_cast<const CString&>(value)); }
inline void printInternal(PrintStream& out, String& value) { printInternal(out, static_cast<const String&>(value)); }
inline void printInternal(PrintStream& out, StringImpl* value) { printInternal(out, static_cast<const StringImpl*>(value)); }
+inline void printInternal(PrintStream& out, AtomicStringImpl* value) { printInternal(out, static_cast<const AtomicStringImpl*>(value)); }
+inline void printInternal(PrintStream& out, UniquedStringImpl* value) { printInternal(out, static_cast<const UniquedStringImpl*>(value)); }
+inline void printInternal(PrintStream& out, UniquedStringImpl& value) { printInternal(out, &value); }
WTF_EXPORT_PRIVATE void printInternal(PrintStream&, bool);
WTF_EXPORT_PRIVATE void printInternal(PrintStream&, signed char); // NOTE: this prints as a number, not as a character; use CharacterDump if you want the character
WTF_EXPORT_PRIVATE void printInternal(PrintStream&, unsigned char); // NOTE: see above.
@@ -339,6 +192,18 @@ private:
template<typename T>
PointerDump<T> pointerDump(const T* ptr) { return PointerDump<T>(ptr); }
+template<typename T>
+void printInternal(PrintStream& out, const std::unique_ptr<T>& value)
+{
+ out.print(pointerDump(value.get()));
+}
+
+template<typename T>
+void printInternal(PrintStream& out, const RefPtr<T>& value)
+{
+ out.print(pointerDump(value.get()));
+}
+
template<typename T, typename U>
class ValueInContext {
public:
@@ -365,6 +230,34 @@ ValueInContext<T, U> inContext(const T& value, U* context)
}
template<typename T, typename U>
+class PointerDumpInContext {
+public:
+ PointerDumpInContext(const T* ptr, U* context)
+ : m_ptr(ptr)
+ , m_context(context)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ if (m_ptr)
+ m_ptr->dumpInContext(out, m_context);
+ else
+ out.print("(null)");
+ }
+
+private:
+ const T* m_ptr;
+ U* m_context;
+};
+
+template<typename T, typename U>
+PointerDumpInContext<T, U> pointerDumpInContext(const T* ptr, U* context)
+{
+ return PointerDumpInContext<T, U>(ptr, context);
+}
+
+template<typename T, typename U>
class ValueIgnoringContext {
public:
ValueIgnoringContext(const U& value)
@@ -388,14 +281,70 @@ ValueIgnoringContext<T, U> ignoringContext(const U& value)
return ValueIgnoringContext<T, U>(value);
}
+template<unsigned index, typename... Types>
+struct FormatImplUnpacker {
+ template<typename... Args>
+ static void unpack(PrintStream& out, const std::tuple<Types...>& tuple, const Args&... values);
+};
+
+template<typename... Types>
+struct FormatImplUnpacker<0, Types...> {
+ template<typename... Args>
+ static void unpack(PrintStream& out, const std::tuple<Types...>& tuple, const Args&... values)
+ {
+ out.printfVariableFormat(std::get<0>(tuple), values...);
+ }
+};
+
+template<unsigned index, typename... Types>
+template<typename... Args>
+void FormatImplUnpacker<index, Types...>::unpack(PrintStream& out, const std::tuple<Types...>& tuple, const Args&... values)
+{
+ FormatImplUnpacker<index - 1, Types...>::unpack(out, tuple, std::get<index>(tuple), values...);
+}
+
+template<typename... Types>
+class FormatImpl {
+public:
+ FormatImpl(Types... values)
+ : m_values(values...)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ FormatImplUnpacker<sizeof...(Types) - 1, Types...>::unpack(out, m_values);
+ }
+
+private:
+ std::tuple<Types...> m_values;
+};
+
+template<typename... Types>
+FormatImpl<Types...> format(Types... values)
+{
+ return FormatImpl<Types...>(values...);
+}
+
+template<typename T>
+void printInternal(PrintStream& out, const std::optional<T>& value)
+{
+ if (value)
+ out.print(*value);
+ else
+ out.print("<nullopt>");
+}
+
} // namespace WTF
using WTF::CharacterDump;
using WTF::PointerDump;
using WTF::PrintStream;
+using WTF::format;
using WTF::ignoringContext;
using WTF::inContext;
using WTF::pointerDump;
+using WTF::pointerDumpInContext;
#endif // PrintStream_h
diff --git a/Source/WTF/wtf/ProcessID.h b/Source/WTF/wtf/ProcessID.h
index f2d53932e..f84716e8d 100644
--- a/Source/WTF/wtf/ProcessID.h
+++ b/Source/WTF/wtf/ProcessID.h
@@ -26,8 +26,6 @@
#ifndef ProcessID_h
#define ProcessID_h
-#include <wtf/Platform.h>
-
#if OS(UNIX)
#include <unistd.h>
#endif
diff --git a/Source/WTF/wtf/RAMSize.cpp b/Source/WTF/wtf/RAMSize.cpp
index 50b5222ea..deb69e188 100644
--- a/Source/WTF/wtf/RAMSize.cpp
+++ b/Source/WTF/wtf/RAMSize.cpp
@@ -27,10 +27,14 @@
#include "RAMSize.h"
#include "StdLibExtras.h"
+#include <mutex>
+
#if OS(DARWIN)
-#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/sysctl.h>
+#import <dispatch/dispatch.h>
+#import <mach/host_info.h>
+#import <mach/mach.h>
+#import <mach/mach_error.h>
+#import <math.h>
#elif OS(UNIX)
#include <unistd.h>
#elif OS(WINDOWS)
@@ -39,35 +43,40 @@
namespace WTF {
-static const size_t ramSizeGuess = 128 * MB;
+static const size_t ramSizeGuess = 512 * MB;
static size_t computeRAMSize()
{
-#if OS(DARWIN)
- int mib[2];
- uint64_t ramSize;
- size_t length;
+#if PLATFORM(IOS_SIMULATOR)
+ // Pretend we have 512MB of memory to make cache sizes behave like on device.
+ return ramSizeGuess;
+#elif OS(DARWIN)
+ host_basic_info_data_t hostInfo;
- mib[0] = CTL_HW;
- mib[1] = HW_MEMSIZE;
- length = sizeof(int64_t);
- int sysctlResult = sysctl(mib, 2, &ramSize, &length, 0, 0);
- if (sysctlResult == -1)
+ mach_port_t host = mach_host_self();
+ mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+ kern_return_t r = host_info(host, HOST_BASIC_INFO, (host_info_t)&hostInfo, &count);
+ mach_port_deallocate(mach_task_self(), host);
+ if (r != KERN_SUCCESS) {
+ LOG_ERROR("%s : host_info(%d) : %s.\n", __FUNCTION__, r, mach_error_string(r));
return ramSizeGuess;
- return ramSize > std::numeric_limits<size_t>::max() ? std::numeric_limits<size_t>::max() : static_cast<size_t>(ramSize);
+ }
+
+ if (hostInfo.max_mem > std::numeric_limits<size_t>::max())
+ return std::numeric_limits<size_t>::max();
+
+ size_t sizeAccordingToKernel = static_cast<size_t>(hostInfo.max_mem);
+ size_t multiple = 128 * MB;
+
+ // Round up the memory size to a multiple of 128MB because max_mem may not be exactly 512MB
+ // (for example) and we have code that depends on those boundaries.
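+    // For example, a reported max_mem of 500 * MB rounds up to 512 * MB here.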
+ return ((sizeAccordingToKernel + multiple - 1) / multiple) * multiple;
#elif OS(UNIX)
long pages = sysconf(_SC_PHYS_PAGES);
long pageSize = sysconf(_SC_PAGE_SIZE);
if (pages == -1 || pageSize == -1)
return ramSizeGuess;
return pages * pageSize;
-#elif OS(WINCE)
- MEMORYSTATUS status;
- status.dwLength = sizeof(status);
- GlobalMemoryStatus(&status);
- if (status.dwTotalPhys <= 0)
- return ramSizeGuess;
- return status.dwTotalPhys;
#elif OS(WINDOWS)
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
@@ -80,7 +89,11 @@ static size_t computeRAMSize()
size_t ramSize()
{
- static const size_t ramSize = computeRAMSize();
+ static size_t ramSize;
+ static std::once_flag onceFlag;
+ std::call_once(onceFlag, [] {
+ ramSize = computeRAMSize();
+ });
return ramSize;
}
diff --git a/Source/WTF/wtf/RandomNumber.cpp b/Source/WTF/wtf/RandomNumber.cpp
index 7c764d3fc..d39035e61 100644
--- a/Source/WTF/wtf/RandomNumber.cpp
+++ b/Source/WTF/wtf/RandomNumber.cpp
@@ -12,10 +12,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
diff --git a/Source/WTF/wtf/RandomNumber.h b/Source/WTF/wtf/RandomNumber.h
index fc5711423..206bd6f84 100644
--- a/Source/WTF/wtf/RandomNumber.h
+++ b/Source/WTF/wtf/RandomNumber.h
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
diff --git a/Source/WTF/wtf/RandomNumberSeed.h b/Source/WTF/wtf/RandomNumberSeed.h
index 9290ec4ff..a65d7d96f 100644
--- a/Source/WTF/wtf/RandomNumberSeed.h
+++ b/Source/WTF/wtf/RandomNumberSeed.h
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -45,9 +45,6 @@ inline void initializeRandomNumberGenerator()
{
#if OS(DARWIN)
// On Darwin we use arc4random which initialises itself.
-#elif OS(WINCE)
- // initialize rand()
- srand(GetTickCount());
#elif COMPILER(MSVC) && defined(_CRT_RAND_S)
// On Windows we use rand_s which initialises itself
#elif OS(UNIX)
diff --git a/Source/WTF/wtf/RangeSet.h b/Source/WTF/wtf/RangeSet.h
new file mode 100644
index 000000000..75471ad7d
--- /dev/null
+++ b/Source/WTF/wtf/RangeSet.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_RangeSet_h
+#define WTF_RangeSet_h
+
+#include <wtf/ListDump.h>
+#include <wtf/MathExtras.h>
+#include <wtf/StdLibExtras.h>
+#include <wtf/Vector.h>
+
+namespace WTF {
+
+// A RangeSet is a set of numerical ranges. A value belongs to the set if it is within any of the
+// ranges. A range belongs to the set if every value in the range belongs to the set. A range overlaps
+// the set if any value in the range belongs to the set. You can add ranges and query range
+// membership. The internal representation is a list of ranges that gets periodically compacted. This
+// representation is optimal so long as the number of distinct ranges tends to be small, and the
+// number of range sets tends to be small as well. This works reasonably well in a bunch of compiler
+// algorithms, where the top range ends up being used a lot.
+//
+// The initial user of this is JSC::B3::HeapRange, which is used to perform alias analysis. You can
+// model new users on that class. Basically, you need to define:
+//
+// T::Type - the type of the members of the range. HeapRange uses unsigned.
+// T(T::Type begin, T::Type end) - construct a new range.
+// T::Type T::begin() const - instance method giving the inclusive beginning of the range.
+// T::Type T::end() const - instance method giving the exclusive end of the range.
+// void T::dump(PrintStream&) const - some kind of dumping.
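+//
+// A minimal sketch of a conforming range type and its use (hypothetical, for
+// illustration only; see JSC::B3::HeapRange for the real thing):
+//
+//     struct ByteRange {
+//         typedef unsigned Type;
+//         ByteRange(unsigned begin, unsigned end) : m_begin(begin), m_end(end) { }
+//         unsigned begin() const { return m_begin; }
+//         unsigned end() const { return m_end; }
+//         void dump(PrintStream& out) const { out.print(m_begin, "...", m_end); }
+//         unsigned m_begin;
+//         unsigned m_end;
+//     };
+//
+//     RangeSet<ByteRange> set;
+//     set.add(ByteRange(0, 8));
+//     set.add(ByteRange(8, 16));
+//     set.contains(ByteRange(0, 16));  // true: the two ranges merge on compaction.
+//     set.overlaps(ByteRange(12, 32)); // true: [12, 32) intersects [0, 16).
+//     set.contains(ByteRange(12, 32)); // false: [12, 32) is not fully covered.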
+
+template<typename RangeType>
+class RangeSet {
+public:
+ typedef RangeType Range;
+ typedef typename Range::Type Type;
+
+ RangeSet()
+ {
+ }
+
+ ~RangeSet()
+ {
+ }
+
+ void add(const Range& range)
+ {
+ if (range.begin() == range.end())
+ return;
+
+        // We expect the range set to become top in a lot of cases, and we expect the same range to
+        // be added repeatedly; that is why this early-out against the last range is here.
+ if (!m_ranges.isEmpty() && subsumesNonEmpty(m_ranges.last(), range))
+ return;
+
+ m_isCompact = false;
+
+ // We append without compacting only if doing so is guaranteed not to resize the vector.
+ if (m_ranges.size() + 1 < m_ranges.capacity()) {
+ m_ranges.append(range);
+ return;
+ }
+
+ m_ranges.append(range);
+ compact();
+ }
+
+ bool contains(const Range& range) const
+ {
+ if (range.begin() == range.end())
+ return false;
+
+ unsigned index = findRange(range);
+ if (index != UINT_MAX)
+ return subsumesNonEmpty(m_ranges[index], range);
+ return false;
+ }
+
+ bool overlaps(const Range& range) const
+ {
+ if (range.begin() == range.end())
+ return false;
+
+ return findRange(range) != UINT_MAX;
+ }
+
+ void clear()
+ {
+ m_ranges.clear();
+ m_isCompact = true;
+ }
+
+ void dump(PrintStream& out) const
+ {
+ const_cast<RangeSet*>(this)->compact();
+ out.print(listDump(m_ranges));
+ }
+
+ void dumpRaw(PrintStream& out) const
+ {
+ out.print("{", listDump(m_ranges), ", isCompact = ", m_isCompact, "}");
+ }
+
+private:
+ void compact()
+ {
+ if (m_isCompact)
+ return;
+
+ if (m_ranges.isEmpty()) {
+ m_isCompact = true;
+ return;
+ }
+
+ std::sort(
+ m_ranges.begin(), m_ranges.end(),
+ [&] (const Range& a, const Range& b) -> bool {
+ return a.begin() < b.begin();
+ });
+
+ unsigned srcIndex = 1;
+ unsigned dstIndex = 1;
+ Range* lastRange = &m_ranges[0];
+ while (srcIndex < m_ranges.size()) {
+ Range range = m_ranges[srcIndex++];
+ ASSERT(range.begin() >= lastRange->begin());
+ if (range.end() <= lastRange->end())
+ continue;
+ if (range.begin() <= lastRange->end()) {
+ *lastRange = Range(lastRange->begin(), range.end());
+ continue;
+ }
+ ASSERT(!overlapsNonEmpty(*lastRange, range));
+ lastRange = &m_ranges[dstIndex++];
+ *lastRange = range;
+ }
+ m_ranges.resize(dstIndex);
+
+ m_isCompact = true;
+ }
+
+ static bool overlapsNonEmpty(const Range& a, const Range& b)
+ {
+ return nonEmptyRangesOverlap(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+ static bool subsumesNonEmpty(const Range& a, const Range& b)
+ {
+ return a.begin() <= b.begin() && a.end() >= b.end();
+ }
+
+ unsigned findRange(const Range& range) const
+ {
+ const_cast<RangeSet*>(this)->compact();
+
+ // FIXME: Once we start using this in anger, we will want this to be a binary search.
+ for (unsigned i = 0; i < m_ranges.size(); ++i) {
+ if (overlapsNonEmpty(m_ranges[i], range))
+ return i;
+ }
+
+ return UINT_MAX;
+ }
+
+ Vector<Range, 8> m_ranges;
+ bool m_isCompact { true };
+};
+
+} // namespace WTF
+
+using WTF::RangeSet;
+
+#endif // WTF_RangeSet_h
+
diff --git a/Source/WTF/wtf/RecursiveLockAdapter.h b/Source/WTF/wtf/RecursiveLockAdapter.h
new file mode 100644
index 000000000..97a243b90
--- /dev/null
+++ b/Source/WTF/wtf/RecursiveLockAdapter.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/Threading.h>
+
+namespace WTF {
+
+template<typename LockType>
+class RecursiveLockAdapter {
+public:
+ RecursiveLockAdapter()
+ {
+ }
+
+ void lock()
+ {
+ ThreadIdentifier me = currentThread();
+ if (me == m_owner) {
+ m_recursionCount++;
+ return;
+ }
+
+ m_lock.lock();
+ ASSERT(!m_owner);
+ ASSERT(!m_recursionCount);
+ m_owner = me;
+ m_recursionCount = 1;
+ }
+
+ void unlock()
+ {
+ if (--m_recursionCount)
+ return;
+ m_owner = 0;
+ m_lock.unlock();
+ }
+
+ bool tryLock()
+ {
+ ThreadIdentifier me = currentThread();
+ if (me == m_owner) {
+ m_recursionCount++;
+ return true;
+ }
+
+ if (!m_lock.tryLock())
+ return false;
+
+ ASSERT(!m_owner);
+ ASSERT(!m_recursionCount);
+ m_owner = me;
+ m_recursionCount = 1;
+ return true;
+ }
+
+ bool isLocked() const
+ {
+ return m_lock.isLocked();
+ }
+
+private:
+ ThreadIdentifier m_owner { 0 };
+ unsigned m_recursionCount { 0 };
+ LockType m_lock;
+};
+
+} // namespace WTF
+
+
diff --git a/Source/WTF/wtf/RedBlackTree.h b/Source/WTF/wtf/RedBlackTree.h
index 19460c141..7dda72a5b 100644
--- a/Source/WTF/wtf/RedBlackTree.h
+++ b/Source/WTF/wtf/RedBlackTree.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WTF/wtf/Ref.h b/Source/WTF/wtf/Ref.h
index 6ff7c8223..e955352df 100644
--- a/Source/WTF/wtf/Ref.h
+++ b/Source/WTF/wtf/Ref.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,55 +26,211 @@
#ifndef WTF_Ref_h
#define WTF_Ref_h
-#include "Noncopyable.h"
+#include <wtf/Assertions.h>
+#include <wtf/GetPtr.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/StdLibExtras.h>
+#include <wtf/TypeCasts.h>
+
+#if ASAN_ENABLED
+extern "C" void __asan_poison_memory_region(void const volatile *addr, size_t size);
+extern "C" void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
+extern "C" bool __asan_address_is_poisoned(void const volatile *addr);
+#endif
namespace WTF {
-template<typename T> class PassRef;
+inline void adopted(const void*) { }
+
+template<typename T> class Ref;
+template<typename T> Ref<T> adoptRef(T&);
template<typename T> class Ref {
- WTF_MAKE_NONCOPYABLE(Ref)
public:
- Ref(T& object) : m_ptr(&object) { m_ptr->ref(); }
- template<typename U> Ref(PassRef<U> reference) : m_ptr(&reference.leakRef()) { }
+ static constexpr bool isRef = true;
+
+ ~Ref()
+ {
+#if ASAN_ENABLED
+ if (__asan_address_is_poisoned(this))
+ __asan_unpoison_memory_region(this, sizeof(*this));
+#endif
+ if (m_ptr)
+ m_ptr->deref();
+ }
+
+ Ref(T& object)
+ : m_ptr(&object)
+ {
+ m_ptr->ref();
+ }
- ~Ref() { m_ptr->deref(); }
+ // Use copyRef() instead.
+ Ref(const Ref& other) = delete;
+ template<typename U> Ref(const Ref<U>& other) = delete;
+
+ Ref(Ref&& other)
+ : m_ptr(&other.leakRef())
+ {
+ ASSERT(m_ptr);
+ }
+
+ template<typename U>
+ Ref(Ref<U>&& other)
+ : m_ptr(&other.leakRef())
+ {
+ ASSERT(m_ptr);
+ }
Ref& operator=(T& object)
{
+ ASSERT(m_ptr);
object.ref();
m_ptr->deref();
m_ptr = &object;
+ ASSERT(m_ptr);
return *this;
}
- template<typename U> Ref& operator=(PassRef<U> reference)
+
+ // Use copyRef() and the move assignment operators instead.
+ Ref& operator=(const Ref& reference) = delete;
+ template<typename U> Ref& operator=(const Ref<U>& reference) = delete;
+
+ Ref& operator=(Ref&& reference)
{
+ ASSERT(m_ptr);
m_ptr->deref();
m_ptr = &reference.leakRef();
+ ASSERT(m_ptr);
return *this;
}
- const T* operator->() const { return m_ptr; }
- T* operator->() { return m_ptr; }
+ template<typename U> Ref& operator=(Ref<U>&& reference)
+ {
+ ASSERT(m_ptr);
+ m_ptr->deref();
+ m_ptr = &reference.leakRef();
+ ASSERT(m_ptr);
+ return *this;
+ }
+
+ // Hash table deleted values, which are only constructed and never copied or destroyed.
+ Ref(HashTableDeletedValueType) : m_ptr(hashTableDeletedValue()) { }
+ bool isHashTableDeletedValue() const { return m_ptr == hashTableDeletedValue(); }
+ static T* hashTableDeletedValue() { return reinterpret_cast<T*>(-1); }
+
+ Ref(HashTableEmptyValueType) : m_ptr(hashTableEmptyValue()) { }
+ bool isHashTableEmptyValue() const { return m_ptr == hashTableEmptyValue(); }
+ static T* hashTableEmptyValue() { return nullptr; }
+
+ const T* ptrAllowingHashTableEmptyValue() const { ASSERT(m_ptr || isHashTableEmptyValue()); return m_ptr; }
+ T* ptrAllowingHashTableEmptyValue() { ASSERT(m_ptr || isHashTableEmptyValue()); return m_ptr; }
+
+ void assignToHashTableEmptyValue(Ref&& reference)
+ {
+ ASSERT(m_ptr == hashTableEmptyValue());
+ m_ptr = &reference.leakRef();
+ ASSERT(m_ptr);
+ }
+
+ T* operator->() const { ASSERT(m_ptr); return m_ptr; }
+ T* ptr() const { ASSERT(m_ptr); return m_ptr; }
+ T& get() const { ASSERT(m_ptr); return *m_ptr; }
+ operator T&() const { ASSERT(m_ptr); return *m_ptr; }
+
+ template<typename U> Ref<T> replace(Ref<U>&&) WARN_UNUSED_RETURN;
+
+#if COMPILER_SUPPORTS(CXX_REFERENCE_QUALIFIED_FUNCTIONS)
+ Ref copyRef() && = delete;
+ Ref copyRef() const & WARN_UNUSED_RETURN { return Ref(*m_ptr); }
+#else
+ Ref copyRef() const WARN_UNUSED_RETURN { return Ref(*m_ptr); }
+#endif
- const T& get() const { return *m_ptr; }
- T& get() { return *m_ptr; }
+ T& leakRef() WARN_UNUSED_RETURN
+ {
+ ASSERT(m_ptr);
- template<typename U> PassRef<T> replace(PassRef<U>) WARN_UNUSED_RETURN;
+ T& result = *std::exchange(m_ptr, nullptr);
+#if ASAN_ENABLED
+ __asan_poison_memory_region(this, sizeof(*this));
+#endif
+ return result;
+ }
private:
+ friend Ref adoptRef<T>(T&);
+
+ enum AdoptTag { Adopt };
+ Ref(T& object, AdoptTag)
+ : m_ptr(&object)
+ {
+ }
+
T* m_ptr;
};
-template<typename T> template<typename U> inline PassRef<T> Ref<T>::replace(PassRef<U> reference)
+template<typename T> template<typename U> inline Ref<T> Ref<T>::replace(Ref<U>&& reference)
{
auto oldReference = adoptRef(*m_ptr);
m_ptr = &reference.leakRef();
return oldReference;
}
+template<typename T, typename U> inline Ref<T> static_reference_cast(Ref<U>& reference)
+{
+ return Ref<T>(static_cast<T&>(reference.get()));
+}
+
+template<typename T, typename U> inline Ref<T> static_reference_cast(Ref<U>&& reference)
+{
+ return adoptRef(static_cast<T&>(reference.leakRef()));
+}
+
+template<typename T, typename U> inline Ref<T> static_reference_cast(const Ref<U>& reference)
+{
+ return Ref<T>(static_cast<T&>(reference.copyRef().get()));
+}
+
+template <typename T>
+struct GetPtrHelper<Ref<T>> {
+ typedef T* PtrType;
+ static T* getPtr(const Ref<T>& p) { return const_cast<T*>(p.ptr()); }
+};
+
+template <typename T>
+struct IsSmartPtr<Ref<T>> {
+ static const bool value = true;
+};
+
+template<typename T>
+inline Ref<T> adoptRef(T& reference)
+{
+ adopted(&reference);
+ return Ref<T>(reference, Ref<T>::Adopt);
+}
+
+template<typename T>
+inline Ref<T> makeRef(T& reference)
+{
+ return Ref<T>(reference);
+}
+
+template<typename ExpectedType, typename ArgType> inline bool is(Ref<ArgType>& source)
+{
+ return is<ExpectedType>(source.get());
+}
+
+template<typename ExpectedType, typename ArgType> inline bool is(const Ref<ArgType>& source)
+{
+ return is<ExpectedType>(source.get());
+}
+
} // namespace WTF
using WTF::Ref;
+using WTF::adoptRef;
+using WTF::makeRef;
+using WTF::static_reference_cast;
#endif // WTF_Ref_h
diff --git a/Source/WTF/wtf/RefCounted.h b/Source/WTF/wtf/RefCounted.h
index ab6c27a35..13762e07a 100644
--- a/Source/WTF/wtf/RefCounted.h
+++ b/Source/WTF/wtf/RefCounted.h
@@ -18,17 +18,15 @@
*
*/
-#ifndef RefCounted_h
-#define RefCounted_h
+#pragma once
#include <wtf/Assertions.h>
#include <wtf/FastMalloc.h>
#include <wtf/Noncopyable.h>
-#include <wtf/OwnPtr.h>
namespace WTF {
-#ifdef NDEBUG
+#if defined(NDEBUG) && !ENABLE(SECURITY_ASSERTIONS)
#define CHECK_REF_COUNTED_LIFECYCLE 0
#else
#define CHECK_REF_COUNTED_LIFECYCLE 1
@@ -39,10 +37,10 @@ namespace WTF {
// generated by the compiler (technique called template hoisting).
class RefCountedBase {
public:
- void ref()
+ void ref() const
{
#if CHECK_REF_COUNTED_LIFECYCLE
- ASSERT(!m_deletionHasBegun);
+ ASSERT_WITH_SECURITY_IMPLICATION(!m_deletionHasBegun);
ASSERT(!m_adoptionIsRequired);
#endif
++m_refCount;
@@ -64,7 +62,7 @@ public:
void relaxAdoptionRequirement()
{
#if CHECK_REF_COUNTED_LIFECYCLE
- ASSERT(!m_deletionHasBegun);
+ ASSERT_WITH_SECURITY_IMPLICATION(!m_deletionHasBegun);
ASSERT(m_adoptionIsRequired);
m_adoptionIsRequired = false;
#endif
@@ -89,10 +87,10 @@ protected:
}
// Returns whether the pointer should be freed or not.
- bool derefBase()
+ bool derefBase() const
{
#if CHECK_REF_COUNTED_LIFECYCLE
- ASSERT(!m_deletionHasBegun);
+ ASSERT_WITH_SECURITY_IMPLICATION(!m_deletionHasBegun);
ASSERT(!m_adoptionIsRequired);
#endif
@@ -121,10 +119,10 @@ private:
friend void adopted(RefCountedBase*);
#endif
- unsigned m_refCount;
+ mutable unsigned m_refCount;
#if CHECK_REF_COUNTED_LIFECYCLE
- bool m_deletionHasBegun;
- bool m_adoptionIsRequired;
+ mutable bool m_deletionHasBegun;
+ mutable bool m_adoptionIsRequired;
#endif
};
@@ -133,7 +131,7 @@ inline void adopted(RefCountedBase* object)
{
if (!object)
return;
- ASSERT(!object->m_deletionHasBegun);
+ ASSERT_WITH_SECURITY_IMPLICATION(!object->m_deletionHasBegun);
object->m_adoptionIsRequired = false;
}
#endif
@@ -141,10 +139,10 @@ inline void adopted(RefCountedBase* object)
template<typename T> class RefCounted : public RefCountedBase {
WTF_MAKE_NONCOPYABLE(RefCounted); WTF_MAKE_FAST_ALLOCATED;
public:
- void deref()
+ void deref() const
{
if (derefBase())
- delete static_cast<T*>(this);
+ delete static_cast<const T*>(this);
}
protected:
@@ -157,5 +155,3 @@ protected:
} // namespace WTF
using WTF::RefCounted;
-
-#endif // RefCounted_h
diff --git a/Source/WTF/wtf/RefCountedArray.h b/Source/WTF/wtf/RefCountedArray.h
index a55d7aeea..2fa8423db 100644
--- a/Source/WTF/wtf/RefCountedArray.h
+++ b/Source/WTF/wtf/RefCountedArray.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -72,7 +72,16 @@ public:
VectorTypeOperations<T>::initialize(begin(), end());
}
- explicit RefCountedArray(const Vector<T>& other)
+ RefCountedArray clone() const
+ {
+ RefCountedArray result(size());
+ for (unsigned i = size(); i--;)
+ result[i] = at(i);
+ return result;
+ }
+
+ template<size_t inlineCapacity, typename OverflowHandler>
+ explicit RefCountedArray(const Vector<T, inlineCapacity, OverflowHandler>& other)
{
if (other.isEmpty()) {
m_data = 0;
diff --git a/Source/WTF/wtf/RefCountedLeakCounter.cpp b/Source/WTF/wtf/RefCountedLeakCounter.cpp
index 721fa9f43..be8726638 100644
--- a/Source/WTF/wtf/RefCountedLeakCounter.cpp
+++ b/Source/WTF/wtf/RefCountedLeakCounter.cpp
@@ -39,7 +39,11 @@ void RefCountedLeakCounter::decrement() { }
#else
#define LOG_CHANNEL_PREFIX Log
+#if RELEASE_LOG_DISABLED
static WTFLogChannel LogRefCountedLeaks = { WTFLogChannelOn, "RefCountedLeaks" };
+#else
+static WTFLogChannel LogRefCountedLeaks = { WTFLogChannelOn, "RefCountedLeaks", LOG_CHANNEL_WEBKIT_SUBSYSTEM, OS_LOG_DEFAULT };
+#endif
typedef HashCountedSet<const char*, PtrHash<const char*>> ReasonSet;
static ReasonSet* leakMessageSuppressionReasons;
diff --git a/Source/WTF/wtf/RefCounter.h b/Source/WTF/wtf/RefCounter.h
new file mode 100644
index 000000000..b1aad5603
--- /dev/null
+++ b/Source/WTF/wtf/RefCounter.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RefCounter_h
+#define RefCounter_h
+
+#include <functional>
+#include <wtf/Noncopyable.h>
+#include <wtf/RefPtr.h>
+
+namespace WTF {
+
+enum class RefCounterEvent { Decrement, Increment };
+
+template<typename T>
+class RefCounter {
+ WTF_MAKE_NONCOPYABLE(RefCounter);
+
+ class Count {
+ WTF_MAKE_NONCOPYABLE(Count);
+ public:
+ void ref();
+ void deref();
+
+ private:
+ friend class RefCounter;
+
+ Count(RefCounter& refCounter)
+ : m_refCounter(&refCounter)
+ , m_value(0)
+ {
+ }
+
+ RefCounter* m_refCounter;
+ size_t m_value;
+ };
+
+public:
+ using Token = RefPtr<Count>;
+ using ValueChangeFunction = std::function<void (RefCounterEvent)>;
+
+ RefCounter(ValueChangeFunction = nullptr);
+ ~RefCounter();
+
+ Token count() const
+ {
+ return m_count;
+ }
+
+ size_t value() const
+ {
+ return m_count->m_value;
+ }
+
+private:
+ ValueChangeFunction m_valueDidChange;
+ Count* m_count;
+};
+
+template<typename T>
+inline void RefCounter<T>::Count::ref()
+{
+ ++m_value;
+ if (m_refCounter && m_refCounter->m_valueDidChange)
+ m_refCounter->m_valueDidChange(RefCounterEvent::Increment);
+}
+
+template<typename T>
+inline void RefCounter<T>::Count::deref()
+{
+ ASSERT(m_value);
+
+ --m_value;
+ if (m_refCounter && m_refCounter->m_valueDidChange)
+ m_refCounter->m_valueDidChange(RefCounterEvent::Decrement);
+
+ // The Count object is kept alive so long as either the RefCounter that created it remains
+ // allocated, or so long as its reference count is non-zero.
+ // If the RefCounter has already been deallocated, delete the Count when its reference
+ // count reaches zero.
+ if (!m_refCounter && !m_value)
+ delete this;
+}
+
+template<typename T>
+inline RefCounter<T>::RefCounter(ValueChangeFunction valueDidChange)
+ : m_valueDidChange(valueDidChange)
+ , m_count(new Count(*this))
+{
+}
+
+template<typename T>
+inline RefCounter<T>::~RefCounter()
+{
+ // The Count object is kept alive so long as either the RefCounter that created it remains
+ // allocated, or so long as its reference count is non-zero.
+ // If the reference count of the Count is already zero then delete it now, otherwise
+ // clear its m_refCounter pointer.
+ if (m_count->m_value)
+ m_count->m_refCounter = nullptr;
+ else
+ delete m_count;
+}
+
+} // namespace WTF
+
+using WTF::RefCounter;
+using WTF::RefCounterEvent;
+
+#endif // RefCounter_h
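
A minimal usage sketch of the RefCounter template added above. The BackgroundActivityCounterType tag, the lambda body, and the function name are illustrative assumptions, not part of the patch:

    #include <wtf/RefCounter.h>

    // Hypothetical tag type; RefCounter's template parameter only distinguishes unrelated counters.
    enum class BackgroundActivityCounterType { };
    using BackgroundActivityCounter = RefCounter<BackgroundActivityCounterType>;

    void useBackgroundActivityCounter()
    {
        BackgroundActivityCounter counter([](RefCounterEvent event) {
            // Invoked on every increment and decrement of the shared count.
            if (event == RefCounterEvent::Increment) {
                // e.g. begin some background activity while the count is non-zero.
            }
        });

        {
            BackgroundActivityCounter::Token token = counter.count(); // refs the shared Count
            // counter.value() is 1 while the token is alive.
        }
        // Token destroyed here: the callback fires with RefCounterEvent::Decrement.
    }
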
diff --git a/Source/WTF/wtf/RefPtr.h b/Source/WTF/wtf/RefPtr.h
index f26109352..fe1630c97 100644
--- a/Source/WTF/wtf/RefPtr.h
+++ b/Source/WTF/wtf/RefPtr.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2005-2017 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -23,195 +23,240 @@
#ifndef WTF_RefPtr_h
#define WTF_RefPtr_h
-#include "FastMalloc.h"
-#include "PassRefPtr.h"
#include <algorithm>
#include <utility>
+#include <wtf/FastMalloc.h>
+#include <wtf/GetPtr.h>
+#include <wtf/PassRefPtr.h>
namespace WTF {
- enum HashTableDeletedValueType { HashTableDeletedValue };
+template<typename T> class RefPtr;
+template<typename T> RefPtr<T> adoptRef(T*);
- template<typename T> class RefPtr {
- WTF_MAKE_FAST_ALLOCATED;
- public:
- ALWAYS_INLINE RefPtr() : m_ptr(nullptr) { }
- ALWAYS_INLINE RefPtr(T* ptr) : m_ptr(ptr) { refIfNotNull(ptr); }
- ALWAYS_INLINE RefPtr(const RefPtr& o) : m_ptr(o.m_ptr) { refIfNotNull(m_ptr); }
- template<typename U> RefPtr(const RefPtr<U>& o) : m_ptr(o.get()) { refIfNotNull(m_ptr); }
+template<typename T> class RefPtr {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ typedef T ValueType;
+ typedef ValueType* PtrType;
- ALWAYS_INLINE RefPtr(RefPtr&& o) : m_ptr(o.release().leakRef()) { }
- template<typename U> RefPtr(RefPtr<U>&& o) : m_ptr(o.release().leakRef()) { }
+ static constexpr bool isRefPtr = true;
- // See comments in PassRefPtr.h for an explanation of why this takes a const reference.
- template<typename U> RefPtr(const PassRefPtr<U>&);
+ ALWAYS_INLINE RefPtr() : m_ptr(nullptr) { }
+ ALWAYS_INLINE RefPtr(T* ptr) : m_ptr(ptr) { refIfNotNull(ptr); }
+ ALWAYS_INLINE RefPtr(const RefPtr& o) : m_ptr(o.m_ptr) { refIfNotNull(m_ptr); }
+ template<typename U> RefPtr(const RefPtr<U>& o) : m_ptr(o.get()) { refIfNotNull(m_ptr); }
- template<typename U> RefPtr(PassRef<U>);
+ ALWAYS_INLINE RefPtr(RefPtr&& o) : m_ptr(o.leakRef()) { }
+ template<typename U> RefPtr(RefPtr<U>&& o) : m_ptr(o.leakRef()) { }
- // Hash table deleted values, which are only constructed and never copied or destroyed.
- RefPtr(HashTableDeletedValueType) : m_ptr(hashTableDeletedValue()) { }
- bool isHashTableDeletedValue() const { return m_ptr == hashTableDeletedValue(); }
+ // See comments in PassRefPtr.h for an explanation of why this takes a const reference.
+ template<typename U> RefPtr(const PassRefPtr<U>&);
- ALWAYS_INLINE ~RefPtr() { derefIfNotNull(m_ptr); }
+ template<typename U> RefPtr(Ref<U>&&);
- T* get() const { return m_ptr; }
-
- void clear();
- PassRefPtr<T> release() { PassRefPtr<T> tmp = adoptRef(m_ptr); m_ptr = nullptr; return tmp; }
- PassRef<T> releaseNonNull() { ASSERT(m_ptr); PassRef<T> tmp = adoptRef(*m_ptr); m_ptr = nullptr; return tmp; }
+ // Hash table deleted values, which are only constructed and never copied or destroyed.
+ RefPtr(HashTableDeletedValueType) : m_ptr(hashTableDeletedValue()) { }
+ bool isHashTableDeletedValue() const { return m_ptr == hashTableDeletedValue(); }
- T& operator*() const { return *m_ptr; }
- ALWAYS_INLINE T* operator->() const { return m_ptr; }
-
- bool operator!() const { return !m_ptr; }
-
- // This conversion operator allows implicit conversion to bool but not to other integer types.
- typedef T* (RefPtr::*UnspecifiedBoolType);
- operator UnspecifiedBoolType() const { return m_ptr ? &RefPtr::m_ptr : nullptr; }
-
- RefPtr& operator=(const RefPtr&);
- RefPtr& operator=(T*);
- RefPtr& operator=(const PassRefPtr<T>&);
- template<typename U> RefPtr& operator=(const RefPtr<U>&);
- template<typename U> RefPtr& operator=(const PassRefPtr<U>&);
- RefPtr& operator=(RefPtr&&);
- template<typename U> RefPtr& operator=(RefPtr<U>&&);
- template<typename U> RefPtr& operator=(PassRef<U>);
-
- void swap(RefPtr&);
-
- static T* hashTableDeletedValue() { return reinterpret_cast<T*>(-1); }
-
- private:
- T* m_ptr;
- };
-
- template<typename T> template<typename U> inline RefPtr<T>::RefPtr(const PassRefPtr<U>& o)
- : m_ptr(o.leakRef())
- {
- }
-
- template<typename T> template<typename U> inline RefPtr<T>::RefPtr(PassRef<U> reference)
- : m_ptr(&reference.leakRef())
- {
- }
-
- template<typename T> inline void RefPtr<T>::clear()
- {
- T* ptr = m_ptr;
- m_ptr = nullptr;
- derefIfNotNull(ptr);
- }
-
- template<typename T> inline RefPtr<T>& RefPtr<T>::operator=(const RefPtr& o)
- {
- RefPtr ptr = o;
- swap(ptr);
- return *this;
- }
-
- template<typename T> template<typename U> inline RefPtr<T>& RefPtr<T>::operator=(const RefPtr<U>& o)
- {
- RefPtr ptr = o;
- swap(ptr);
- return *this;
- }
-
- template<typename T> inline RefPtr<T>& RefPtr<T>::operator=(T* optr)
- {
- RefPtr ptr = optr;
- swap(ptr);
- return *this;
- }
-
- template<typename T> inline RefPtr<T>& RefPtr<T>::operator=(const PassRefPtr<T>& o)
- {
- RefPtr ptr = o;
- swap(ptr);
- return *this;
- }
-
- template<typename T> template<typename U> inline RefPtr<T>& RefPtr<T>::operator=(const PassRefPtr<U>& o)
- {
- RefPtr ptr = o;
- swap(ptr);
- return *this;
- }
-
- template<typename T> inline RefPtr<T>& RefPtr<T>::operator=(RefPtr&& o)
- {
- RefPtr ptr = std::move(o);
- swap(ptr);
- return *this;
- }
-
- template<typename T> template<typename U> inline RefPtr<T>& RefPtr<T>::operator=(RefPtr<U>&& o)
- {
- RefPtr ptr = std::move(o);
- swap(ptr);
- return *this;
- }
-
- template<typename T> template<typename U> inline RefPtr<T>& RefPtr<T>::operator=(PassRef<U> reference)
- {
- RefPtr ptr = std::move(reference);
- swap(ptr);
- return *this;
- }
-
- template<class T> inline void RefPtr<T>::swap(RefPtr& o)
- {
- std::swap(m_ptr, o.m_ptr);
- }
-
- template<class T> inline void swap(RefPtr<T>& a, RefPtr<T>& b)
- {
- a.swap(b);
- }
-
- template<typename T, typename U> inline bool operator==(const RefPtr<T>& a, const RefPtr<U>& b)
- {
- return a.get() == b.get();
- }
-
- template<typename T, typename U> inline bool operator==(const RefPtr<T>& a, U* b)
- {
- return a.get() == b;
- }
-
- template<typename T, typename U> inline bool operator==(T* a, const RefPtr<U>& b)
- {
- return a == b.get();
- }
-
- template<typename T, typename U> inline bool operator!=(const RefPtr<T>& a, const RefPtr<U>& b)
- {
- return a.get() != b.get();
- }
-
- template<typename T, typename U> inline bool operator!=(const RefPtr<T>& a, U* b)
- {
- return a.get() != b;
- }
-
- template<typename T, typename U> inline bool operator!=(T* a, const RefPtr<U>& b)
- {
- return a != b.get();
- }
-
- template<typename T, typename U> inline RefPtr<T> static_pointer_cast(const RefPtr<U>& p)
- {
- return RefPtr<T>(static_cast<T*>(p.get()));
- }
+ ALWAYS_INLINE ~RefPtr() { derefIfNotNull(std::exchange(m_ptr, nullptr)); }
- template<typename T> inline T* getPtr(const RefPtr<T>& p)
- {
- return p.get();
- }
+ T* get() const { return m_ptr; }
+
+ // FIXME: Remove release() and change all call sites to call WTFMove().
+ RefPtr<T> release() { RefPtr<T> tmp = adoptRef(m_ptr); m_ptr = nullptr; return tmp; }
+ Ref<T> releaseNonNull() { ASSERT(m_ptr); Ref<T> tmp(adoptRef(*m_ptr)); m_ptr = nullptr; return tmp; }
+ Ref<const T> releaseConstNonNull() { ASSERT(m_ptr); Ref<const T> tmp(adoptRef(*m_ptr)); m_ptr = nullptr; return tmp; }
+
+ T* leakRef() WARN_UNUSED_RETURN;
+
+ T& operator*() const { ASSERT(m_ptr); return *m_ptr; }
+ ALWAYS_INLINE T* operator->() const { return m_ptr; }
+
+ bool operator!() const { return !m_ptr; }
+
+ // This conversion operator allows implicit conversion to bool but not to other integer types.
+ typedef T* (RefPtr::*UnspecifiedBoolType);
+ operator UnspecifiedBoolType() const { return m_ptr ? &RefPtr::m_ptr : nullptr; }
+
+ RefPtr& operator=(const RefPtr&);
+ RefPtr& operator=(T*);
+ RefPtr& operator=(std::nullptr_t);
+ RefPtr& operator=(const PassRefPtr<T>&);
+ template<typename U> RefPtr& operator=(const RefPtr<U>&);
+ template<typename U> RefPtr& operator=(const PassRefPtr<U>&);
+ RefPtr& operator=(RefPtr&&);
+ template<typename U> RefPtr& operator=(RefPtr<U>&&);
+ template<typename U> RefPtr& operator=(Ref<U>&&);
+
+ void swap(RefPtr&);
+
+ static T* hashTableDeletedValue() { return reinterpret_cast<T*>(-1); }
+
+#if COMPILER_SUPPORTS(CXX_REFERENCE_QUALIFIED_FUNCTIONS)
+ RefPtr copyRef() && = delete;
+ RefPtr copyRef() const & WARN_UNUSED_RETURN { return RefPtr(m_ptr); }
+#else
+ RefPtr copyRef() const WARN_UNUSED_RETURN { return RefPtr(m_ptr); }
+#endif
+
+private:
+ friend RefPtr adoptRef<T>(T*);
+
+ enum AdoptTag { Adopt };
+ RefPtr(T* ptr, AdoptTag) : m_ptr(ptr) { }
+
+ T* m_ptr;
+};
+
+template<typename T> template<typename U> inline RefPtr<T>::RefPtr(const PassRefPtr<U>& o)
+ : m_ptr(o.leakRef())
+{
+}
+
+template<typename T> template<typename U> inline RefPtr<T>::RefPtr(Ref<U>&& reference)
+ : m_ptr(&reference.leakRef())
+{
+}
+
+template<typename T>
+inline T* RefPtr<T>::leakRef()
+{
+ return std::exchange(m_ptr, nullptr);
+}
+
+template<typename T> inline RefPtr<T>& RefPtr<T>::operator=(const RefPtr& o)
+{
+ RefPtr ptr = o;
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> template<typename U> inline RefPtr<T>& RefPtr<T>::operator=(const RefPtr<U>& o)
+{
+ RefPtr ptr = o;
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> inline RefPtr<T>& RefPtr<T>::operator=(T* optr)
+{
+ RefPtr ptr = optr;
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> inline RefPtr<T>& RefPtr<T>::operator=(std::nullptr_t)
+{
+ derefIfNotNull(std::exchange(m_ptr, nullptr));
+ return *this;
+}
+
+template<typename T> inline RefPtr<T>& RefPtr<T>::operator=(const PassRefPtr<T>& o)
+{
+ RefPtr ptr = o;
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> template<typename U> inline RefPtr<T>& RefPtr<T>::operator=(const PassRefPtr<U>& o)
+{
+ RefPtr ptr = o;
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> inline RefPtr<T>& RefPtr<T>::operator=(RefPtr&& o)
+{
+ RefPtr ptr = WTFMove(o);
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> template<typename U> inline RefPtr<T>& RefPtr<T>::operator=(RefPtr<U>&& o)
+{
+ RefPtr ptr = WTFMove(o);
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> template<typename U> inline RefPtr<T>& RefPtr<T>::operator=(Ref<U>&& reference)
+{
+ RefPtr ptr = WTFMove(reference);
+ swap(ptr);
+ return *this;
+}
+
+template<class T> inline void RefPtr<T>::swap(RefPtr& o)
+{
+ std::swap(m_ptr, o.m_ptr);
+}
+
+template<class T> inline void swap(RefPtr<T>& a, RefPtr<T>& b)
+{
+ a.swap(b);
+}
+
+template<typename T, typename U> inline bool operator==(const RefPtr<T>& a, const RefPtr<U>& b)
+{
+ return a.get() == b.get();
+}
+
+template<typename T, typename U> inline bool operator==(const RefPtr<T>& a, U* b)
+{
+ return a.get() == b;
+}
+
+template<typename T, typename U> inline bool operator==(T* a, const RefPtr<U>& b)
+{
+ return a == b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const RefPtr<T>& a, const RefPtr<U>& b)
+{
+ return a.get() != b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const RefPtr<T>& a, U* b)
+{
+ return a.get() != b;
+}
+
+template<typename T, typename U> inline bool operator!=(T* a, const RefPtr<U>& b)
+{
+ return a != b.get();
+}
+
+template<typename T, typename U> inline RefPtr<T> static_pointer_cast(const RefPtr<U>& p)
+{
+ return RefPtr<T>(static_cast<T*>(p.get()));
+}
+
+template <typename T> struct IsSmartPtr<RefPtr<T>> {
+ static const bool value = true;
+};
+
+template<typename T> inline RefPtr<T> adoptRef(T* p)
+{
+ adopted(p);
+ return RefPtr<T>(p, RefPtr<T>::Adopt);
+}
+
+template<typename T> inline RefPtr<T> makeRefPtr(T* pointer)
+{
+ return pointer;
+}
+
+template<typename T> inline RefPtr<T> makeRefPtr(T& reference)
+{
+ return &reference;
+}
} // namespace WTF
using WTF::RefPtr;
+using WTF::adoptRef;
+using WTF::makeRefPtr;
using WTF::static_pointer_cast;
#endif // WTF_RefPtr_h
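
A short sketch of the updated RefPtr API, assuming a hypothetical RefCounted class named Widget; adoptRef, makeRefPtr, copyRef, and releaseNonNull are the functions defined above:

    #include <wtf/Ref.h>
    #include <wtf/RefCounted.h>
    #include <wtf/RefPtr.h>

    // Hypothetical ref-counted class used only for illustration.
    class Widget : public RefCounted<Widget> {
    public:
        static RefPtr<Widget> create() { return adoptRef(new Widget); }
    private:
        Widget() = default;
    };

    void useWidget()
    {
        RefPtr<Widget> widget = Widget::create();          // adoptRef takes over the initial reference
        RefPtr<Widget> copy = widget.copyRef();            // explicit extra reference; copy-assignment also refs
        RefPtr<Widget> fromRaw = makeRefPtr(widget.get()); // refs a raw pointer, which may be null
        Ref<Widget> owned = widget.releaseNonNull();       // widget becomes null; owned keeps the reference
    }
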
diff --git a/Source/WTF/wtf/RefPtrHashMap.h b/Source/WTF/wtf/RefPtrHashMap.h
deleted file mode 100644
index 152bdeece..000000000
--- a/Source/WTF/wtf/RefPtrHashMap.h
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2013 Apple Inc. All rights reserved.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public License
- * along with this library; see the file COPYING.LIB. If not, write to
- * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- *
- */
-
-#ifndef RefPtrHashMap_h
-#define RefPtrHashMap_h
-
-namespace WTF {
-
- // This specialization is a copy of HashMap for use with RefPtr keys, with overloaded functions
- // to allow for lookup by pointer instead of RefPtr, avoiding ref-count churn.
-
- // FIXME: Find a way to do this with traits that doesn't require a copy of the HashMap template.
-
- template<typename T, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
- class HashMap<RefPtr<T>, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg> {
- WTF_MAKE_FAST_ALLOCATED;
- private:
- typedef KeyTraitsArg KeyTraits;
- typedef MappedTraitsArg MappedTraits;
- typedef KeyValuePairHashTraits<KeyTraits, MappedTraits> ValueTraits;
-
- public:
- typedef typename KeyTraits::TraitType KeyType;
- typedef T* RawKeyType;
- typedef typename MappedTraits::TraitType MappedType;
- typedef typename ValueTraits::TraitType ValueType;
-
- private:
- typedef typename MappedTraits::PeekType MappedPeekType;
-
- typedef HashArg HashFunctions;
-
- typedef HashTable<KeyType, ValueType, KeyValuePairKeyExtractor<ValueType>,
- HashFunctions, ValueTraits, KeyTraits> HashTableType;
-
- typedef HashMapTranslator<ValueTraits, HashFunctions>
- Translator;
-
- public:
- typedef HashTableIteratorAdapter<HashTableType, ValueType> iterator;
- typedef HashTableConstIteratorAdapter<HashTableType, ValueType> const_iterator;
- typedef typename HashTableType::AddResult AddResult;
-
- void swap(HashMap&);
-
- int size() const;
- int capacity() const;
- bool isEmpty() const;
-
- // iterators iterate over pairs of keys and values
- iterator begin();
- iterator end();
- const_iterator begin() const;
- const_iterator end() const;
-
- IteratorRange<typename iterator::Keys> keys() { return makeIteratorRange(begin().keys(), end().keys()); }
- const IteratorRange<typename const_iterator::Keys> keys() const { return makeIteratorRange(begin().keys(), end().keys()); }
-
- IteratorRange<typename iterator::Values> values() { return makeIteratorRange(begin().values(), end().values()); }
- const IteratorRange<typename const_iterator::Values> values() const { return makeIteratorRange(begin().values(), end().values()); }
-
- iterator find(const KeyType&);
- iterator find(RawKeyType);
- const_iterator find(const KeyType&) const;
- const_iterator find(RawKeyType) const;
- bool contains(const KeyType&) const;
- bool contains(RawKeyType) const;
- MappedPeekType get(const KeyType&) const;
- MappedPeekType get(RawKeyType) const;
- MappedPeekType inlineGet(RawKeyType) const;
-
- // replaces value but not key if key is already present
- // return value is a pair of the iterator to the key location,
- // and a boolean that's true if a new value was actually added
- template<typename V> AddResult set(const KeyType&, V&&);
- template<typename V> AddResult set(RawKeyType, V&&);
-
- // does nothing if key is already present
- // return value is a pair of the iterator to the key location,
- // and a boolean that's true if a new value was actually added
- template<typename V> AddResult add(const KeyType&, V&&);
- template<typename V> AddResult add(RawKeyType, V&&);
-
- bool remove(const KeyType&);
- bool remove(RawKeyType);
- bool remove(iterator);
- void clear();
-
- MappedType take(const KeyType&); // efficient combination of get with remove
- MappedType take(RawKeyType); // efficient combination of get with remove
-
- private:
- template<typename V>
- AddResult inlineAdd(const KeyType&, V&&);
-
- template<typename V>
- AddResult inlineAdd(RawKeyType, V&&);
-
- HashTableType m_impl;
- };
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline void HashMap<RefPtr<T>, U, V, W, X>::swap(HashMap& other)
- {
- m_impl.swap(other.m_impl);
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline int HashMap<RefPtr<T>, U, V, W, X>::size() const
- {
- return m_impl.size();
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline int HashMap<RefPtr<T>, U, V, W, X>::capacity() const
- {
- return m_impl.capacity();
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline bool HashMap<RefPtr<T>, U, V, W, X>::isEmpty() const
- {
- return m_impl.isEmpty();
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline typename HashMap<RefPtr<T>, U, V, W, X>::iterator HashMap<RefPtr<T>, U, V, W, X>::begin()
- {
- return m_impl.begin();
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline typename HashMap<RefPtr<T>, U, V, W, X>::iterator HashMap<RefPtr<T>, U, V, W, X>::end()
- {
- return m_impl.end();
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline typename HashMap<RefPtr<T>, U, V, W, X>::const_iterator HashMap<RefPtr<T>, U, V, W, X>::begin() const
- {
- return m_impl.begin();
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline typename HashMap<RefPtr<T>, U, V, W, X>::const_iterator HashMap<RefPtr<T>, U, V, W, X>::end() const
- {
- return m_impl.end();
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline typename HashMap<RefPtr<T>, U, V, W, X>::iterator HashMap<RefPtr<T>, U, V, W, X>::find(const KeyType& key)
- {
- return m_impl.find(key);
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline typename HashMap<RefPtr<T>, U, V, W, X>::iterator HashMap<RefPtr<T>, U, V, W, X>::find(RawKeyType key)
- {
- return m_impl.template find<Translator>(key);
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline typename HashMap<RefPtr<T>, U, V, W, X>::const_iterator HashMap<RefPtr<T>, U, V, W, X>::find(const KeyType& key) const
- {
- return m_impl.find(key);
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline typename HashMap<RefPtr<T>, U, V, W, X>::const_iterator HashMap<RefPtr<T>, U, V, W, X>::find(RawKeyType key) const
- {
- return m_impl.template find<Translator>(key);
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline bool HashMap<RefPtr<T>, U, V, W, X>::contains(const KeyType& key) const
- {
- return m_impl.contains(key);
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline bool HashMap<RefPtr<T>, U, V, W, X>::contains(RawKeyType key) const
- {
- return m_impl.template contains<Translator>(key);
- }
-
- template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
- template<typename V>
- auto HashMap<RefPtr<KeyArg>, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::inlineAdd(const KeyType& key, V&& mapped) -> AddResult
- {
- return m_impl.template add<Translator>(key, std::forward<V>(mapped));
- }
-
- template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
- template<typename V>
- auto HashMap<RefPtr<KeyArg>, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::inlineAdd(RawKeyType key, V&& mapped) -> AddResult
- {
- return m_impl.template add<Translator>(key, std::forward<V>(mapped));
- }
-
- template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
- template<typename V>
- auto HashMap<RefPtr<KeyArg>, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::set(const KeyType& key, V&& value) -> AddResult
- {
- AddResult result = inlineAdd(key, std::forward<V>(value));
- if (!result.isNewEntry) {
- // The inlineAdd call above found an existing hash table entry; we need to set the mapped value.
- result.iterator->value = std::forward<V>(value);
- }
- return result;
- }
-
- template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
- template<typename V>
- auto HashMap<RefPtr<KeyArg>, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::set(RawKeyType key, V&& value) -> AddResult
- {
- AddResult result = inlineAdd(key, std::forward<V>(value));
- if (!result.isNewEntry) {
- // The inlineAdd call above found an existing hash table entry; we need to set the mapped value.
- result.iterator->value = std::forward<V>(value);
- }
- return result;
- }
-
- template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
- template<typename V>
- auto HashMap<RefPtr<KeyArg>, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::add(const KeyType& key, V&& value) -> AddResult
- {
- return inlineAdd(key, std::forward<V>(value));
- }
-
- template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
- template<typename V>
- auto HashMap<RefPtr<KeyArg>, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>::add(RawKeyType key, V&& value) -> AddResult
- {
- return inlineAdd(key, std::forward<V>(value));
- }
-
- template<typename T, typename U, typename V, typename W, typename MappedTraits>
- typename HashMap<RefPtr<T>, U, V, W, MappedTraits>::MappedPeekType
- HashMap<RefPtr<T>, U, V, W, MappedTraits>::get(const KeyType& key) const
- {
- ValueType* entry = const_cast<HashTableType&>(m_impl).lookup(key);
- if (!entry)
- return MappedTraits::peek(MappedTraits::emptyValue());
- return MappedTraits::peek(entry->value);
- }
-
- template<typename T, typename U, typename V, typename W, typename MappedTraits>
- typename HashMap<RefPtr<T>, U, V, W, MappedTraits>::MappedPeekType
- inline HashMap<RefPtr<T>, U, V, W, MappedTraits>::inlineGet(RawKeyType key) const
- {
- ValueType* entry = const_cast<HashTableType&>(m_impl).template lookup<Translator>(key);
- if (!entry)
- return MappedTraits::peek(MappedTraits::emptyValue());
- return MappedTraits::peek(entry->value);
- }
-
- template<typename T, typename U, typename V, typename W, typename MappedTraits>
- typename HashMap<RefPtr<T>, U, V, W, MappedTraits>::MappedPeekType
- HashMap<RefPtr<T>, U, V, W, MappedTraits>::get(RawKeyType key) const
- {
- return inlineGet(key);
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline bool HashMap<RefPtr<T>, U, V, W, X>::remove(iterator it)
- {
- if (it.m_impl == m_impl.end())
- return false;
- m_impl.internalCheckTableConsistency();
- m_impl.removeWithoutEntryConsistencyCheck(it.m_impl);
- return true;
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline bool HashMap<RefPtr<T>, U, V, W, X>::remove(const KeyType& key)
- {
- return remove(find(key));
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline bool HashMap<RefPtr<T>, U, V, W, X>::remove(RawKeyType key)
- {
- return remove(find(key));
- }
-
- template<typename T, typename U, typename V, typename W, typename X>
- inline void HashMap<RefPtr<T>, U, V, W, X>::clear()
- {
- m_impl.clear();
- }
-
- template<typename T, typename U, typename V, typename W, typename MappedTraits>
- auto HashMap<RefPtr<T>, U, V, W, MappedTraits>::take(const KeyType& key) -> MappedType
- {
- iterator it = find(key);
- if (it == end())
- return MappedTraits::emptyValue();
- MappedType value = std::move(it->value);
- remove(it);
- return value;
- }
-
- template<typename T, typename U, typename V, typename W, typename MappedTraits>
- auto HashMap<RefPtr<T>, U, V, W, MappedTraits>::take(RawKeyType key) -> MappedType
- {
- iterator it = find(key);
- if (it == end())
- return MappedTraits::emptyValue();
- MappedType value = std::move(it->value);
- remove(it);
- return value;
- }
-
-} // namespace WTF
-
-#endif // RefPtrHashMap_h
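
The raw-pointer lookup the deleted specialization provided — querying a HashMap keyed on RefPtr without constructing a temporary RefPtr — is presumably now handled by the primary HashMap template via the IsSmartPtr<RefPtr<T>> specialization added in RefPtr.h above. A sketch of that pattern, reusing the hypothetical Widget class from the previous sketch:

    #include <wtf/HashMap.h>

    int lookUpWithoutChurn(const HashMap<RefPtr<Widget>, int>& map, Widget* rawKey)
    {
        // The raw-pointer overload avoids constructing a temporary RefPtr for the lookup,
        // so the key's reference count is not touched.
        return map.get(rawKey);
    }
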
diff --git a/Source/WTF/wtf/RetainPtr.h b/Source/WTF/wtf/RetainPtr.h
index 9cf526701..3b3f8d9bc 100644
--- a/Source/WTF/wtf/RetainPtr.h
+++ b/Source/WTF/wtf/RetainPtr.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005, 2006, 2007, 2008, 2010, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2005, 2006, 2007, 2008, 2010, 2013, 2014 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -21,6 +21,8 @@
#ifndef RetainPtr_h
#define RetainPtr_h
+#include <wtf/Platform.h>
+
#if USE(CF) || defined(__OBJC__)
#include <wtf/HashTraits.h>
@@ -45,288 +47,294 @@
namespace WTF {
- // Unlike most most of our smart pointers, RetainPtr can take either the pointer type or the pointed-to type,
- // so both RetainPtr<NSDictionary> and RetainPtr<CFDictionaryRef> will work.
+// Unlike most of our smart pointers, RetainPtr can take either the pointer type or the pointed-to type,
+// so both RetainPtr<NSDictionary> and RetainPtr<CFDictionaryRef> will work.
-#if !PLATFORM(IOS)
- #define AdoptCF DeprecatedAdoptCF
- #define AdoptNS DeprecatedAdoptNS
-#endif
+template<typename T> class RetainPtr;
- enum AdoptCFTag { AdoptCF };
- enum AdoptNSTag { AdoptNS };
-
-#if defined(__OBJC__) && !__has_feature(objc_arc)
-#ifdef OBJC_NO_GC
- inline void adoptNSReference(id)
- {
- }
-#else
- inline void adoptNSReference(id ptr)
- {
- if (ptr) {
- CFRetain(ptr);
- [ptr release];
- }
- }
-#endif
-#endif
+template<typename T> RetainPtr<T> adoptCF(T CF_RELEASES_ARGUMENT) WARN_UNUSED_RETURN;
+template<typename T> RetainPtr<T> adoptNS(T NS_RELEASES_ARGUMENT) WARN_UNUSED_RETURN;
- template<typename T> class RetainPtr {
- public:
- typedef typename std::remove_pointer<T>::type ValueType;
- typedef ValueType* PtrType;
- typedef CFTypeRef StorageType;
+template<typename T> class RetainPtr {
+public:
+ typedef typename std::remove_pointer<T>::type ValueType;
+ typedef ValueType* PtrType;
+ typedef CFTypeRef StorageType;
- RetainPtr() : m_ptr(0) {}
- RetainPtr(PtrType ptr) : m_ptr(toStorageType(ptr)) { if (m_ptr) CFRetain(m_ptr); }
+ RetainPtr() : m_ptr(nullptr) { }
+ RetainPtr(PtrType ptr) : m_ptr(toStorageType(ptr)) { if (m_ptr) CFRetain(m_ptr); }
- RetainPtr(AdoptCFTag, PtrType ptr)
- : m_ptr(toStorageType(ptr))
- {
-#ifdef __OBJC__
- static_assert((!std::is_convertible<T, id>::value), "Don't use adoptCF with Objective-C pointer types, use adoptNS.");
-#endif
- }
+ RetainPtr(const RetainPtr& o) : m_ptr(o.m_ptr) { if (StorageType ptr = m_ptr) CFRetain(ptr); }
-#if __has_feature(objc_arc)
- RetainPtr(AdoptNSTag, PtrType ptr) : m_ptr(toStorageType(ptr)) { if (m_ptr) CFRetain(m_ptr); }
-#else
- RetainPtr(AdoptNSTag, PtrType ptr)
- : m_ptr(toStorageType(ptr))
- {
- adoptNSReference(ptr);
- }
-#endif
-
- RetainPtr(const RetainPtr& o) : m_ptr(o.m_ptr) { if (StorageType ptr = m_ptr) CFRetain(ptr); }
+ RetainPtr(RetainPtr&& o) : m_ptr(toStorageType(o.leakRef())) { }
+ template<typename U> RetainPtr(RetainPtr<U>&& o) : m_ptr(toStorageType(o.leakRef())) { }
- RetainPtr(RetainPtr&& o) : m_ptr(toStorageType(o.leakRef())) { }
- template<typename U> RetainPtr(RetainPtr<U>&& o) : m_ptr(toStorageType(o.leakRef())) { }
+ // Hash table deleted values, which are only constructed and never copied or destroyed.
+ RetainPtr(HashTableDeletedValueType) : m_ptr(hashTableDeletedValue()) { }
+ bool isHashTableDeletedValue() const { return m_ptr == hashTableDeletedValue(); }
+
+ ~RetainPtr();
+
+ template<typename U> RetainPtr(const RetainPtr<U>&);
- // Hash table deleted values, which are only constructed and never copied or destroyed.
- RetainPtr(HashTableDeletedValueType) : m_ptr(hashTableDeletedValue()) { }
- bool isHashTableDeletedValue() const { return m_ptr == hashTableDeletedValue(); }
-
- ~RetainPtr() { if (StorageType ptr = m_ptr) CFRelease(ptr); }
-
- template<typename U> RetainPtr(const RetainPtr<U>&);
+ void clear();
+ PtrType leakRef() WARN_UNUSED_RETURN;
+ PtrType autorelease();
- void clear();
- PtrType leakRef() WARN_UNUSED_RETURN;
+ PtrType get() const { return fromStorageType(m_ptr); }
+ PtrType operator->() const { return fromStorageType(m_ptr); }
+ explicit operator PtrType() const { return fromStorageType(m_ptr); }
+ explicit operator bool() const { return m_ptr; }
- PtrType get() const { return fromStorageType(m_ptr); }
- PtrType operator->() const { return fromStorageType(m_ptr); }
- explicit operator PtrType() const { return fromStorageType(m_ptr); }
- explicit operator bool() const { return m_ptr; }
+ bool operator!() const { return !m_ptr; }
- bool operator!() const { return !m_ptr; }
-
- // This conversion operator allows implicit conversion to bool but not to other integer types.
- typedef StorageType RetainPtr::*UnspecifiedBoolType;
- operator UnspecifiedBoolType() const { return m_ptr ? &RetainPtr::m_ptr : 0; }
-
- RetainPtr& operator=(const RetainPtr&);
- template<typename U> RetainPtr& operator=(const RetainPtr<U>&);
- RetainPtr& operator=(PtrType);
- template<typename U> RetainPtr& operator=(U*);
+#if !(defined (__OBJC__) && __has_feature(objc_arc))
+ // This function is useful for passing RetainPtrs to functions that return
+ // CF types as out parameters.
+ PtrType* operator&()
+ {
+ // Require that the pointer is null, to prevent leaks.
+ ASSERT(!m_ptr);
- RetainPtr& operator=(RetainPtr&&);
- template<typename U> RetainPtr& operator=(RetainPtr<U>&&);
+ return (PtrType*)&m_ptr;
+ }
+#endif
- void swap(RetainPtr&);
+ // This conversion operator allows implicit conversion to bool but not to other integer types.
+ typedef StorageType RetainPtr::*UnspecifiedBoolType;
+ operator UnspecifiedBoolType() const { return m_ptr ? &RetainPtr::m_ptr : nullptr; }
+
+ RetainPtr& operator=(const RetainPtr&);
+ template<typename U> RetainPtr& operator=(const RetainPtr<U>&);
+ RetainPtr& operator=(PtrType);
+ template<typename U> RetainPtr& operator=(U*);
- private:
- static PtrType hashTableDeletedValue() { return reinterpret_cast<PtrType>(-1); }
+ RetainPtr& operator=(RetainPtr&&);
+ template<typename U> RetainPtr& operator=(RetainPtr<U>&&);
-#if defined (__OBJC__) && __has_feature(objc_arc)
- template<typename U>
- typename std::enable_if<std::is_convertible<U, id>::value, PtrType>::type
- fromStorageTypeHelper(StorageType ptr) const
- {
- return (__bridge PtrType)ptr;
- }
-
- template<typename U>
- typename std::enable_if<!std::is_convertible<U, id>::value, PtrType>::type
- fromStorageTypeHelper(StorageType ptr) const
- {
- return (PtrType)ptr;
- }
-
- PtrType fromStorageType(StorageType ptr) const { return fromStorageTypeHelper<PtrType>(ptr); }
- StorageType toStorageType(id ptr) const { return (__bridge StorageType)ptr; }
- StorageType toStorageType(CFTypeRef ptr) const { return (StorageType)ptr; }
-#else
- PtrType fromStorageType(StorageType ptr) const { return (PtrType)ptr; }
- StorageType toStorageType(PtrType ptr) const { return (StorageType)ptr; }
-#endif
+ void swap(RetainPtr&);
- StorageType m_ptr;
- };
+ template<typename U> friend RetainPtr<U> adoptCF(U CF_RELEASES_ARGUMENT) WARN_UNUSED_RETURN;
+ template<typename U> friend RetainPtr<U> adoptNS(U NS_RELEASES_ARGUMENT) WARN_UNUSED_RETURN;
- template<typename T> template<typename U> inline RetainPtr<T>::RetainPtr(const RetainPtr<U>& o)
- : m_ptr(toStorageType(o.get()))
- {
- if (StorageType ptr = m_ptr)
- CFRetain(ptr);
- }
+private:
+ enum AdoptTag { Adopt };
+ RetainPtr(PtrType ptr, AdoptTag) : m_ptr(toStorageType(ptr)) { }
- template<typename T> inline void RetainPtr<T>::clear()
- {
- if (StorageType ptr = m_ptr) {
- m_ptr = 0;
- CFRelease(ptr);
- }
- }
+ static PtrType hashTableDeletedValue() { return reinterpret_cast<PtrType>(-1); }
- template<typename T> inline typename RetainPtr<T>::PtrType RetainPtr<T>::leakRef()
+#if defined (__OBJC__) && __has_feature(objc_arc)
+ template<typename U>
+ typename std::enable_if<std::is_convertible<U, id>::value, PtrType>::type
+ fromStorageTypeHelper(StorageType ptr) const
{
- PtrType ptr = fromStorageType(m_ptr);
- m_ptr = 0;
- return ptr;
+ return (__bridge PtrType)ptr;
}
- template<typename T> inline RetainPtr<T>& RetainPtr<T>::operator=(const RetainPtr& o)
+ template<typename U>
+ typename std::enable_if<!std::is_convertible<U, id>::value, PtrType>::type
+ fromStorageTypeHelper(StorageType ptr) const
{
- RetainPtr ptr = o;
- swap(ptr);
- return *this;
+ return (PtrType)ptr;
}
- template<typename T> template<typename U> inline RetainPtr<T>& RetainPtr<T>::operator=(const RetainPtr<U>& o)
- {
- RetainPtr ptr = o;
- swap(ptr);
- return *this;
- }
+ PtrType fromStorageType(StorageType ptr) const { return fromStorageTypeHelper<PtrType>(ptr); }
+ StorageType toStorageType(id ptr) const { return (__bridge StorageType)ptr; }
+ StorageType toStorageType(CFTypeRef ptr) const { return (StorageType)ptr; }
+#else
+ PtrType fromStorageType(StorageType ptr) const { return (PtrType)ptr; }
+ StorageType toStorageType(PtrType ptr) const { return (StorageType)ptr; }
+#endif
- template<typename T> inline RetainPtr<T>& RetainPtr<T>::operator=(PtrType optr)
- {
- RetainPtr ptr = optr;
- swap(ptr);
- return *this;
- }
+ StorageType m_ptr;
+};
- template<typename T> template<typename U> inline RetainPtr<T>& RetainPtr<T>::operator=(U* optr)
- {
- RetainPtr ptr = optr;
- swap(ptr);
- return *this;
- }
+template<typename T> inline RetainPtr<T>::~RetainPtr()
+{
+ if (StorageType ptr = std::exchange(m_ptr, nullptr))
+ CFRelease(ptr);
+}
- template<typename T> inline RetainPtr<T>& RetainPtr<T>::operator=(RetainPtr&& o)
- {
- RetainPtr ptr = std::move(o);
- swap(ptr);
- return *this;
- }
+// Helper function for creating a RetainPtr using template argument deduction.
+template<typename T> inline RetainPtr<T> retainPtr(T) WARN_UNUSED_RETURN;
- template<typename T> template<typename U> inline RetainPtr<T>& RetainPtr<T>::operator=(RetainPtr<U>&& o)
- {
- RetainPtr ptr = std::move(o);
- swap(ptr);
- return *this;
- }
+template<typename T> template<typename U> inline RetainPtr<T>::RetainPtr(const RetainPtr<U>& o)
+ : m_ptr(toStorageType(o.get()))
+{
+ if (StorageType ptr = m_ptr)
+ CFRetain(ptr);
+}
- template<typename T> inline void RetainPtr<T>::swap(RetainPtr& o)
- {
- std::swap(m_ptr, o.m_ptr);
- }
+template<typename T> inline void RetainPtr<T>::clear()
+{
+ if (StorageType ptr = std::exchange(m_ptr, nullptr))
+ CFRelease(ptr);
+}
- template<typename T> inline void swap(RetainPtr<T>& a, RetainPtr<T>& b)
- {
- a.swap(b);
- }
+template<typename T> inline typename RetainPtr<T>::PtrType RetainPtr<T>::leakRef()
+{
+ return fromStorageType(std::exchange(m_ptr, nullptr));
+}
- template<typename T, typename U> inline bool operator==(const RetainPtr<T>& a, const RetainPtr<U>& b)
- {
- return a.get() == b.get();
- }
+#ifdef __OBJC__
+template<typename T> inline auto RetainPtr<T>::autorelease() -> PtrType
+{
+ return (__bridge PtrType)CFBridgingRelease(leakRef());
+}
+#endif
- template<typename T, typename U> inline bool operator==(const RetainPtr<T>& a, U* b)
- {
- return a.get() == b;
- }
+template<typename T> inline RetainPtr<T>& RetainPtr<T>::operator=(const RetainPtr& o)
+{
+ RetainPtr ptr = o;
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> template<typename U> inline RetainPtr<T>& RetainPtr<T>::operator=(const RetainPtr<U>& o)
+{
+ RetainPtr ptr = o;
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> inline RetainPtr<T>& RetainPtr<T>::operator=(PtrType optr)
+{
+ RetainPtr ptr = optr;
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> template<typename U> inline RetainPtr<T>& RetainPtr<T>::operator=(U* optr)
+{
+ RetainPtr ptr = optr;
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> inline RetainPtr<T>& RetainPtr<T>::operator=(RetainPtr&& o)
+{
+ RetainPtr ptr = WTFMove(o);
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> template<typename U> inline RetainPtr<T>& RetainPtr<T>::operator=(RetainPtr<U>&& o)
+{
+ RetainPtr ptr = WTFMove(o);
+ swap(ptr);
+ return *this;
+}
+
+template<typename T> inline void RetainPtr<T>::swap(RetainPtr& o)
+{
+ std::swap(m_ptr, o.m_ptr);
+}
+
+template<typename T> inline void swap(RetainPtr<T>& a, RetainPtr<T>& b)
+{
+ a.swap(b);
+}
+
+template<typename T, typename U> inline bool operator==(const RetainPtr<T>& a, const RetainPtr<U>& b)
+{
+ return a.get() == b.get();
+}
+
+template<typename T, typename U> inline bool operator==(const RetainPtr<T>& a, U* b)
+{
+ return a.get() == b;
+}
+
+template<typename T, typename U> inline bool operator==(T* a, const RetainPtr<U>& b)
+{
+ return a == b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const RetainPtr<T>& a, const RetainPtr<U>& b)
+{
+ return a.get() != b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const RetainPtr<T>& a, U* b)
+{
+ return a.get() != b;
+}
+
+template<typename T, typename U> inline bool operator!=(T* a, const RetainPtr<U>& b)
+{
+ return a != b.get();
+}
+
+template<typename T> inline RetainPtr<T> adoptCF(T CF_RELEASES_ARGUMENT ptr)
+{
+#ifdef __OBJC__
+ static_assert((!std::is_convertible<T, id>::value), "Don't use adoptCF with Objective-C pointer types, use adoptNS.");
+#endif
+ return RetainPtr<T>(ptr, RetainPtr<T>::Adopt);
+}
- template<typename T, typename U> inline bool operator==(T* a, const RetainPtr<U>& b)
- {
- return a == b.get();
- }
+#ifdef __OBJC__
+template<typename T> inline RetainPtr<T> adoptNS(T NS_RELEASES_ARGUMENT ptr)
+{
+#if __has_feature(objc_arc)
+ return ptr;
+#elif defined(OBJC_NO_GC)
+ return RetainPtr<T>(ptr, RetainPtr<T>::Adopt);
+#else
+ RetainPtr<T> result = ptr;
+ [ptr release];
+ return result;
+#endif
+}
+#endif
- template<typename T, typename U> inline bool operator!=(const RetainPtr<T>& a, const RetainPtr<U>& b)
- {
- return a.get() != b.get();
- }
+template<typename T> inline RetainPtr<T> retainPtr(T ptr)
+{
+ return ptr;
+}
- template<typename T, typename U> inline bool operator!=(const RetainPtr<T>& a, U* b)
- {
- return a.get() != b;
- }
+template <typename T> struct IsSmartPtr<RetainPtr<T>> {
+ static const bool value = true;
+};
- template<typename T, typename U> inline bool operator!=(T* a, const RetainPtr<U>& b)
- {
- return a != b.get();
- }
+template<typename P> struct HashTraits<RetainPtr<P>> : SimpleClassHashTraits<RetainPtr<P>> {
+};
+
+template<typename P> struct DefaultHash<RetainPtr<P>> {
+ typedef PtrHash<RetainPtr<P>> Hash;
+};
- template<typename T> inline RetainPtr<T> adoptCF(T CF_RELEASES_ARGUMENT) WARN_UNUSED_RETURN;
- template<typename T> inline RetainPtr<T> adoptCF(T CF_RELEASES_ARGUMENT o)
+template <typename P>
+struct RetainPtrObjectHashTraits : SimpleClassHashTraits<RetainPtr<P>> {
+ static const RetainPtr<P>& emptyValue()
{
- return RetainPtr<T>(AdoptCF, o);
+ static RetainPtr<P>& null = *(new RetainPtr<P>);
+ return null;
}
+};
- template<typename T> inline RetainPtr<T> adoptNS(T NS_RELEASES_ARGUMENT) WARN_UNUSED_RETURN;
- template<typename T> inline RetainPtr<T> adoptNS(T NS_RELEASES_ARGUMENT o)
+template <typename P>
+struct RetainPtrObjectHash {
+ static unsigned hash(const RetainPtr<P>& o)
{
- return RetainPtr<T>(AdoptNS, o);
+ ASSERT_WITH_MESSAGE(o.get(), "attempt to use null RetainPtr in HashTable");
+ return static_cast<unsigned>(CFHash(o.get()));
}
-
- // Helper function for creating a RetainPtr using template argument deduction.
- template<typename T> inline RetainPtr<T> retainPtr(T) WARN_UNUSED_RETURN;
- template<typename T> inline RetainPtr<T> retainPtr(T o)
+ static bool equal(const RetainPtr<P>& a, const RetainPtr<P>& b)
{
- return o;
+ return CFEqual(a.get(), b.get());
}
+ static const bool safeToCompareToEmptyOrDeleted = false;
+};
- template<typename P> struct HashTraits<RetainPtr<P>> : SimpleClassHashTraits<RetainPtr<P>> { };
-
- template<typename P> struct PtrHash<RetainPtr<P>> : PtrHash<typename RetainPtr<P>::PtrType> {
- using PtrHash<typename RetainPtr<P>::PtrType>::hash;
- static unsigned hash(const RetainPtr<P>& key) { return hash(key.get()); }
- using PtrHash<typename RetainPtr<P>::PtrType>::equal;
- static bool equal(const RetainPtr<P>& a, const RetainPtr<P>& b) { return a == b; }
- static bool equal(typename RetainPtr<P>::PtrType a, const RetainPtr<P>& b) { return a == b; }
- static bool equal(const RetainPtr<P>& a, typename RetainPtr<P>::PtrType b) { return a == b; }
- };
-
- template<typename P> struct DefaultHash<RetainPtr<P>> { typedef PtrHash<RetainPtr<P>> Hash; };
-
- template <typename P>
- struct RetainPtrObjectHashTraits : SimpleClassHashTraits<RetainPtr<P>> {
- static const RetainPtr<P>& emptyValue()
- {
- static RetainPtr<P>& null = *(new RetainPtr<P>);
- return null;
- }
- };
-
- template <typename P>
- struct RetainPtrObjectHash {
- static unsigned hash(const RetainPtr<P>& o)
- {
- ASSERT_WITH_MESSAGE(o.get(), "attempt to use null RetainPtr in HashTable");
- return static_cast<unsigned>(CFHash(o.get()));
- }
- static bool equal(const RetainPtr<P>& a, const RetainPtr<P>& b)
- {
- return CFEqual(a.get(), b.get());
- }
- static const bool safeToCompareToEmptyOrDeleted = false;
- };
-
-#if !PLATFORM(IOS)
- #undef AdoptCF
- #undef AdoptNS
+#ifdef __OBJC__
+template<typename T> T* dynamic_objc_cast(id object)
+{
+ if ([object isKindOfClass:[T class]])
+ return (T *)object;
+
+ return nil;
+}
#endif
} // namespace WTF
@@ -336,9 +344,8 @@ using WTF::adoptCF;
using WTF::adoptNS;
using WTF::retainPtr;
-#if PLATFORM(IOS)
-using WTF::AdoptCF;
-using WTF::AdoptNS;
+#ifdef __OBJC__
+using WTF::dynamic_objc_cast;
#endif
#endif // USE(CF) || defined(__OBJC__)
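
A small sketch of the revised adopt/retain entry points, assuming CoreFoundation is available; the string constant and function name are illustrative:

    #include <CoreFoundation/CoreFoundation.h>
    #include <wtf/RetainPtr.h>

    void useRetainPtr()
    {
        // CFStringCreateWithCString returns a +1 reference, so adopt it rather than retaining it again.
        RetainPtr<CFStringRef> string = adoptCF(CFStringCreateWithCString(kCFAllocatorDefault, "hello", kCFStringEncodingUTF8));

        // retainPtr() retains a borrowed reference; the template argument is deduced.
        RetainPtr<CFStringRef> extra = retainPtr(string.get());

        // leakRef() hands the +1 reference back to the caller and leaves the RetainPtr null.
        CFStringRef raw = string.leakRef();
        CFRelease(raw);
    }
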
diff --git a/Source/WTF/wtf/RunLoop.cpp b/Source/WTF/wtf/RunLoop.cpp
index 11a860fd0..66593b04e 100644
--- a/Source/WTF/wtf/RunLoop.cpp
+++ b/Source/WTF/wtf/RunLoop.cpp
@@ -26,6 +26,7 @@
#include "config.h"
#include "RunLoop.h"
+#include <wtf/NeverDestroyed.h>
#include <wtf/StdLibExtras.h>
#include <wtf/ThreadSpecific.h>
@@ -37,39 +38,40 @@ static RunLoop* s_mainRunLoop;
class RunLoop::Holder {
public:
Holder()
- : m_runLoop(adoptRef(new RunLoop))
+ : m_runLoop(adoptRef(*new RunLoop))
{
}
- RunLoop* runLoop() const { return m_runLoop.get(); }
+ RunLoop& runLoop() { return m_runLoop; }
private:
- RefPtr<RunLoop> m_runLoop;
+ Ref<RunLoop> m_runLoop;
};
void RunLoop::initializeMainRunLoop()
{
if (s_mainRunLoop)
return;
- s_mainRunLoop = RunLoop::current();
+ initializeMainThread();
+ s_mainRunLoop = &RunLoop::current();
}
-RunLoop* RunLoop::current()
+RunLoop& RunLoop::current()
{
- DEFINE_STATIC_LOCAL(WTF::ThreadSpecific<RunLoop::Holder>, runLoopHolder, ());
- return runLoopHolder->runLoop();
+ static NeverDestroyed<ThreadSpecific<Holder>> runLoopHolder;
+ return runLoopHolder.get()->runLoop();
}
-RunLoop* RunLoop::main()
+RunLoop& RunLoop::main()
{
ASSERT(s_mainRunLoop);
- return s_mainRunLoop;
+ return *s_mainRunLoop;
}
bool RunLoop::isMain()
{
ASSERT(s_mainRunLoop);
- return s_mainRunLoop == RunLoop::current();
+ return s_mainRunLoop == &RunLoop::current();
}
void RunLoop::performWork()
@@ -88,22 +90,24 @@ void RunLoop::performWork()
// By only handling up to the number of functions that were in the queue when performWork() is called
// we guarantee to occasionally return from the run loop so other event sources will be allowed to spin.
- std::function<void()> function;
size_t functionsToHandle = 0;
-
{
- MutexLocker locker(m_functionQueueLock);
- functionsToHandle = m_functionQueue.size();
+ Function<void ()> function;
+ {
+ MutexLocker locker(m_functionQueueLock);
+ functionsToHandle = m_functionQueue.size();
- if (m_functionQueue.isEmpty())
- return;
+ if (m_functionQueue.isEmpty())
+ return;
- function = m_functionQueue.takeFirst();
- }
+ function = m_functionQueue.takeFirst();
+ }
- function();
+ function();
+ }
for (size_t functionsHandled = 1; functionsHandled < functionsToHandle; ++functionsHandled) {
+ Function<void ()> function;
{
MutexLocker locker(m_functionQueueLock);
@@ -120,10 +124,12 @@ void RunLoop::performWork()
}
}
-void RunLoop::dispatch(std::function<void ()> function)
+void RunLoop::dispatch(Function<void ()>&& function)
{
- MutexLocker locker(m_functionQueueLock);
- m_functionQueue.append(std::move(function));
+ {
+ MutexLocker locker(m_functionQueueLock);
+ m_functionQueue.append(WTFMove(function));
+ }
wakeUp();
}
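
A minimal dispatch sketch against the new reference-returning accessors; it assumes the main run loop has already been initialized:

    #include <wtf/RunLoop.h>

    void postToMainRunLoop()
    {
        // dispatch() appends the function to m_functionQueue under the lock and then calls wakeUp().
        // performWork() later drains at most the number of functions that were queued when it began,
        // so other event sources still get a chance to run between batches.
        RunLoop::main().dispatch([] {
            // Runs on the main run loop's thread.
        });
    }
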
diff --git a/Source/WTF/wtf/RunLoop.h b/Source/WTF/wtf/RunLoop.h
index dc13a9092..e92452795 100644
--- a/Source/WTF/wtf/RunLoop.h
+++ b/Source/WTF/wtf/RunLoop.h
@@ -28,81 +28,95 @@
#ifndef RunLoop_h
#define RunLoop_h
+#include <wtf/Condition.h>
#include <wtf/Deque.h>
#include <wtf/Forward.h>
#include <wtf/FunctionDispatcher.h>
-#include <wtf/Functional.h>
#include <wtf/HashMap.h>
#include <wtf/RetainPtr.h>
#include <wtf/Threading.h>
-#if USE(GLIB)
-#include <wtf/gobject/GRefPtr.h>
-#endif
-
-#if PLATFORM(EFL)
-#include <Ecore.h>
+#if USE(GLIB_EVENT_LOOP)
+#include <wtf/glib/GRefPtr.h>
#endif
namespace WTF {
class RunLoop : public FunctionDispatcher {
+ WTF_MAKE_NONCOPYABLE(RunLoop);
public:
// Must be called from the main thread (except for the Mac platform, where it
// can be called from any thread).
WTF_EXPORT_PRIVATE static void initializeMainRunLoop();
- WTF_EXPORT_PRIVATE static RunLoop* current();
- WTF_EXPORT_PRIVATE static RunLoop* main();
+ WTF_EXPORT_PRIVATE static RunLoop& current();
+ WTF_EXPORT_PRIVATE static RunLoop& main();
WTF_EXPORT_PRIVATE static bool isMain();
~RunLoop();
- virtual void dispatch(std::function<void()>) override;
+ void dispatch(Function<void ()>&&) override;
WTF_EXPORT_PRIVATE static void run();
WTF_EXPORT_PRIVATE void stop();
WTF_EXPORT_PRIVATE void wakeUp();
-#if PLATFORM(MAC)
+#if USE(COCOA_EVENT_LOOP)
WTF_EXPORT_PRIVATE void runForDuration(double duration);
#endif
-
+
+#if USE(GLIB_EVENT_LOOP)
+ WTF_EXPORT_PRIVATE GMainContext* mainContext() const { return m_mainContext.get(); }
+#endif
+
+#if USE(GENERIC_EVENT_LOOP)
+    // Runs a single iteration of the RunLoop: it consumes the pending tasks and expired timers, but does not block.
+ WTF_EXPORT_PRIVATE static void iterate();
+#endif
+
+#if USE(GLIB_EVENT_LOOP) || USE(GENERIC_EVENT_LOOP)
+ WTF_EXPORT_PRIVATE void dispatchAfter(std::chrono::nanoseconds, Function<void ()>&&);
+#endif
+
class TimerBase {
friend class RunLoop;
public:
- WTF_EXPORT_PRIVATE explicit TimerBase(RunLoop*);
+ WTF_EXPORT_PRIVATE explicit TimerBase(RunLoop&);
WTF_EXPORT_PRIVATE virtual ~TimerBase();
void startRepeating(double repeatInterval) { start(repeatInterval, true); }
+ void startRepeating(std::chrono::milliseconds repeatInterval) { startRepeating(repeatInterval.count() * 0.001); }
void startOneShot(double interval) { start(interval, false); }
+ void startOneShot(std::chrono::milliseconds interval) { start(interval.count() * 0.001, false); }
WTF_EXPORT_PRIVATE void stop();
WTF_EXPORT_PRIVATE bool isActive() const;
virtual void fired() = 0;
+#if USE(GLIB_EVENT_LOOP)
+ void setPriority(int);
+#endif
+
private:
WTF_EXPORT_PRIVATE void start(double nextFireInterval, bool repeat);
- RunLoop* m_runLoop;
+ RunLoop& m_runLoop;
-#if PLATFORM(WIN)
+#if USE(WINDOWS_EVENT_LOOP)
static void timerFired(RunLoop*, uint64_t ID);
uint64_t m_ID;
bool m_isRepeating;
-#elif PLATFORM(MAC)
+#elif USE(COCOA_EVENT_LOOP)
static void timerFired(CFRunLoopTimerRef, void*);
RetainPtr<CFRunLoopTimerRef> m_timer;
-#elif PLATFORM(EFL)
- static bool timerFired(void* data);
- Ecore_Timer* m_timer;
- bool m_isRepeating;
-#elif USE(GLIB)
- static gboolean timerFiredCallback(RunLoop::TimerBase*);
- gboolean isRepeating() const { return m_isRepeating; }
- void clearTimerSource();
- GRefPtr<GSource> m_timerSource;
- gboolean m_isRepeating;
+#elif USE(GLIB_EVENT_LOOP)
+ void updateReadyTime();
+ GRefPtr<GSource> m_source;
+ bool m_isRepeating { false };
+ std::chrono::microseconds m_fireInterval { 0 };
+#elif USE(GENERIC_EVENT_LOOP)
+ class ScheduledTask;
+ RefPtr<ScheduledTask> m_scheduledTask;
#endif
};
@@ -111,7 +125,7 @@ public:
public:
typedef void (TimerFiredClass::*TimerFiredFunction)();
- Timer(RunLoop* runLoop, TimerFiredClass* o, TimerFiredFunction f)
+ Timer(RunLoop& runLoop, TimerFiredClass* o, TimerFiredFunction f)
: TimerBase(runLoop)
, m_object(o)
, m_function(f)
@@ -119,7 +133,7 @@ public:
}
private:
- virtual void fired() { (m_object->*m_function)(); }
+ void fired() override { (m_object->*m_function)(); }
TimerFiredClass* m_object;
TimerFiredFunction m_function;
@@ -133,9 +147,9 @@ private:
void performWork();
Mutex m_functionQueueLock;
- Deque<std::function<void ()>> m_functionQueue;
+ Deque<Function<void ()>> m_functionQueue;
-#if PLATFORM(WIN)
+#if USE(WINDOWS_EVENT_LOOP)
static bool registerRunLoopMessageWindowClass();
static LRESULT CALLBACK RunLoopWndProc(HWND, UINT, WPARAM, LPARAM);
LRESULT wndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam);
@@ -143,30 +157,39 @@ private:
typedef HashMap<uint64_t, TimerBase*> TimerMap;
TimerMap m_activeTimers;
-#elif PLATFORM(MAC)
+#elif USE(COCOA_EVENT_LOOP)
static void performWork(void*);
RetainPtr<CFRunLoopRef> m_runLoop;
RetainPtr<CFRunLoopSourceRef> m_runLoopSource;
- int m_nestingLevel;
-#elif PLATFORM(EFL)
- bool m_initEfl;
-
- Mutex m_pipeLock;
- OwnPtr<Ecore_Pipe> m_pipe;
-
- Mutex m_wakeUpEventRequestedLock;
- bool m_wakeUpEventRequested;
+#elif USE(GLIB_EVENT_LOOP)
+ GRefPtr<GMainContext> m_mainContext;
+ Vector<GRefPtr<GMainLoop>> m_mainLoops;
+ GRefPtr<GSource> m_source;
+#elif USE(GENERIC_EVENT_LOOP)
+ void schedule(RefPtr<TimerBase::ScheduledTask>&&);
+ void schedule(const LockHolder&, RefPtr<TimerBase::ScheduledTask>&&);
+ void wakeUp(const LockHolder&);
+ void scheduleAndWakeUp(RefPtr<TimerBase::ScheduledTask>);
+
+ enum class RunMode {
+ Iterate,
+ Drain
+ };
- static void wakeUpEvent(void* data, void*, unsigned);
-#elif USE(GLIB)
-public:
- static gboolean queueWork(RunLoop*);
- GMainLoop* innermostLoop();
- void pushNestedMainLoop(GMainLoop*);
- void popNestedMainLoop();
-private:
- GRefPtr<GMainContext> m_runLoopContext;
- Vector<GRefPtr<GMainLoop>> m_runLoopMainLoops;
+ enum class Status {
+ Clear,
+ Stopping,
+ };
+ void runImpl(RunMode);
+ bool populateTasks(RunMode, Status&, Deque<RefPtr<TimerBase::ScheduledTask>>&);
+
+ Lock m_loopLock;
+ Condition m_readyToRun;
+ Condition m_stopCondition;
+ Vector<RefPtr<TimerBase::ScheduledTask>> m_schedules;
+ Vector<Status*> m_mainLoops;
+ bool m_shutdown { false };
+ bool m_pendingTasks { false };
#endif
};
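
A sketch of the updated Timer constructor, which now takes a RunLoop reference; the Throttler class is hypothetical:

    #include <chrono>
    #include <wtf/RunLoop.h>

    class Throttler {
    public:
        Throttler()
            : m_timer(RunLoop::current(), this, &Throttler::fireNow)
        {
            // Both the double-seconds and std::chrono::milliseconds overloads are available.
            m_timer.startOneShot(std::chrono::milliseconds(100));
        }

    private:
        void fireNow() { /* perform the throttled work */ }

        RunLoop::Timer<Throttler> m_timer;
    };
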
diff --git a/Source/WTF/wtf/RunLoopTimer.h b/Source/WTF/wtf/RunLoopTimer.h
new file mode 100644
index 000000000..3780e76c0
--- /dev/null
+++ b/Source/WTF/wtf/RunLoopTimer.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RunLoopTimer_h
+#define RunLoopTimer_h
+
+#include <wtf/SchedulePair.h>
+#include <wtf/RetainPtr.h>
+
+namespace WTF {
+
+// Time intervals are all in seconds.
+
+class WTF_EXPORT_PRIVATE RunLoopTimerBase {
+ WTF_MAKE_NONCOPYABLE(RunLoopTimerBase);
+public:
+ RunLoopTimerBase() { }
+ WTF_EXPORT_PRIVATE virtual ~RunLoopTimerBase();
+
+ WTF_EXPORT_PRIVATE void schedule(const SchedulePair*);
+ WTF_EXPORT_PRIVATE void schedule(const SchedulePairHashSet&);
+
+ WTF_EXPORT_PRIVATE void start(double nextFireInterval, double repeatInterval);
+
+ void startRepeating(double repeatInterval) { start(repeatInterval, repeatInterval); }
+ void startOneShot(double interval) { start(interval, 0); }
+
+ WTF_EXPORT_PRIVATE void stop();
+ bool isActive() const;
+
+ virtual void fired() = 0;
+
+private:
+#if USE(CF)
+ RetainPtr<CFRunLoopTimerRef> m_timer;
+#endif
+};
+
+// FIXME: This doesn't have to be a class template.
+template <typename TimerFiredClass> class RunLoopTimer : public RunLoopTimerBase {
+public:
+ typedef void (TimerFiredClass::*TimerFiredFunction)();
+
+ RunLoopTimer(TimerFiredClass& o, TimerFiredFunction f)
+ : m_object(&o), m_function(f) { }
+
+ virtual void fired() { (m_object->*m_function)(); }
+
+private:
+ TimerFiredClass* m_object;
+ TimerFiredFunction m_function;
+};
+
+} // namespace WTF
+
+using WTF::RunLoopTimer;
+
+#endif
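
A sketch of the RunLoopTimer added above; the Loader class and the schedulePairs argument are illustrative:

    #include <wtf/RunLoopTimer.h>
    #include <wtf/SchedulePair.h>

    // Hypothetical client; schedulePairs would come from whoever owns the target run loops.
    class Loader {
    public:
        explicit Loader(const WTF::SchedulePairHashSet& schedulePairs)
            : m_timer(*this, &Loader::timerFired)
        {
            // start() must be called before schedule(): scheduling attaches the underlying
            // CFRunLoopTimer to each run loop/mode pair.
            m_timer.startRepeating(1.0); // seconds
            m_timer.schedule(schedulePairs);
        }

    private:
        void timerFired() { /* periodic work */ }

        RunLoopTimer<Loader> m_timer;
    };
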
diff --git a/Source/WTF/wtf/RunLoopTimerCF.cpp b/Source/WTF/wtf/RunLoopTimerCF.cpp
new file mode 100644
index 000000000..ea65cbb53
--- /dev/null
+++ b/Source/WTF/wtf/RunLoopTimerCF.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2009, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if PLATFORM(COCOA) && HAVE(RUNLOOP_TIMER)
+
+#include "RunLoopTimer.h"
+
+#include "AutodrainedPool.h"
+
+namespace WTF {
+
+RunLoopTimerBase::~RunLoopTimerBase()
+{
+ stop();
+}
+
+static void timerFired(CFRunLoopTimerRef, void* context)
+{
+    // Unlike NSTimer, CFRunLoopTimer does not create an NSAutoreleasePool. This can lead to
+ // autoreleased objects being pushed into NSAutoreleasePools underneath the run loop, which
+ // are very infrequently drained. Create a new autorelease pool here to give autoreleased objects
+ // a place to collect.
+ AutodrainedPool pool;
+ RunLoopTimerBase* timer = static_cast<RunLoopTimerBase*>(context);
+ timer->fired();
+}
+
+void RunLoopTimerBase::start(double nextFireInterval, double repeatInterval)
+{
+ if (m_timer)
+ CFRunLoopTimerInvalidate(m_timer.get());
+ CFRunLoopTimerContext context = { 0, this, 0, 0, 0 };
+ m_timer = adoptCF(CFRunLoopTimerCreate(0, CFAbsoluteTimeGetCurrent() + nextFireInterval, repeatInterval, 0, 0, timerFired, &context));
+}
+
+void RunLoopTimerBase::schedule(const SchedulePair* schedulePair)
+{
+ ASSERT_ARG(schedulePair, schedulePair);
+ ASSERT_WITH_MESSAGE(m_timer, "Timer must have one of the start functions called before calling schedule().");
+ CFRunLoopAddTimer(schedulePair->runLoop(), m_timer.get(), schedulePair->mode());
+}
+
+void RunLoopTimerBase::schedule(const SchedulePairHashSet& schedulePairs)
+{
+ SchedulePairHashSet::const_iterator end = schedulePairs.end();
+ for (SchedulePairHashSet::const_iterator it = schedulePairs.begin(); it != end; ++it)
+ schedule((*it).get());
+}
+
+void RunLoopTimerBase::stop()
+{
+ if (!m_timer)
+ return;
+ CFRunLoopTimerInvalidate(m_timer.get());
+ m_timer = 0;
+}
+
+bool RunLoopTimerBase::isActive() const
+{
+ return m_timer && CFRunLoopTimerIsValid(m_timer.get());
+}
+
+} // namespace WTF
+
+#endif // PLATFORM(COCOA) && HAVE(RUNLOOP_TIMER)
diff --git a/Source/WTF/wtf/SHA1.cpp b/Source/WTF/wtf/SHA1.cpp
index 5db824fd6..2f84a1e95 100644
--- a/Source/WTF/wtf/SHA1.cpp
+++ b/Source/WTF/wtf/SHA1.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2011 Google Inc. All rights reserved.
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -28,10 +29,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-// A straightforward SHA-1 implementation based on RFC 3174.
-// http://www.ietf.org/rfc/rfc3174.txt
-// The names of functions and variables (such as "a", "b", and "f") follow notations in RFC 3174.
-
#include "config.h"
#include "SHA1.h"
@@ -42,6 +39,29 @@
namespace WTF {
+#if PLATFORM(COCOA)
+
+SHA1::SHA1()
+{
+ CC_SHA1_Init(&m_context);
+}
+
+void SHA1::addBytes(const uint8_t* input, size_t length)
+{
+ CC_SHA1_Update(&m_context, input, length);
+}
+
+void SHA1::computeHash(Digest& hash)
+{
+ CC_SHA1_Final(hash.data(), &m_context);
+}
+
+#else
+
+// A straightforward SHA-1 implementation based on RFC 3174.
+// http://www.ietf.org/rfc/rfc3174.txt
+// The names of functions and variables (such as "a", "b", and "f") follow notations in RFC 3174.
+
static inline uint32_t f(int t, uint32_t b, uint32_t c, uint32_t d)
{
ASSERT(t >= 0 && t < 80);
@@ -104,25 +124,6 @@ void SHA1::computeHash(Digest& digest)
reset();
}
-CString SHA1::hexDigest(const Digest& digest)
-{
- char* start = 0;
- CString result = CString::newUninitialized(40, start);
- char* buffer = start;
- for (size_t i = 0; i < hashSize; ++i) {
- snprintf(buffer, 3, "%02X", digest.at(i));
- buffer += 2;
- }
- return result;
-}
-
-CString SHA1::computeHexDigest()
-{
- Digest digest;
- computeHash(digest);
- return hexDigest(digest);
-}
-
void SHA1::finalize()
{
ASSERT(m_cursor < 64);
@@ -195,4 +196,25 @@ void SHA1::reset()
memset(m_buffer, 0, sizeof(m_buffer));
}
+#endif
+
+CString SHA1::hexDigest(const Digest& digest)
+{
+ char* start = 0;
+ CString result = CString::newUninitialized(40, start);
+ char* buffer = start;
+ for (size_t i = 0; i < hashSize; ++i) {
+ snprintf(buffer, 3, "%02X", digest.at(i));
+ buffer += 2;
+ }
+ return result;
+}
+
+CString SHA1::computeHexDigest()
+{
+ Digest digest;
+ computeHash(digest);
+ return hexDigest(digest);
+}
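+
+// Illustrative usage (not part of the original file); the expected hex digest is the
+// well-known RFC 3174 test vector for "abc":
+//
+//     SHA1 sha1;
+//     sha1.addBytes(reinterpret_cast<const uint8_t*>("abc"), 3);
+//     CString hex = sha1.computeHexDigest(); // "A9993E364706816ABA3E25717850C26C9CD0D89D"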
+
} // namespace WTF
diff --git a/Source/WTF/wtf/SHA1.h b/Source/WTF/wtf/SHA1.h
index 907072b72..009b484ad 100644
--- a/Source/WTF/wtf/SHA1.h
+++ b/Source/WTF/wtf/SHA1.h
@@ -35,6 +35,10 @@
#include <wtf/Vector.h>
#include <wtf/text/CString.h>
+#if PLATFORM(COCOA)
+#include <CommonCrypto/CommonDigest.h>
+#endif
+
namespace WTF {
class SHA1 {
@@ -48,11 +52,6 @@ public:
void addBytes(const CString& input)
{
const char* string = input.data();
- // Make sure that the creator of the CString didn't make the mistake
- // of forcing length() to be the size of the buffer used to create the
- // string, prior to inserting the null terminator earlier in the
- // sequence.
- ASSERT(input.length() == strlen(string));
addBytes(reinterpret_cast<const uint8_t*>(string), input.length());
}
WTF_EXPORT_PRIVATE void addBytes(const uint8_t* input, size_t length);
@@ -63,7 +62,6 @@ public:
// type for computing SHA1 hash
typedef std::array<uint8_t, hashSize> Digest;
- // computeHash has a side effect of resetting the state of the object.
WTF_EXPORT_PRIVATE void computeHash(Digest&);
// Get a hex hash from the digest.
@@ -73,6 +71,9 @@ public:
WTF_EXPORT_PRIVATE CString computeHexDigest();
private:
+#if PLATFORM(COCOA)
+ CC_SHA1_CTX m_context;
+#else
void finalize();
void processBlock();
void reset();
@@ -81,6 +82,7 @@ private:
size_t m_cursor; // Number of bytes filled in m_buffer (0-64).
uint64_t m_totalBytes; // Number of bytes added so far.
uint32_t m_hash[5];
+#endif
};
} // namespace WTF
diff --git a/Source/WTF/wtf/SaturatedArithmetic.h b/Source/WTF/wtf/SaturatedArithmetic.h
index cf9e8e17e..516e3bcbb 100644
--- a/Source/WTF/wtf/SaturatedArithmetic.h
+++ b/Source/WTF/wtf/SaturatedArithmetic.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012, Google Inc. All rights reserved.
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -31,35 +32,72 @@
#ifndef SaturatedArithmetic_h
#define SaturatedArithmetic_h
+#include "Compiler.h"
#include <limits>
#include <stdint.h>
#include <stdlib.h>
-inline int32_t saturatedAddition(int32_t a, int32_t b)
+inline bool signedAddOverflows(int32_t a, int32_t b, int32_t& result)
{
+#if COMPILER_HAS_CLANG_BUILTIN(__builtin_sadd_overflow) && !(defined __clang_major__ && __clang_major__ < 7)
+ return __builtin_sadd_overflow(a, b, &result);
+#else
uint32_t ua = a;
uint32_t ub = b;
- uint32_t result = ua + ub;
+ uint32_t uresult = ua + ub;
+ result = static_cast<int32_t>(uresult);
// Can only overflow if the signed bit of the two values match. If the signed
// bit of the result and one of the values differ it did overflow.
- if (!((ua ^ ub) >> 31) & (result ^ ua) >> 31)
- result = std::numeric_limits<int>::max() + (ua >> 31);
+ return !((ua ^ ub) >> 31) && (uresult ^ ua) >> 31;
+#endif
+}
+inline int32_t saturatedAddition(int32_t a, int32_t b)
+{
+ int32_t result;
+#if CPU(ARM_THUMB2)
+ asm("qadd %[sum], %[addend], %[augend]"
+ : [sum]"=r"(result)
+ : [augend]"r"(a), [addend]"r"(b)
+ : /* Nothing is clobbered. */
+ );
+#else
+ if (signedAddOverflows(a, b, result))
+ result = std::numeric_limits<int32_t>::max() + (static_cast<uint32_t>(a) >> 31);
+#endif
return result;
}
-inline int32_t saturatedSubtraction(int32_t a, int32_t b)
+inline bool signedSubtractOverflows(int32_t a, int32_t b, int32_t& result)
{
+#if COMPILER_HAS_CLANG_BUILTIN(__builtin_ssub_overflow) && !(defined __clang_major__ && __clang_major__ < 7)
+ return __builtin_ssub_overflow(a, b, &result);
+#else
uint32_t ua = a;
uint32_t ub = b;
- uint32_t result = ua - ub;
+ uint32_t uresult = ua - ub;
+ result = static_cast<int32_t>(uresult);
// Can only overflow if the signed bit of the two values do not match. If the
// signed bit of the result and the first value differ it did overflow.
- if ((ua ^ ub) >> 31 & (result ^ ua) >> 31)
- result = std::numeric_limits<int>::max() + (ua >> 31);
+ return (ua ^ ub) >> 31 && (uresult ^ ua) >> 31;
+#endif
+}
+inline int32_t saturatedSubtraction(int32_t a, int32_t b)
+{
+ int32_t result;
+#if CPU(ARM_THUMB2)
+ asm("qsub %[difference], %[minuend], %[subtrahend]"
+ : [difference]"=r"(result)
+ : [minuend]"r"(a), [subtrahend]"r"(b)
+ : /* Nothing is clobbered. */
+ );
+#else
+ if (signedSubtractOverflows(a, b, result))
+ result = std::numeric_limits<int32_t>::max() + (static_cast<uint32_t>(a) >> 31);
+#endif
return result;
}
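+
+// Worked example (illustrative only): both helpers clamp instead of wrapping.
+//
+//     saturatedAddition(INT32_MAX, 1)     == INT32_MAX
+//     saturatedAddition(INT32_MIN, -1)    == INT32_MIN
+//     saturatedSubtraction(INT32_MIN, 1)  == INT32_MIN
+//     saturatedSubtraction(INT32_MAX, -1) == INT32_MAX
+//
+// In the generic paths above, the (static_cast<uint32_t>(a) >> 31) term picks the saturation
+// bound: it is 0 when a is non-negative (clamp to INT32_MAX) and 1 when a is negative, where
+// INT32_MAX + 1 wraps to INT32_MIN on two's-complement targets.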
diff --git a/Source/WTF/wtf/SchedulePair.h b/Source/WTF/wtf/SchedulePair.h
new file mode 100644
index 000000000..cca971158
--- /dev/null
+++ b/Source/WTF/wtf/SchedulePair.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SchedulePair_h
+#define SchedulePair_h
+
+#include <wtf/HashSet.h>
+#include <wtf/RetainPtr.h>
+#include <wtf/ThreadSafeRefCounted.h>
+#include <wtf/text/StringHash.h>
+#include <wtf/text/WTFString.h>
+
+#if PLATFORM(COCOA) && !USE(CFURLCONNECTION)
+OBJC_CLASS NSRunLoop;
+#endif
+
+namespace WTF {
+
+class SchedulePair : public ThreadSafeRefCounted<SchedulePair> {
+public:
+ static Ref<SchedulePair> create(CFRunLoopRef runLoop, CFStringRef mode) { return adoptRef(*new SchedulePair(runLoop, mode)); }
+
+#if PLATFORM(COCOA) && !USE(CFURLCONNECTION)
+ static Ref<SchedulePair> create(NSRunLoop* runLoop, CFStringRef mode) { return adoptRef(*new SchedulePair(runLoop, mode)); }
+ NSRunLoop* nsRunLoop() const { return m_nsRunLoop.get(); }
+#endif
+
+ CFRunLoopRef runLoop() const { return m_runLoop.get(); }
+ CFStringRef mode() const { return m_mode.get(); }
+
+ WTF_EXPORT_PRIVATE bool operator==(const SchedulePair& other) const;
+
+private:
+ SchedulePair(CFRunLoopRef runLoop, CFStringRef mode)
+ : m_runLoop(runLoop)
+ {
+ if (mode)
+ m_mode = adoptCF(CFStringCreateCopy(0, mode));
+ }
+
+#if PLATFORM(COCOA) && !USE(CFURLCONNECTION)
+ WTF_EXPORT_PRIVATE SchedulePair(NSRunLoop*, CFStringRef);
+ RetainPtr<NSRunLoop*> m_nsRunLoop;
+#endif
+
+ RetainPtr<CFRunLoopRef> m_runLoop;
+ RetainPtr<CFStringRef> m_mode;
+};
+
+struct SchedulePairHash {
+ static unsigned hash(const RefPtr<SchedulePair>& pair)
+ {
+ uintptr_t hashCodes[2] = { reinterpret_cast<uintptr_t>(pair->runLoop()), pair->mode() ? CFHash(pair->mode()) : 0 };
+ return StringHasher::hashMemory<sizeof(hashCodes)>(hashCodes);
+ }
+
+ static bool equal(const RefPtr<SchedulePair>& a, const RefPtr<SchedulePair>& b) { return a == b; }
+
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+typedef HashSet<RefPtr<SchedulePair>, SchedulePairHash> SchedulePairHashSet;
+
+} // namespace WTF
+
+using WTF::SchedulePair;
+using WTF::SchedulePairHashSet;
+
+#endif
diff --git a/Source/WTF/wtf/gtk/MainThreadGtk.cpp b/Source/WTF/wtf/SchedulePairCF.cpp
index c057ea756..ce0a23c10 100644
--- a/Source/WTF/wtf/gtk/MainThreadGtk.cpp
+++ b/Source/WTF/wtf/SchedulePairCF.cpp
@@ -1,6 +1,5 @@
/*
- * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
- * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
+ * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,25 +27,19 @@
*/
#include "config.h"
-#include "MainThread.h"
-
-#include <glib.h>
+#include "SchedulePair.h"
namespace WTF {
-void initializeMainThreadPlatform()
-{
-}
-
-static gboolean timeoutFired(gpointer)
-{
- dispatchFunctionsFromMainThread();
- return FALSE;
-}
-
-void scheduleDispatchFunctionsOnMainThread()
+bool SchedulePair::operator==(const SchedulePair& other) const
{
- g_idle_add_full(G_PRIORITY_DEFAULT, timeoutFired, 0, 0);
+ if (runLoop() != other.runLoop())
+ return false;
+ CFStringRef thisMode = mode();
+ CFStringRef otherMode = other.mode();
+ if (!thisMode || !otherMode)
+ return thisMode == otherMode;
+ return CFEqual(thisMode, otherMode);
}
-} // namespace WTF
+} // namespace
diff --git a/Source/WTF/wtf/Scope.h b/Source/WTF/wtf/Scope.h
new file mode 100644
index 000000000..97131d459
--- /dev/null
+++ b/Source/WTF/wtf/Scope.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <type_traits>
+#include <wtf/StdLibExtras.h>
+
+// Based on http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0052r2.pdf
+
+namespace WTF {
+
+template<typename ExitFunction>
+class ScopeExit final {
+public:
+ template<typename ExitFunctionParameter>
+ explicit ScopeExit(ExitFunctionParameter&& exitFunction)
+ : m_exitFunction(std::forward<ExitFunction>(exitFunction))
+ {
+ }
+
+ ScopeExit(ScopeExit&& other)
+ : m_exitFunction(WTFMove(other.m_exitFunction))
+ , m_executeOnDestruction(std::exchange(other.m_executeOnDestruction, false))
+ {
+ }
+
+ ~ScopeExit()
+ {
+ if (m_executeOnDestruction)
+ m_exitFunction();
+ }
+
+ void release()
+ {
+ m_executeOnDestruction = false;
+ }
+
+ ScopeExit(const ScopeExit&) = delete;
+ ScopeExit& operator=(const ScopeExit&) = delete;
+ ScopeExit& operator=(ScopeExit&&) = delete;
+
+private:
+ ExitFunction m_exitFunction;
+ bool m_executeOnDestruction { true };
+};
+
+template<typename ExitFunction>
+ScopeExit<ExitFunction> makeScopeExit(ExitFunction&& exitFunction)
+{
+ return ScopeExit<ExitFunction>(std::forward<ExitFunction>(exitFunction));
+}
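+
+// Illustrative usage, not part of the original header ("file" is just an example resource):
+//
+//     FILE* file = fopen(path, "r");
+//     auto closer = makeScopeExit([&] {
+//         if (file)
+//             fclose(file);
+//     });
+//     // ... every return path below runs the lambda when the scope unwinds;
+//     // call closer.release() first to keep the file open past this scope.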
+
+}
+
+using WTF::ScopeExit;
+using WTF::makeScopeExit;
diff --git a/Source/WTF/wtf/ScopedLambda.h b/Source/WTF/wtf/ScopedLambda.h
new file mode 100644
index 000000000..8b4ab3860
--- /dev/null
+++ b/Source/WTF/wtf/ScopedLambda.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ScopedLambda_h
+#define ScopedLambda_h
+
+namespace WTF {
+
+// You can use ScopedLambda to efficiently pass lambdas without allocating memory or requiring
+// template specialization of the callee. The callee should be declared as:
+//
+// void foo(const ScopedLambda<MyThings* (int, Stuff&)>&);
+//
+// The caller just does:
+//
+// void foo(scopedLambda<MyThings* (int, Stuff&)>([&] (int x, Stuff& y) -> MyThings* { blah }));
+//
+// Note that this relies on foo() not escaping the lambda. The lambda is only valid while foo() is
+// on the stack - hence the name ScopedLambda.
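+//
+// A minimal callee sketch (sumOf and transform are illustrative names, not from this header);
+// the callee takes the lambda by const reference and never stores it:
+//
+//     int sumOf(const int* values, size_t count, const ScopedLambda<int(int)>& transform)
+//     {
+//         int sum = 0;
+//         for (size_t i = 0; i < count; ++i)
+//             sum += transform(values[i]);
+//         return sum;
+//     }
+//
+//     int doubledSum = sumOf(values, count, scopedLambda<int(int)>([] (int x) { return 2 * x; }));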
+
+template<typename FunctionType> class ScopedLambda;
+template<typename ResultType, typename... ArgumentTypes>
+class ScopedLambda<ResultType (ArgumentTypes...)> {
+public:
+ ScopedLambda(ResultType (*impl)(void* arg, ArgumentTypes...) = nullptr, void* arg = nullptr)
+ : m_impl(impl)
+ , m_arg(arg)
+ {
+ }
+
+ template<typename... PassedArgumentTypes>
+ ResultType operator()(PassedArgumentTypes&&... arguments) const
+ {
+ return m_impl(m_arg, std::forward<PassedArgumentTypes>(arguments)...);
+ }
+
+private:
+ ResultType (*m_impl)(void* arg, ArgumentTypes...);
+ void *m_arg;
+};
+
+template<typename FunctionType, typename Functor> class ScopedLambdaFunctor;
+template<typename ResultType, typename... ArgumentTypes, typename Functor>
+class ScopedLambdaFunctor<ResultType (ArgumentTypes...), Functor> : public ScopedLambda<ResultType (ArgumentTypes...)> {
+public:
+ template<typename PassedFunctor>
+ ScopedLambdaFunctor(PassedFunctor&& functor)
+ : ScopedLambda<ResultType (ArgumentTypes...)>(implFunction, this)
+ , m_functor(std::forward<PassedFunctor>(functor))
+ {
+ }
+
+ // We need to make sure that copying and moving ScopedLambdaFunctor results in a ScopedLambdaFunctor
+ // whose ScopedLambda supertype still points to this rather than other.
+ ScopedLambdaFunctor(const ScopedLambdaFunctor& other)
+ : ScopedLambda<ResultType (ArgumentTypes...)>(implFunction, this)
+ , m_functor(other.m_functor)
+ {
+ }
+
+ ScopedLambdaFunctor(ScopedLambdaFunctor&& other)
+ : ScopedLambda<ResultType (ArgumentTypes...)>(implFunction, this)
+ , m_functor(WTFMove(other.m_functor))
+ {
+ }
+
+ ScopedLambdaFunctor& operator=(const ScopedLambdaFunctor& other)
+ {
+ m_functor = other.m_functor;
+ return *this;
+ }
+
+ ScopedLambdaFunctor& operator=(ScopedLambdaFunctor&& other)
+ {
+ m_functor = WTFMove(other.m_functor);
+ return *this;
+ }
+
+private:
+ static ResultType implFunction(void* argument, ArgumentTypes... arguments)
+ {
+ return static_cast<ScopedLambdaFunctor*>(argument)->m_functor(arguments...);
+ }
+
+ Functor m_functor;
+};
+
+// Can't simply rely on perfect forwarding because then the ScopedLambdaFunctor would point to the functor
+// by const reference. This would be surprising in situations like:
+//
+// auto scopedLambda = scopedLambda<Foo(Bar)>([&] (Bar) -> Foo { ... });
+//
+// We expect scopedLambda to be valid for its entire lifetime, but if the functor were captured by
+// reference then it would be dangling immediately, since the temporary lambda dies at the end of
+// the full expression.
+template<typename FunctionType, typename Functor>
+ScopedLambdaFunctor<FunctionType, Functor> scopedLambda(const Functor& functor)
+{
+ return ScopedLambdaFunctor<FunctionType, Functor>(functor);
+}
+
+template<typename FunctionType, typename Functor>
+ScopedLambdaFunctor<FunctionType, Functor> scopedLambda(Functor&& functor)
+{
+ return ScopedLambdaFunctor<FunctionType, Functor>(WTFMove(functor));
+}
+
+template<typename FunctionType, typename Functor> class ScopedLambdaRefFunctor;
+template<typename ResultType, typename... ArgumentTypes, typename Functor>
+class ScopedLambdaRefFunctor<ResultType (ArgumentTypes...), Functor> : public ScopedLambda<ResultType (ArgumentTypes...)> {
+public:
+ ScopedLambdaRefFunctor(const Functor& functor)
+ : ScopedLambda<ResultType (ArgumentTypes...)>(implFunction, this)
+ , m_functor(&functor)
+ {
+ }
+
+ // We need to make sure that copying and moving ScopedLambdaRefFunctor results in a
+ // ScopedLambdaRefFunctor whose ScopedLambda supertype still points to this rather than
+ // other.
+ ScopedLambdaRefFunctor(const ScopedLambdaRefFunctor& other)
+ : ScopedLambda<ResultType (ArgumentTypes...)>(implFunction, this)
+ , m_functor(other.m_functor)
+ {
+ }
+
+ ScopedLambdaRefFunctor(ScopedLambdaRefFunctor&& other)
+ : ScopedLambda<ResultType (ArgumentTypes...)>(implFunction, this)
+ , m_functor(other.m_functor)
+ {
+ }
+
+ ScopedLambdaRefFunctor& operator=(const ScopedLambdaRefFunctor& other)
+ {
+ m_functor = other.m_functor;
+ return *this;
+ }
+
+ ScopedLambdaRefFunctor& operator=(ScopedLambdaRefFunctor&& other)
+ {
+ m_functor = other.m_functor;
+ return *this;
+ }
+
+private:
+ static ResultType implFunction(void* argument, ArgumentTypes... arguments)
+ {
+ return (*static_cast<ScopedLambdaRefFunctor*>(argument)->m_functor)(arguments...);
+ }
+
+ const Functor* m_functor;
+};
+
+// This is for when you already refer to a functor by reference, and you know its lifetime is
+// good. This just creates a ScopedLambda that points to your functor.
+template<typename FunctionType, typename Functor>
+ScopedLambdaRefFunctor<FunctionType, Functor> scopedLambdaRef(const Functor& functor)
+{
+ return ScopedLambdaRefFunctor<FunctionType, Functor>(functor);
+}
+
+} // namespace WTF
+
+using WTF::ScopedLambda;
+using WTF::scopedLambda;
+using WTF::scopedLambdaRef;
+
+#endif // ScopedLambda_h
+
diff --git a/Source/WTF/wtf/Seconds.cpp b/Source/WTF/wtf/Seconds.cpp
new file mode 100644
index 000000000..c9d300328
--- /dev/null
+++ b/Source/WTF/wtf/Seconds.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Seconds.h"
+
+#include "CurrentTime.h"
+#include "MonotonicTime.h"
+#include "PrintStream.h"
+#include "TimeWithDynamicClockType.h"
+#include "WallTime.h"
+
+namespace WTF {
+
+WallTime Seconds::operator+(WallTime other) const
+{
+ return other + *this;
+}
+
+MonotonicTime Seconds::operator+(MonotonicTime other) const
+{
+ return other + *this;
+}
+
+TimeWithDynamicClockType Seconds::operator+(const TimeWithDynamicClockType& other) const
+{
+ return other + *this;
+}
+
+WallTime Seconds::operator-(WallTime other) const
+{
+ return WallTime::fromRawSeconds(value() - other.secondsSinceEpoch().value());
+}
+
+MonotonicTime Seconds::operator-(MonotonicTime other) const
+{
+ return MonotonicTime::fromRawSeconds(value() - other.secondsSinceEpoch().value());
+}
+
+TimeWithDynamicClockType Seconds::operator-(const TimeWithDynamicClockType& other) const
+{
+ return other.withSameClockAndRawSeconds(value() - other.secondsSinceEpoch().value());
+}
+
+void Seconds::dump(PrintStream& out) const
+{
+ out.print(m_value, " sec");
+}
+
+void sleep(Seconds value)
+{
+ sleep(value.value());
+}
+
+} // namespace WTF
+
diff --git a/Source/WTF/wtf/Seconds.h b/Source/WTF/wtf/Seconds.h
new file mode 100644
index 000000000..5b8a7a369
--- /dev/null
+++ b/Source/WTF/wtf/Seconds.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Seconds_h
+#define WTF_Seconds_h
+
+#include <wtf/MathExtras.h>
+
+namespace WTF {
+
+class MonotonicTime;
+class PrintStream;
+class TimeWithDynamicClockType;
+class WallTime;
+
+class Seconds {
+public:
+ Seconds() { }
+
+ explicit constexpr Seconds(double value)
+ : m_value(value)
+ {
+ }
+
+ double value() const { return m_value; }
+
+ double minutes() const { return m_value / 60; }
+ double seconds() const { return m_value; }
+ double milliseconds() const { return seconds() * 1000; }
+ double microseconds() const { return milliseconds() * 1000; }
+ double nanoseconds() const { return microseconds() * 1000; }
+
+ static constexpr Seconds fromMinutes(double minutes)
+ {
+ return Seconds(minutes * 60);
+ }
+
+ static constexpr Seconds fromMilliseconds(double milliseconds)
+ {
+ return Seconds(milliseconds / 1000);
+ }
+
+ static constexpr Seconds fromMicroseconds(double microseconds)
+ {
+ return fromMilliseconds(microseconds / 1000);
+ }
+
+ static constexpr Seconds fromNanoseconds(double nanoseconds)
+ {
+ return fromMicroseconds(nanoseconds / 1000);
+ }
+
+ static constexpr Seconds infinity()
+ {
+ return Seconds(std::numeric_limits<double>::infinity());
+ }
+
+ explicit operator bool() const { return !!m_value; }
+
+ Seconds operator+(Seconds other) const
+ {
+ return Seconds(value() + other.value());
+ }
+
+ Seconds operator-(Seconds other) const
+ {
+ return Seconds(value() - other.value());
+ }
+
+ Seconds operator-() const
+ {
+ return Seconds(-value());
+ }
+
+ // It makes sense to consider scaling a duration, like, "I want to wait 5 times as long as
+ // last time!".
+ Seconds operator*(double scalar) const
+ {
+ return Seconds(value() * scalar);
+ }
+
+ Seconds operator/(double scalar) const
+ {
+ return Seconds(value() / scalar);
+ }
+
+ // It's reasonable to think about ratios between Seconds.
+ double operator/(Seconds other) const
+ {
+ return value() / other.value();
+ }
+
+ Seconds operator%(double scalar) const
+ {
+ return Seconds(fmod(value(), scalar));
+ }
+
+ // This solves for r, where:
+ //
+ // floor(this / other) + r / other = this / other
+ //
+ // Therefore, if this is Seconds then r is Seconds.
+ Seconds operator%(Seconds other) const
+ {
+ return Seconds(fmod(value(), other.value()));
+ }
+
+ Seconds& operator+=(Seconds other)
+ {
+ return *this = *this + other;
+ }
+
+ Seconds& operator-=(Seconds other)
+ {
+ return *this = *this - other;
+ }
+
+ Seconds& operator*=(double scalar)
+ {
+ return *this = *this * scalar;
+ }
+
+ Seconds& operator/=(double scalar)
+ {
+ return *this = *this / scalar;
+ }
+
+ Seconds& operator%=(double scalar)
+ {
+ return *this = *this % scalar;
+ }
+
+ Seconds& operator%=(Seconds other)
+ {
+ return *this = *this % other;
+ }
+
+ WTF_EXPORT_PRIVATE WallTime operator+(WallTime) const;
+ WTF_EXPORT_PRIVATE MonotonicTime operator+(MonotonicTime) const;
+ WTF_EXPORT_PRIVATE TimeWithDynamicClockType operator+(const TimeWithDynamicClockType&) const;
+
+ WTF_EXPORT_PRIVATE WallTime operator-(WallTime) const;
+ WTF_EXPORT_PRIVATE MonotonicTime operator-(MonotonicTime) const;
+ WTF_EXPORT_PRIVATE TimeWithDynamicClockType operator-(const TimeWithDynamicClockType&) const;
+
+ bool operator==(Seconds other) const
+ {
+ return m_value == other.m_value;
+ }
+
+ bool operator!=(Seconds other) const
+ {
+ return m_value != other.m_value;
+ }
+
+ bool operator<(Seconds other) const
+ {
+ return m_value < other.m_value;
+ }
+
+ bool operator>(Seconds other) const
+ {
+ return m_value > other.m_value;
+ }
+
+ bool operator<=(Seconds other) const
+ {
+ return m_value <= other.m_value;
+ }
+
+ bool operator>=(Seconds other) const
+ {
+ return m_value >= other.m_value;
+ }
+
+ WTF_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+private:
+ double m_value { 0 };
+};
+
+inline namespace seconds_literals {
+
+constexpr Seconds operator"" _min(long double minutes)
+{
+ return Seconds::fromMinutes(minutes);
+}
+
+constexpr Seconds operator"" _s(long double seconds)
+{
+ return Seconds(seconds);
+}
+
+constexpr Seconds operator"" _ms(long double milliseconds)
+{
+ return Seconds::fromMilliseconds(milliseconds);
+}
+
+constexpr Seconds operator"" _us(long double microseconds)
+{
+ return Seconds::fromMicroseconds(microseconds);
+}
+
+constexpr Seconds operator"" _ns(long double nanoseconds)
+{
+ return Seconds::fromNanoseconds(nanoseconds);
+}
+
+constexpr Seconds operator"" _min(unsigned long long minutes)
+{
+ return Seconds::fromMinutes(minutes);
+}
+
+constexpr Seconds operator"" _s(unsigned long long seconds)
+{
+ return Seconds(seconds);
+}
+
+constexpr Seconds operator"" _ms(unsigned long long milliseconds)
+{
+ return Seconds::fromMilliseconds(milliseconds);
+}
+
+constexpr Seconds operator"" _us(unsigned long long microseconds)
+{
+ return Seconds::fromMicroseconds(microseconds);
+}
+
+constexpr Seconds operator"" _ns(unsigned long long nanoseconds)
+{
+ return Seconds::fromNanoseconds(nanoseconds);
+}
+
+} // inline seconds_literals
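+
+// Illustrative usage (assuming the literal suffixes below are in scope via this header):
+//
+//     Seconds timeout = 250_ms;                // 0.25 seconds
+//     timeout += 1.5_s;                        // 1.75 seconds
+//     double millis = timeout.milliseconds();  // 1750.0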
+
+WTF_EXPORT_PRIVATE void sleep(Seconds);
+
+} // namespace WTF
+
+using namespace WTF::seconds_literals;
+using WTF::Seconds;
+
+#endif // WTF_Seconds_h
diff --git a/Source/WTF/wtf/SegmentedVector.h b/Source/WTF/wtf/SegmentedVector.h
index 048aa5366..3e3fe0072 100644
--- a/Source/WTF/wtf/SegmentedVector.h
+++ b/Source/WTF/wtf/SegmentedVector.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -35,65 +35,50 @@
namespace WTF {
// An iterator for SegmentedVector. It supports only the pre ++ operator
- template <typename T, size_t SegmentSize = 8, size_t InlineCapacity = 32> class SegmentedVector;
- template <typename T, size_t SegmentSize = 8, size_t InlineCapacity = 32> class SegmentedVectorIterator {
+ template <typename T, size_t SegmentSize = 8> class SegmentedVector;
+ template <typename T, size_t SegmentSize = 8> class SegmentedVectorIterator {
private:
- friend class SegmentedVector<T, SegmentSize, InlineCapacity>;
+ friend class SegmentedVector<T, SegmentSize>;
public:
- typedef SegmentedVectorIterator<T, SegmentSize, InlineCapacity> Iterator;
+ typedef SegmentedVectorIterator<T, SegmentSize> Iterator;
~SegmentedVectorIterator() { }
- T& operator*() const { return m_vector.m_segments.at(m_segment)->at(m_index); }
- T* operator->() const { return &m_vector.m_segments.at(m_segment)->at(m_index); }
+ T& operator*() const { return m_vector.at(m_index); }
+ T* operator->() const { return &m_vector.at(m_index); }
// Only prefix ++ operator supported
Iterator& operator++()
{
- ASSERT(m_index != SegmentSize);
- ++m_index;
- if (m_index >= m_vector.m_segments.at(m_segment)->size()) {
- if (m_segment + 1 < m_vector.m_segments.size()) {
- ASSERT(m_vector.m_segments.at(m_segment)->size() > 0);
- ++m_segment;
- m_index = 0;
- } else {
- // Points to the "end" symbol
- m_segment = 0;
- m_index = SegmentSize;
- }
- }
+ m_index++;
return *this;
}
bool operator==(const Iterator& other) const
{
- return m_index == other.m_index && m_segment == other.m_segment && &m_vector == &other.m_vector;
+ return m_index == other.m_index && &m_vector == &other.m_vector;
}
bool operator!=(const Iterator& other) const
{
- return m_index != other.m_index || m_segment != other.m_segment || &m_vector != &other.m_vector;
+ return m_index != other.m_index || &m_vector != &other.m_vector;
}
- SegmentedVectorIterator& operator=(const SegmentedVectorIterator<T, SegmentSize, InlineCapacity>& other)
+ SegmentedVectorIterator& operator=(const SegmentedVectorIterator<T, SegmentSize>& other)
{
m_vector = other.m_vector;
- m_segment = other.m_segment;
m_index = other.m_index;
return *this;
}
private:
- SegmentedVectorIterator(SegmentedVector<T, SegmentSize, InlineCapacity>& vector, size_t segment, size_t index)
+ SegmentedVectorIterator(SegmentedVector<T, SegmentSize>& vector, size_t index)
: m_vector(vector)
- , m_segment(segment)
, m_index(index)
{
}
- SegmentedVector<T, SegmentSize, InlineCapacity>& m_vector;
- size_t m_segment;
+ SegmentedVector<T, SegmentSize>& m_vector;
size_t m_index;
};
@@ -101,19 +86,17 @@ namespace WTF {
// stored in its buffer when it grows. Therefore, it is safe to keep
// pointers into a SegmentedVector. The default tuning values are
// optimized for segmented vectors that get large; you may want to use
- // SegmentedVector<thingy, 1, 0> if you don't expect a lot of entries.
- template <typename T, size_t SegmentSize, size_t InlineCapacity>
+ // SegmentedVector<thingy, 1> if you don't expect a lot of entries.
+ template <typename T, size_t SegmentSize>
class SegmentedVector {
- friend class SegmentedVectorIterator<T, SegmentSize, InlineCapacity>;
+ friend class SegmentedVectorIterator<T, SegmentSize>;
WTF_MAKE_NONCOPYABLE(SegmentedVector);
+ WTF_MAKE_FAST_ALLOCATED;
public:
- typedef SegmentedVectorIterator<T, SegmentSize, InlineCapacity> Iterator;
+ typedef SegmentedVectorIterator<T, SegmentSize> Iterator;
- SegmentedVector()
- : m_size(0)
- {
- }
+ SegmentedVector() = default;
~SegmentedVector()
{
@@ -125,12 +108,13 @@ namespace WTF {
T& at(size_t index)
{
- return segmentFor(index)->at(subscriptFor(index));
+ ASSERT_WITH_SECURITY_IMPLICATION(index < m_size);
+ return segmentFor(index)->entries[subscriptFor(index)];
}
const T& at(size_t index) const
{
- return const_cast<SegmentedVector<T, SegmentSize, InlineCapacity>*>(this)->at(index);
+ return const_cast<SegmentedVector<T, SegmentSize>*>(this)->at(index);
}
T& operator[](size_t index)
@@ -143,29 +127,54 @@ namespace WTF {
return at(index);
}
+ T& first()
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(!isEmpty());
+ return at(0);
+ }
+ const T& first() const
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(!isEmpty());
+ return at(0);
+ }
T& last()
{
+ ASSERT_WITH_SECURITY_IMPLICATION(!isEmpty());
+ return at(size() - 1);
+ }
+ const T& last() const
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(!isEmpty());
return at(size() - 1);
}
- template <typename U> void append(const U& value)
+ T takeLast()
{
- ++m_size;
+ ASSERT_WITH_SECURITY_IMPLICATION(!isEmpty());
+ T result = WTFMove(last());
+ --m_size;
+ return result;
+ }
+ template<typename... Args>
+ void append(Args&&... args)
+ {
+ ++m_size;
if (!segmentExistsFor(m_size - 1))
- m_segments.append(new Segment);
- segmentFor(m_size - 1)->uncheckedAppend(value);
+ allocateSegment();
+ new (NotNull, &last()) T(std::forward<Args>(args)...);
}
- T& alloc()
+ template<typename... Args>
+ T& alloc(Args&&... args)
{
- append<T>(T());
+ append(std::forward<Args>(args)...);
return last();
}
void removeLast()
{
- segmentFor(m_size - 1)->removeLast();
+ last().~T();
--m_size;
}
@@ -173,7 +182,10 @@ namespace WTF {
{
ASSERT(size > m_size);
ensureSegmentsFor(size);
+ size_t oldSize = m_size;
m_size = size;
+ for (size_t i = oldSize; i < m_size; ++i)
+ new (NotNull, &at(i)) T();
}
void clear()
@@ -185,12 +197,12 @@ namespace WTF {
Iterator begin()
{
- return Iterator(*this, 0, m_size ? 0 : SegmentSize);
+ return Iterator(*this, 0);
}
Iterator end()
{
- return Iterator(*this, 0, SegmentSize);
+ return Iterator(*this, m_size);
}
void shrinkToFit()
@@ -199,12 +211,23 @@ namespace WTF {
}
private:
- typedef Vector<T, SegmentSize> Segment;
+ struct Segment {
+#if COMPILER(MSVC)
+#pragma warning(push)
+#pragma warning(disable: 4200)
+#endif
+ T entries[0];
+#if COMPILER(MSVC)
+#pragma warning(pop)
+#endif
+ };
void deleteAllSegments()
{
- for (size_t i = 0; i < m_segments.size(); i++)
- delete m_segments[i];
+ for (size_t i = 0; i < m_size; ++i)
+ at(i).~T();
+ for (size_t i = 0; i < m_segments.size(); ++i)
+ fastFree(m_segments[i]);
}
bool segmentExistsFor(size_t index)
@@ -227,24 +250,23 @@ namespace WTF {
size_t segmentCount = (m_size + SegmentSize - 1) / SegmentSize;
size_t neededSegmentCount = (size + SegmentSize - 1) / SegmentSize;
- // Fill up to N - 1 segments.
- size_t end = neededSegmentCount - 1;
- for (size_t i = segmentCount ? segmentCount - 1 : 0; i < end; ++i)
- ensureSegment(i, SegmentSize);
-
- // Grow segment N to accomodate the remainder.
- ensureSegment(end, subscriptFor(size - 1) + 1);
+ for (size_t i = segmentCount ? segmentCount - 1 : 0; i < neededSegmentCount; ++i)
+ ensureSegment(i);
}
- void ensureSegment(size_t segmentIndex, size_t size)
+ void ensureSegment(size_t segmentIndex)
{
ASSERT_WITH_SECURITY_IMPLICATION(segmentIndex <= m_segments.size());
if (segmentIndex == m_segments.size())
- m_segments.append(new Segment);
- m_segments[segmentIndex]->grow(size);
+ allocateSegment();
+ }
+
+ void allocateSegment()
+ {
+ m_segments.append(static_cast<Segment*>(fastMalloc(sizeof(T) * SegmentSize)));
}
- size_t m_size;
+ size_t m_size { 0 };
Vector<Segment*> m_segments;
};
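+
+// Illustrative usage, not part of the original header: pointers into a SegmentedVector
+// stay valid as it grows, unlike pointers into a plain Vector.
+//
+//     SegmentedVector<int, 8> numbers;
+//     numbers.append(1);
+//     int* first = &numbers[0];
+//     for (int i = 2; i <= 1000; ++i)
+//         numbers.append(i);
+//     ASSERT(*first == 1); // still valid; existing segments are never reallocated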
diff --git a/Source/WTF/wtf/SentinelLinkedList.h b/Source/WTF/wtf/SentinelLinkedList.h
index 134ce80c7..60eb7f049 100644
--- a/Source/WTF/wtf/SentinelLinkedList.h
+++ b/Source/WTF/wtf/SentinelLinkedList.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -43,6 +43,7 @@ enum SentinelTag { Sentinel };
template<typename T>
class BasicRawSentinelNode {
+ WTF_MAKE_FAST_ALLOCATED;
public:
BasicRawSentinelNode(SentinelTag)
: m_next(0)
@@ -69,6 +70,9 @@ public:
}
void remove();
+
+ void prepend(BasicRawSentinelNode*);
+ void append(BasicRawSentinelNode*);
private:
BasicRawSentinelNode* m_next;
@@ -81,8 +85,15 @@ public:
SentinelLinkedList();
+ // Pushes to the front of the list. It's totally backwards from what you'd expect.
void push(T*);
+
+ // Appends to the end of the list.
+ void append(T*);
+
static void remove(T*);
+ static void prepend(T* existingNode, T* newNode);
+ static void append(T* existingNode, T* newNode);
bool isOnList(T*);
@@ -90,7 +101,19 @@ public:
iterator end();
bool isEmpty() { return begin() == end(); }
-
+
+ template<typename Func>
+ void forEach(const Func& func)
+ {
+ for (iterator iter = begin(); iter != end();) {
+ iterator next = iter->next();
+ func(iter);
+ iter = next;
+ }
+ }
+
+ void takeFrom(SentinelLinkedList<T, RawNode>&);
+
private:
RawNode m_headSentinel;
RawNode m_tailSentinel;
@@ -101,6 +124,18 @@ template <typename T> void BasicRawSentinelNode<T>::remove()
SentinelLinkedList<T, BasicRawSentinelNode<T>>::remove(static_cast<T*>(this));
}
+template <typename T> void BasicRawSentinelNode<T>::prepend(BasicRawSentinelNode* node)
+{
+ SentinelLinkedList<T, BasicRawSentinelNode<T>>::prepend(
+ static_cast<T*>(this), static_cast<T*>(node));
+}
+
+template <typename T> void BasicRawSentinelNode<T>::append(BasicRawSentinelNode* node)
+{
+ SentinelLinkedList<T, BasicRawSentinelNode<T>>::append(
+ static_cast<T*>(this), static_cast<T*>(node));
+}
+
template <typename T, typename RawNode> inline SentinelLinkedList<T, RawNode>::SentinelLinkedList()
: m_headSentinel(Sentinel)
, m_tailSentinel(Sentinel)
@@ -138,6 +173,22 @@ template <typename T, typename RawNode> inline void SentinelLinkedList<T, RawNod
next->setPrev(node);
}
+template <typename T, typename RawNode> inline void SentinelLinkedList<T, RawNode>::append(T* node)
+{
+ ASSERT(node);
+ ASSERT(!node->prev());
+ ASSERT(!node->next());
+
+ RawNode* prev = m_tailSentinel.prev();
+ RawNode* next = &m_tailSentinel;
+
+ node->setPrev(prev);
+ node->setNext(next);
+
+ prev->setNext(node);
+ next->setPrev(node);
+}
+
template <typename T, typename RawNode> inline void SentinelLinkedList<T, RawNode>::remove(T* node)
{
ASSERT(node);
@@ -154,6 +205,44 @@ template <typename T, typename RawNode> inline void SentinelLinkedList<T, RawNod
node->setNext(0);
}
+template <typename T, typename RawNode>
+inline void SentinelLinkedList<T, RawNode>::prepend(T* existingNode, T* newNode)
+{
+ ASSERT(existingNode);
+ ASSERT(!!existingNode->prev());
+ ASSERT(!!existingNode->next());
+ ASSERT(newNode);
+ ASSERT(!newNode->prev());
+ ASSERT(!newNode->next());
+
+ RawNode* prev = existingNode->prev();
+
+ newNode->setNext(existingNode);
+ newNode->setPrev(prev);
+
+ prev->setNext(newNode);
+ existingNode->setPrev(newNode);
+}
+
+template <typename T, typename RawNode>
+inline void SentinelLinkedList<T, RawNode>::append(T* existingNode, T* newNode)
+{
+ ASSERT(existingNode);
+ ASSERT(!!existingNode->prev());
+ ASSERT(!!existingNode->next());
+ ASSERT(newNode);
+ ASSERT(!newNode->prev());
+ ASSERT(!newNode->next());
+
+ RawNode* next = existingNode->next();
+
+ newNode->setNext(next);
+ newNode->setPrev(existingNode);
+
+ next->setPrev(newNode);
+ existingNode->setNext(newNode);
+}
+
template <typename T, typename RawNode> inline bool SentinelLinkedList<T, RawNode>::isOnList(T* node)
{
if (!node->isOnList())
@@ -167,6 +256,22 @@ template <typename T, typename RawNode> inline bool SentinelLinkedList<T, RawNod
return false;
}
+template <typename T, typename RawNode>
+inline void SentinelLinkedList<T, RawNode>::takeFrom(SentinelLinkedList<T, RawNode>& other)
+{
+ if (other.isEmpty())
+ return;
+
+ m_tailSentinel.prev()->setNext(other.m_headSentinel.next());
+ other.m_headSentinel.next()->setPrev(m_tailSentinel.prev());
+
+ m_tailSentinel.setPrev(other.m_tailSentinel.prev());
+ m_tailSentinel.prev()->setNext(&m_tailSentinel);
+
+ other.m_headSentinel.setNext(&other.m_tailSentinel);
+ other.m_tailSentinel.setPrev(&other.m_headSentinel);
+}
+
}
using WTF::BasicRawSentinelNode;
diff --git a/Source/WTF/wtf/TemporaryChange.h b/Source/WTF/wtf/SetForScope.h
index 95df1728b..785d9cb34 100644
--- a/Source/WTF/wtf/TemporaryChange.h
+++ b/Source/WTF/wtf/SetForScope.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2011 Google Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,38 +24,42 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef TemporaryChange_h
-#define TemporaryChange_h
+#pragma once
#include <wtf/Noncopyable.h>
+#include <wtf/StdLibExtras.h>
namespace WTF {
-// TemporaryChange<> is useful for setting a variable to a new value only within a
-// particular scope. An TemporaryChange<> object changes a variable to its original
+// SetForScope<> is useful for setting a variable to a new value only within a
+// particular scope. A SetForScope<> object changes a variable back to its original
// value upon destruction, making it an alternative to writing "var = false;"
// or "var = oldVal;" at all of a block's exit points.
//
-// This should be obvious, but note that an TemporaryChange<> instance should have a
+// This should be obvious, but note that a SetForScope<> instance should have a
// shorter lifetime than its scopedVariable, to prevent invalid memory writes
-// when the TemporaryChange<> object is destroyed.
+// when the SetForScope<> object is destroyed.
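+//
+// A minimal usage sketch (Widget::paint and m_isPainting are illustrative, not from this file):
+//
+//     void Widget::paint()
+//     {
+//         SetForScope<bool> change(m_isPainting, true);
+//         // ... m_isPainting stays true here, even across early returns ...
+//     } // m_isPainting is restored to its previous value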
template<typename T>
-class TemporaryChange {
- WTF_MAKE_NONCOPYABLE(TemporaryChange);
+class SetForScope {
+ WTF_MAKE_NONCOPYABLE(SetForScope);
public:
- TemporaryChange(T& scopedVariable, T newValue)
+ SetForScope(T& scopedVariable)
: m_scopedVariable(scopedVariable)
, m_originalValue(scopedVariable)
{
- m_scopedVariable = newValue;
}
-
- ~TemporaryChange()
+ template<typename U>
+ SetForScope(T& scopedVariable, U&& newValue)
+ : SetForScope(scopedVariable)
{
- m_scopedVariable = m_originalValue;
+ m_scopedVariable = std::forward<U>(newValue);
}
+ ~SetForScope()
+ {
+ m_scopedVariable = WTFMove(m_originalValue);
+ }
private:
T& m_scopedVariable;
@@ -63,6 +68,4 @@ private:
}
-using WTF::TemporaryChange;
-
-#endif
+using WTF::SetForScope;
diff --git a/Source/WTF/wtf/SharedTask.h b/Source/WTF/wtf/SharedTask.h
new file mode 100644
index 000000000..cf9d119f6
--- /dev/null
+++ b/Source/WTF/wtf/SharedTask.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SharedTask_h
+#define SharedTask_h
+
+#include <wtf/Ref.h>
+#include <wtf/ThreadSafeRefCounted.h>
+
+namespace WTF {
+
+// SharedTask is a replacement for std::function for cases where:
+//
+// - You'd like to avoid the cost of copying, and would prefer to have reference semantics rather
+// than value semantics.
+// - You want to use FastMalloc rather than system malloc. Note that std::function may avoid malloc
+// entirely in some cases, but that's hard to guarantee.
+// - You intend to share the task with other threads and so want thread-safe reference counting.
+//
+// Here's an example of how SharedTask can be better than std::function. If you do:
+//
+// std::function<int(double)> a = b;
+//
+// Then "a" will get its own copy of all captured by-value variables. The act of copying may
+// require calls to system malloc, and it may be linear time in the total size of captured
+// variables. On the other hand, if you do:
+//
+// RefPtr<SharedTask<int(double)> a = b;
+//
+// Then "a" will point to the same task as b, and the only work involved is the CAS to increase the
+// reference count.
+//
+// Also, SharedTask allows for more flexibility when sharing state between everyone who runs the
+// task. With std::function, you can only share state using by-reference captured variables.
+// SharedTask supports this since, like std::function, it can be built from a lambda (see
+// createSharedTask(), below). But SharedTask also allows you to create your own subclass and put
+// state in member fields. This can be more natural if you want fine-grained control over what
+// state is shared between instances of the task.
+template<typename FunctionType> class SharedTask;
+template<typename ResultType, typename... ArgumentTypes>
+class SharedTask<ResultType (ArgumentTypes...)> : public ThreadSafeRefCounted<SharedTask<ResultType (ArgumentTypes...)>> {
+public:
+ SharedTask() { }
+ virtual ~SharedTask() { }
+
+ virtual ResultType run(ArgumentTypes...) = 0;
+};
+
+// This is a utility class that allows you to create a SharedTask subclass using a lambda. Usually,
+// you don't want to use this class directly. Use createSharedTask() instead.
+template<typename FunctionType, typename Functor> class SharedTaskFunctor;
+template<typename ResultType, typename... ArgumentTypes, typename Functor>
+class SharedTaskFunctor<ResultType (ArgumentTypes...), Functor> : public SharedTask<ResultType (ArgumentTypes...)> {
+public:
+ SharedTaskFunctor(const Functor& functor)
+ : m_functor(functor)
+ {
+ }
+
+ SharedTaskFunctor(Functor&& functor)
+ : m_functor(WTFMove(functor))
+ {
+ }
+
+private:
+ ResultType run(ArgumentTypes... arguments) override
+ {
+ return m_functor(arguments...);
+ }
+
+ Functor m_functor;
+};
+
+// Create a SharedTask from a functor, such as a lambda. You can use this like so:
+//
+// RefPtr<SharedTask<void()>> task = createSharedTask<void()>(
+// [=] () {
+// do things;
+// });
+//
+// Note that if you use the [&] capture list, then you're probably doing it wrong. That's because
+// [&] will lead to pointers to the stack (the only exception is if you do something like &x where
+// x is a reference to the heap - but in that case, it's better to use [=, &x] to be explicit). You
+// probably don't want pointers to the stack if you will have tasks running on other threads.
+// Probably the best way to be sure that you're not making a horrible mistake is to always use
+// explicit capture lists. In many cases, [this] is sufficient.
+//
+// On the other hand, if you use something like ParallelHelperClient::runTaskInParallel() (or its
+// helper, runFunctionInParallel(), which does createSharedTask() for you), then it can be OK to
+// use [&], since the stack frame will remain live for the entire duration of the task's lifetime.
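+//
+// A slightly more concrete sketch (printCount is an illustrative function, not part of this header):
+//
+//     RefPtr<SharedTask<void(size_t)>> task = createSharedTask<void(size_t)>(
+//         [] (size_t index) {
+//             printCount(index);
+//         });
+//     task->run(42); // any number of threads can share "task" and call run() on it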
+template<typename FunctionType, typename Functor>
+Ref<SharedTask<FunctionType>> createSharedTask(const Functor& functor)
+{
+ return adoptRef(*new SharedTaskFunctor<FunctionType, Functor>(functor));
+}
+template<typename FunctionType, typename Functor>
+Ref<SharedTask<FunctionType>> createSharedTask(Functor&& functor)
+{
+ return adoptRef(*new SharedTaskFunctor<FunctionType, Functor>(WTFMove(functor)));
+}
+
+} // namespace WTF
+
+using WTF::createSharedTask;
+using WTF::SharedTask;
+using WTF::SharedTaskFunctor;
+
+#endif // SharedTask_h
+
diff --git a/Source/WTF/wtf/SimpleStats.h b/Source/WTF/wtf/SimpleStats.h
index 45a8958a7..872b7ad63 100644
--- a/Source/WTF/wtf/SimpleStats.h
+++ b/Source/WTF/wtf/SimpleStats.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -49,9 +49,9 @@ public:
m_sumOfSquares += value * value;
}
- bool operator!() const
+ explicit operator bool() const
{
- return !m_count;
+ return !!m_count;
}
double count() const
@@ -110,5 +110,7 @@ private:
} // namespace WTF
+using WTF::SimpleStats;
+
#endif // SimpleStats_h
diff --git a/Source/WTF/wtf/SixCharacterHash.cpp b/Source/WTF/wtf/SixCharacterHash.cpp
index a5c7367c3..ff8c2d719 100644
--- a/Source/WTF/wtf/SixCharacterHash.cpp
+++ b/Source/WTF/wtf/SixCharacterHash.cpp
@@ -26,46 +26,42 @@
#include "config.h"
#include "SixCharacterHash.h"
-#include <wtf/StdLibExtras.h>
-
-#include <string.h>
+#include <wtf/ASCIICType.h>
namespace WTF {
-#define TABLE ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
-
unsigned sixCharacterHashStringToInteger(const char* string)
{
unsigned hash = 0;
- RELEASE_ASSERT(strlen(string) == 6);
-
for (unsigned i = 0; i < 6; ++i) {
hash *= 62;
unsigned c = string[i];
- if (c >= 'A' && c <= 'Z') {
+ RELEASE_ASSERT(c); // FIXME: Why does this need to be a RELEASE_ASSERT?
+ if (isASCIIUpper(c)) {
hash += c - 'A';
continue;
}
- if (c >= 'a' && c <= 'z') {
+ if (isASCIILower(c)) {
hash += c - 'a' + 26;
continue;
}
- ASSERT(c >= '0' && c <= '9');
+ ASSERT(isASCIIDigit(c));
hash += c - '0' + 26 * 2;
}
-
+
+ RELEASE_ASSERT(!string[6]); // FIXME: Why does this need to be a RELEASE_ASSERT?
+
return hash;
}
std::array<char, 7> integerToSixCharacterHashString(unsigned hash)
{
- static_assert(WTF_ARRAY_LENGTH(TABLE) - 1 == 62, "Six character hash table is not 62 characters long.");
-
+ static const char table[63] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
std::array<char, 7> buffer;
unsigned accumulator = hash;
for (unsigned i = 6; i--;) {
- buffer[i] = TABLE[accumulator % 62];
+ buffer[i] = table[accumulator % 62];
accumulator /= 62;
}
buffer[6] = 0;
diff --git a/Source/WTF/wtf/SizeLimits.cpp b/Source/WTF/wtf/SizeLimits.cpp
new file mode 100644
index 000000000..a82d0c48b
--- /dev/null
+++ b/Source/WTF/wtf/SizeLimits.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include <type_traits>
+#include <utility>
+#include <wtf/Assertions.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefCounted.h>
+#include <wtf/RefPtr.h>
+#include <wtf/Vector.h>
+
+namespace WTF {
+
+#if !defined(NDEBUG) || ENABLE(SECURITY_ASSERTIONS)
+struct SameSizeAsRefCounted {
+ int a;
+ bool b;
+ bool c;
+ // The debug version may get bigger.
+};
+#else
+struct SameSizeAsRefCounted {
+ int a;
+ // Don't add anything here because this should stay small.
+};
+#endif
+
+static_assert(sizeof(PassRefPtr<RefCounted<int>>) == sizeof(int*), "PassRefPtr should stay small!");
+static_assert(sizeof(RefCounted<int>) == sizeof(SameSizeAsRefCounted), "RefCounted should stay small!");
+static_assert(sizeof(RefPtr<RefCounted<int>>) == sizeof(int*), "RefPtr should stay small!");
+
+#if !ASAN_ENABLED
+template<typename T, unsigned inlineCapacity = 0>
+struct SameSizeAsVectorWithInlineCapacity;
+
+template<typename T>
+struct SameSizeAsVectorWithInlineCapacity<T, 0> {
+ void* bufferPointer;
+ unsigned capacity;
+ unsigned size;
+};
+
+template<typename T, unsigned inlineCapacity>
+struct SameSizeAsVectorWithInlineCapacity {
+ SameSizeAsVectorWithInlineCapacity<T, 0> baseCapacity;
+ typename std::aligned_storage<sizeof(T), std::alignment_of<T>::value>::type inlineBuffer[inlineCapacity];
+};
+
+static_assert(sizeof(Vector<int>) == sizeof(SameSizeAsVectorWithInlineCapacity<int>), "Vector should stay small!");
+static_assert(sizeof(Vector<int, 1>) == sizeof(SameSizeAsVectorWithInlineCapacity<int, 1>), "Vector should stay small!");
+static_assert(sizeof(Vector<int, 2>) == sizeof(SameSizeAsVectorWithInlineCapacity<int, 2>), "Vector should stay small!");
+static_assert(sizeof(Vector<int, 3>) == sizeof(SameSizeAsVectorWithInlineCapacity<int, 3>), "Vector should stay small!");
+#endif
+}
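// The same compile-time size-lock pattern applied to a hypothetical type (illustrative,
// not part of the diff): if someone adds a field that grows the class, the build fails
// here rather than silently bloating every instance.
struct HypotheticalNode {
    void* parent { nullptr };
    unsigned flags { 0 };
};

struct SameSizeAsHypotheticalNode {
    void* a;
    unsigned b;
};

static_assert(sizeof(HypotheticalNode) == sizeof(SameSizeAsHypotheticalNode), "HypotheticalNode should stay small!");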
diff --git a/Source/WTF/wtf/SmallPtrSet.h b/Source/WTF/wtf/SmallPtrSet.h
new file mode 100644
index 000000000..da36c8da4
--- /dev/null
+++ b/Source/WTF/wtf/SmallPtrSet.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SmallPtrSet_h
+#define SmallPtrSet_h
+
+#include <wtf/Assertions.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/HashFunctions.h>
+#include <wtf/Noncopyable.h>
+
+namespace WTF {
+
+template<typename PtrType, unsigned SmallArraySize = 8>
+class SmallPtrSet {
+ WTF_MAKE_NONCOPYABLE(SmallPtrSet);
+ static_assert(std::is_trivially_destructible<PtrType>::value, "We currently don't support non-trivially destructible pointer types.");
+ static_assert(sizeof(PtrType) == sizeof(void*), "Only support pointer sized things.");
+ static_assert(!(SmallArraySize & (SmallArraySize - 1)), "Inline size must be a power of two.");
+
+public:
+ SmallPtrSet()
+ {
+ initialize();
+ }
+
+ // We take care to have SmallPtrSet have partial move semantics allowable through
+ // memcpy. It's partial move semantics because our destructor should not be called
+    // on the SmallPtrSet in the old memory we were moved from (otherwise, we might free m_buffer twice)
+ // unless that old memory is reset to be isSmall(). See move constructor below.
+ // To maintain these semantics, we determine if we're small by checking our size
+ // and not our m_buffer pointer. And when we're small, we don't do operations on
+ // m_buffer, instead, we perform operations on m_smallStorage directly. The reason we want
+ // these semantics is that it's beneficial to have a Vector that contains SmallPtrSet
+ // (or an object with SmallPtrSet as a field) be allowed to use memcpy for its move operation.
+
+ SmallPtrSet(SmallPtrSet&& other)
+ {
+ memcpy(this, &other, sizeof(SmallPtrSet));
+ other.initialize();
+ }
+
+ SmallPtrSet& operator=(SmallPtrSet&& other)
+ {
+ this->~SmallPtrSet();
+ new (this) SmallPtrSet(WTFMove(other));
+ return *this;
+ }
+
+ ~SmallPtrSet()
+ {
+ if (!isSmall())
+ fastFree(m_buffer);
+ }
+
+ inline void add(PtrType ptr)
+ {
+ ASSERT(isValidEntry(ptr));
+
+ if (isSmall()) {
+ for (unsigned i = 0; i < m_size; i++) {
+ if (m_smallStorage[i] == ptr)
+ return;
+ }
+
+ if (m_size < SmallArraySize) {
+ m_smallStorage[m_size] = ptr;
+ ++m_size;
+ return;
+ }
+
+ grow(std::max(64u, SmallArraySize * 2));
+ // Fall through. We're no longer small :(
+ }
+
+ // If we're more than 3/4ths full we grow.
+ if (UNLIKELY(m_size * 4 >= m_capacity * 3)) {
+ grow(m_capacity * 2);
+ ASSERT(!(m_capacity & (m_capacity - 1)));
+ }
+
+ void** bucket = this->bucket(ptr);
+ if (*bucket != ptr) {
+ *bucket = ptr;
+ ++m_size;
+ }
+ }
+
+ inline bool contains(PtrType ptr) const
+ {
+ ASSERT(isValidEntry(ptr));
+ if (isSmall()) {
+ for (unsigned i = 0; i < m_size; i++) { // We only need to search up to m_size because we store things linearly inside m_smallStorage.
+ if (m_smallStorage[i] == ptr)
+ return true;
+ }
+ return false;
+ }
+
+ void** bucket = this->bucket(ptr);
+ return *bucket == ptr;
+ }
+
+ class iterator {
+ public:
+ iterator& operator++()
+ {
+ m_index++;
+ ASSERT(m_index <= m_capacity);
+ while (m_index < m_capacity && m_buffer[m_index] == emptyValue())
+ m_index++;
+ return *this;
+ }
+
+ PtrType operator*() const { ASSERT(m_index < m_capacity); return static_cast<PtrType>(m_buffer[m_index]); }
+ bool operator==(const iterator& other) const { ASSERT(m_buffer == other.m_buffer); return m_index == other.m_index; }
+ bool operator!=(const iterator& other) const { ASSERT(m_buffer == other.m_buffer); return !(*this == other); }
+
+ private:
+ template<typename U, unsigned S> friend class WTF::SmallPtrSet;
+ unsigned m_index;
+ unsigned m_capacity;
+ void** m_buffer;
+ };
+
+ iterator begin() const
+ {
+ iterator it;
+ it.m_index = std::numeric_limits<unsigned>::max();
+ it.m_capacity = m_capacity;
+ if (isSmall())
+ it.m_buffer = const_cast<void**>(m_smallStorage);
+ else
+ it.m_buffer = m_buffer;
+
+ ++it;
+
+ return it;
+ }
+
+ iterator end() const
+ {
+ iterator it;
+ it.m_index = m_capacity;
+ it.m_capacity = m_capacity;
+ if (isSmall())
+ it.m_buffer = const_cast<void**>(m_smallStorage);
+ else
+ it.m_buffer = m_buffer;
+
+ return it;
+ }
+
+ inline unsigned size() const { return m_size; }
+
+private:
+ constexpr static void* emptyValue()
+ {
+ return bitwise_cast<void*>(std::numeric_limits<uintptr_t>::max());
+ }
+
+ bool isValidEntry(const PtrType ptr) const
+ {
+ return ptr != emptyValue();
+ }
+
+ inline bool isSmall() const
+ {
+ return m_capacity == SmallArraySize;
+ }
+
+ inline void initialize()
+ {
+ m_size = 0;
+ m_buffer = nullptr;
+ m_capacity = SmallArraySize;
+ memset(m_smallStorage, -1, sizeof(void*) * SmallArraySize);
+ ASSERT(isSmall());
+ }
+
+ inline void grow(unsigned size)
+ {
+ ASSERT(static_cast<int32_t>(bitwise_cast<intptr_t>(emptyValue())) == -1);
+
+ size_t allocationSize = sizeof(void*) * size;
+ bool wasSmall = isSmall();
+ void** oldBuffer = wasSmall ? m_smallStorage : m_buffer;
+ unsigned oldCapacity = m_capacity;
+ m_buffer = static_cast<void**>(fastMalloc(allocationSize));
+ memset(m_buffer, -1, allocationSize);
+ m_capacity = size;
+
+ for (unsigned i = 0; i < oldCapacity; i++) {
+ if (oldBuffer[i] != emptyValue()) {
+ void** ptr = this->bucket(static_cast<PtrType>(oldBuffer[i]));
+ *ptr = oldBuffer[i];
+ }
+ }
+
+ if (!wasSmall)
+ fastFree(oldBuffer);
+ }
+
+
+ inline void** bucket(PtrType target) const
+ {
+ ASSERT(!(m_capacity & (m_capacity - 1)));
+ unsigned bucket = PtrHashBase<PtrType, false /* isSmartPtr */>::hash(target) & (m_capacity - 1);
+ unsigned index = 0;
+ while (true) {
+ void** ptr = m_buffer + bucket;
+ if (*ptr == emptyValue())
+ return ptr;
+ if (*ptr == target)
+ return ptr;
+ index++;
+ bucket = (bucket + index) & (m_capacity - 1);
+ }
+ }
+
+ unsigned m_size;
+ unsigned m_capacity;
+ void** m_buffer;
+ void* m_smallStorage[SmallArraySize];
+};
+
+} // namespace WTF
+
+using WTF::SmallPtrSet;
+
+#endif // SmallPtrSet_h
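// A minimal usage sketch for the SmallPtrSet added above (illustrative, not part of the
// diff): the first SmallArraySize pointers live in the inline array and are searched
// linearly; past that, the set switches to an open-addressed, power-of-two hash table.
#include <wtf/SmallPtrSet.h>
#include <wtf/Vector.h>

unsigned countUnique(const WTF::Vector<int*>& pointers)
{
    WTF::SmallPtrSet<int*> seen; // inline capacity of 8 unless specified otherwise
    for (int* pointer : pointers)
        seen.add(pointer);       // add() deduplicates; contains() answers membership
    return seen.size();
}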
diff --git a/Source/WTF/wtf/Spectrum.h b/Source/WTF/wtf/Spectrum.h
index 3e6fa4a63..44f7a8180 100644
--- a/Source/WTF/wtf/Spectrum.h
+++ b/Source/WTF/wtf/Spectrum.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,22 +32,31 @@
namespace WTF {
-template<typename T>
+template<typename T, typename CounterType = unsigned>
class Spectrum {
public:
- typedef typename HashMap<T, unsigned long>::iterator iterator;
- typedef typename HashMap<T, unsigned long>::const_iterator const_iterator;
+ typedef typename HashMap<T, CounterType>::iterator iterator;
+ typedef typename HashMap<T, CounterType>::const_iterator const_iterator;
Spectrum() { }
- void add(const T& key, unsigned long count = 1)
+ void add(const T& key, CounterType count = 1)
{
- typename HashMap<T, unsigned long>::AddResult result = m_map.add(key, count);
+ if (!count)
+ return;
+ typename HashMap<T, CounterType>::AddResult result = m_map.add(key, count);
if (!result.isNewEntry)
result.iterator->value += count;
}
- unsigned long get(const T& key) const
+ template<typename U>
+ void addAll(const Spectrum<T, U>& otherSpectrum)
+ {
+ for (auto& entry : otherSpectrum)
+            add(entry.key, entry.value);
+ }
+
+ CounterType get(const T& key) const
{
const_iterator iter = m_map.find(key);
if (iter == m_map.end())
@@ -55,6 +64,8 @@ public:
return iter->value;
}
+ size_t size() const { return m_map.size(); }
+
iterator begin() { return m_map.begin(); }
iterator end() { return m_map.end(); }
const_iterator begin() const { return m_map.begin(); }
@@ -63,7 +74,7 @@ public:
struct KeyAndCount {
KeyAndCount() { }
- KeyAndCount(const T& key, unsigned long count)
+ KeyAndCount(const T& key, CounterType count)
: key(key)
, count(count)
{
@@ -80,7 +91,7 @@ public:
}
T key;
- unsigned long count;
+ CounterType count;
};
// Returns a list ordered from lowest-count to highest-count.
@@ -94,8 +105,18 @@ public:
return list;
}
+ void clear() { m_map.clear(); }
+
+ template<typename Functor>
+ void removeIf(const Functor& functor)
+ {
+ m_map.removeIf([&functor] (typename HashMap<T, CounterType>::KeyValuePairType& pair) {
+ return functor(KeyAndCount(pair.key, pair.value));
+ });
+ }
+
private:
- HashMap<T, unsigned long> m_map;
+ HashMap<T, CounterType> m_map;
};
} // namespace WTF
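// A minimal usage sketch for the templatized Spectrum above (illustrative, not part of the
// diff): it is a counting histogram keyed by hashable values, and buildList() returns
// entries sorted from lowest to highest count.
#include <wtf/Spectrum.h>
#include <wtf/Vector.h>

int mostFrequent(const WTF::Vector<int>& samples)
{
    WTF::Spectrum<int> histogram;
    for (int value : samples)
        histogram.add(value);                    // assumes samples avoid the hash traits' empty key (0)
    auto list = histogram.buildList();           // lowest count first
    return list.isEmpty() ? 0 : list.last().key; // highest-count key is at the back
}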
diff --git a/Source/WTF/wtf/StackBounds.cpp b/Source/WTF/wtf/StackBounds.cpp
index 4cb031cb2..f308ac1d6 100644
--- a/Source/WTF/wtf/StackBounds.cpp
+++ b/Source/WTF/wtf/StackBounds.cpp
@@ -119,21 +119,11 @@ void StackBounds::initialize()
void StackBounds::initialize()
{
- MEMORY_BASIC_INFORMATION stackOrigin;
+ MEMORY_BASIC_INFORMATION stackOrigin = { 0 };
VirtualQuery(&stackOrigin, &stackOrigin, sizeof(stackOrigin));
// stackOrigin.AllocationBase points to the reserved stack memory base address.
m_origin = static_cast<char*>(stackOrigin.BaseAddress) + stackOrigin.RegionSize;
-#if OS(WINCE)
- SYSTEM_INFO systemInfo;
- GetSystemInfo(&systemInfo);
- DWORD pageSize = systemInfo.dwPageSize;
-
- MEMORY_BASIC_INFORMATION stackMemory;
- VirtualQuery(m_origin, &stackMemory, sizeof(stackMemory));
-
- m_bound = static_cast<char*>(m_origin) - stackMemory.RegionSize + pageSize;
-#else
// The stack on Windows consists out of three parts (uncommitted memory, a guard page and present
// committed memory). The 3 regions have different BaseAddresses but all have the same AllocationBase
// since they are all from the same VirtualAlloc. The 3 regions are laid out in memory (from high to
@@ -173,7 +163,6 @@ void StackBounds::initialize()
ASSERT(endOfStack == computedEnd);
#endif // NDEBUG
m_bound = static_cast<char*>(endOfStack) + guardPage.RegionSize;
-#endif // OS(WINCE)
}
#else
diff --git a/Source/WTF/wtf/StackBounds.h b/Source/WTF/wtf/StackBounds.h
index ca397496f..ce9ea96dd 100644
--- a/Source/WTF/wtf/StackBounds.h
+++ b/Source/WTF/wtf/StackBounds.h
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -27,6 +27,8 @@
#ifndef StackBounds_h
#define StackBounds_h
+#include <algorithm>
+
namespace WTF {
class StackBounds {
@@ -52,6 +54,12 @@ public:
return m_origin;
}
+ void* end() const
+ {
+ ASSERT(m_bound);
+ return m_bound;
+ }
+
size_t size() const
{
if (isGrowingDownward())
@@ -67,14 +75,36 @@ public:
return static_cast<char*>(m_bound) - minAvailableDelta;
}
+ void* recursionLimit(char* startOfUserStack, size_t maxUserStack, size_t reservedZoneSize) const
+ {
+ checkConsistency();
+ if (maxUserStack < reservedZoneSize)
+ reservedZoneSize = maxUserStack;
+ size_t maxUserStackWithReservedZone = maxUserStack - reservedZoneSize;
+
+ if (isGrowingDownward()) {
+ char* endOfStackWithReservedZone = reinterpret_cast<char*>(m_bound) + reservedZoneSize;
+ if (startOfUserStack < endOfStackWithReservedZone)
+ return endOfStackWithReservedZone;
+ size_t availableUserStack = startOfUserStack - endOfStackWithReservedZone;
+ if (maxUserStackWithReservedZone > availableUserStack)
+ maxUserStackWithReservedZone = availableUserStack;
+ return startOfUserStack - maxUserStackWithReservedZone;
+ }
+
+ char* endOfStackWithReservedZone = reinterpret_cast<char*>(m_bound) - reservedZoneSize;
+ if (startOfUserStack > endOfStackWithReservedZone)
+ return endOfStackWithReservedZone;
+ size_t availableUserStack = endOfStackWithReservedZone - startOfUserStack;
+ if (maxUserStackWithReservedZone > availableUserStack)
+ maxUserStackWithReservedZone = availableUserStack;
+ return startOfUserStack + maxUserStackWithReservedZone;
+ }
+
bool isGrowingDownward() const
{
ASSERT(m_origin && m_bound);
-#if OS(WINCE)
- return m_origin > m_bound;
-#else
return true;
-#endif
}
private:
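// A worked example for the new recursionLimit() above (illustrative, not part of the diff),
// for a downward-growing stack with hypothetical addresses:
//   m_bound = 0x1000, reservedZoneSize = 0x100 -> endOfStackWithReservedZone = 0x1100
//   startOfUserStack = 0x9100, maxUserStack = 0x4000 -> maxUserStackWithReservedZone = 0x3F00
//   availableUserStack = 0x9100 - 0x1100 = 0x8000, which exceeds 0x3F00, so the result is
//   0x9100 - 0x3F00 = 0x5200. Recursing below that address means the reserved zone is at risk.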
diff --git a/Source/WTF/wtf/StackStats.cpp b/Source/WTF/wtf/StackStats.cpp
new file mode 100644
index 000000000..064f2f052
--- /dev/null
+++ b/Source/WTF/wtf/StackStats.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "StackStats.h"
+
+#if ENABLE(STACK_STATS)
+
+#include "Assertions.h"
+#include "DataLog.h"
+#include "WTFThreadData.h"
+
+// Define the following flag if you want to collect stats on every single
+// checkpoint. By default, we only log checkpoints that establish new
+// max values.
+
+#define ENABLE_VERBOSE_STACK_STATS 1
+
+
+namespace WTF {
+
+// CheckPoint management:
+StaticLock StackStats::s_sharedMutex;
+StackStats::CheckPoint* StackStats::s_topCheckPoint = 0;
+StackStats::LayoutCheckPoint* StackStats::s_firstLayoutCheckPoint = 0;
+StackStats::LayoutCheckPoint* StackStats::s_topLayoutCheckPoint = 0;
+
+// High watermark stats:
+int StackStats::s_maxCheckPointDiff = 0;
+int StackStats::s_maxStackHeight = 0;
+int StackStats::s_maxReentryDepth = 0;
+
+int StackStats::s_maxLayoutCheckPointDiff = 0;
+int StackStats::s_maxTotalLayoutCheckPointDiff = 0;
+int StackStats::s_maxLayoutReentryDepth = 0;
+
+
+StackStats::PerThreadStats::PerThreadStats()
+{
+ const StackBounds& stack = wtfThreadData().stack();
+ m_reentryDepth = 0;
+ m_stackStart = (char*)stack.origin();
+ m_currentCheckPoint = 0;
+
+ dataLogF(" === THREAD new stackStart %p ========\n", m_stackStart);
+}
+
+StackStats::CheckPoint::CheckPoint()
+{
+ std::lock_guard<StaticLock> lock(StackStats::s_sharedMutex);
+ WTFThreadData* threadData = const_cast<WTFThreadData*>(&wtfThreadData());
+ StackStats::PerThreadStats& t = threadData->stackStats();
+ const StackBounds& stack = threadData->stack();
+
+ bool isGrowingDownward = stack.isGrowingDownward();
+ bool needToLog = false;
+ char* current = reinterpret_cast<char*>(this);
+ char* last = reinterpret_cast<char*>(t.m_currentCheckPoint);
+
+ // If there was no previous checkpoint, measure from the start of the stack:
+ if (!last)
+ last = t.m_stackStart;
+
+ // Update the reentry depth stats:
+ t.m_reentryDepth++;
+ if (t.m_reentryDepth > StackStats::s_maxReentryDepth) {
+ StackStats::s_maxReentryDepth = t.m_reentryDepth;
+ needToLog = true;
+ }
+
+ // Update the stack height stats:
+ int height = t.m_stackStart - current;
+ if (!isGrowingDownward)
+ height = -height;
+ if (height > StackStats::s_maxStackHeight) {
+ StackStats::s_maxStackHeight = height;
+ needToLog = true;
+ }
+
+ // Update the checkpoint diff stats:
+ int diff = last - current;
+ if (!isGrowingDownward)
+ diff = -diff;
+ if (diff > StackStats::s_maxCheckPointDiff) {
+ StackStats::s_maxCheckPointDiff = diff;
+ needToLog = true;
+ }
+
+ // Push this checkpoint:
+ m_prev = t.m_currentCheckPoint;
+ t.m_currentCheckPoint = this;
+
+#if ENABLE(VERBOSE_STACK_STATS)
+ needToLog = true; // always log.
+#endif
+
+ // Log this checkpoint if needed:
+ if (needToLog)
+ dataLogF(" CHECKPOINT %p diff %d/%.1fk/max %.1fk | reentry %d/max %d | height %.1fk/max %.1fk | stack %p size %.1fk\n",
+ this, diff, diff / 1024.0, StackStats::s_maxCheckPointDiff / 1024.0,
+ t.m_reentryDepth, StackStats::s_maxReentryDepth,
+ height / 1024.0, StackStats::s_maxStackHeight / 1024.0,
+ stack.origin(), stack.size() / 1024.0);
+}
+
+StackStats::CheckPoint::~CheckPoint()
+{
+ std::lock_guard<StaticLock> lock(StackStats::s_sharedMutex);
+ WTFThreadData* threadData = const_cast<WTFThreadData*>(&wtfThreadData());
+ StackStats::PerThreadStats& t = threadData->stackStats();
+
+ // Pop to previous checkpoint:
+ t.m_currentCheckPoint = m_prev;
+ --t.m_reentryDepth;
+
+ // Log this checkpoint if needed:
+#if ENABLE(VERBOSE_STACK_STATS)
+ if (!m_prev) {
+ const StackBounds& stack = threadData->stack();
+ bool isGrowingDownward = stack.isGrowingDownward();
+
+ char* current = reinterpret_cast<char*>(this);
+ int height = t.m_stackStart - current;
+
+ if (!isGrowingDownward)
+ height = -height;
+
+ dataLogF(" POP to %p diff max %.1fk | reentry %d/%d max | height %.1fk/max %.1fk | stack %p size %.1fk)\n",
+ this, StackStats::s_maxCheckPointDiff / 1024.0,
+ t.m_reentryDepth, StackStats::s_maxReentryDepth,
+ height / 1024.0, StackStats::s_maxStackHeight / 1024.0,
+ stack.origin(), stack.size() / 1024.0);
+ }
+#endif
+}
+
+void StackStats::probe()
+{
+ std::lock_guard<StaticLock> lock(StackStats::s_sharedMutex);
+ WTFThreadData* threadData = const_cast<WTFThreadData*>(&wtfThreadData());
+ StackStats::PerThreadStats& t = threadData->stackStats();
+ const StackBounds& stack = threadData->stack();
+
+ bool isGrowingDownward = stack.isGrowingDownward();
+
+ bool needToLog = false;
+
+ int dummy;
+ char* current = reinterpret_cast<char*>(&dummy);
+ char* last = reinterpret_cast<char*>(t.m_currentCheckPoint);
+
+ // If there was no previous checkpoint, measure from the start of the stack:
+ if (!last)
+ last = t.m_stackStart;
+
+ // We did not reach another checkpoint yet. Hence, we do not touch the
+ // reentry stats.
+
+ // Update the stack height stats:
+ int height = t.m_stackStart - current;
+ if (!isGrowingDownward)
+ height = -height;
+ if (height > StackStats::s_maxStackHeight) {
+ StackStats::s_maxStackHeight = height;
+ needToLog = true;
+ }
+
+ // Update the checkpoint diff stats:
+ int diff = last - current;
+ if (!isGrowingDownward)
+ diff = -diff;
+ if (diff > StackStats::s_maxCheckPointDiff) {
+ StackStats::s_maxCheckPointDiff = diff;
+ needToLog = true;
+ }
+
+#if ENABLE(VERBOSE_STACK_STATS)
+ needToLog = true; // always log.
+#endif
+
+ if (needToLog)
+ dataLogF(" PROBE %p diff %d/%.1fk/max %.1fk | reentry %d/max %d | height %.1fk/max %.1fk | stack %p size %.1fk\n",
+ current, diff, diff / 1024.0, StackStats::s_maxCheckPointDiff / 1024.0,
+ t.m_reentryDepth, StackStats::s_maxReentryDepth,
+ height / 1024.0, StackStats::s_maxStackHeight / 1024.0,
+ stack.origin(), stack.size() / 1024.0);
+}
+
+StackStats::LayoutCheckPoint::LayoutCheckPoint()
+{
+    // While a layout checkpoint is not necessarily a checkpoint where we
+    // will do a recursion check, it is a convenient spot for doing a
+ // probe to measure the height of stack usage.
+ //
+ // We'll do this probe before we commence with the layout checkpoint.
+ // This is because the probe also locks the sharedLock. By calling the
+ // probe first, we can avoid re-entering the lock.
+ StackStats::probe();
+
+ std::lock_guard<StaticLock> lock(StackStats::s_sharedMutex);
+ WTFThreadData* threadData = const_cast<WTFThreadData*>(&wtfThreadData());
+ StackStats::PerThreadStats& t = threadData->stackStats();
+ const StackBounds& stack = threadData->stack();
+
+ bool isGrowingDownward = stack.isGrowingDownward();
+
+ // Push this checkpoint:
+ m_prev = StackStats::s_topLayoutCheckPoint;
+ if (m_prev)
+ m_depth = m_prev->m_depth + 1;
+ else {
+ StackStats::s_firstLayoutCheckPoint = this;
+ m_depth = 0;
+ }
+ StackStats::s_topLayoutCheckPoint = this;
+
+ //
+ char* current = reinterpret_cast<char*>(this);
+ char* last = reinterpret_cast<char*>(m_prev);
+ char* root = reinterpret_cast<char*>(StackStats::s_firstLayoutCheckPoint);
+ bool needToLog = false;
+
+ int diff = last - current;
+ if (!last)
+ diff = 0;
+ int totalDiff = root - current;
+ if (!root)
+ totalDiff = 0;
+
+ // Update the stack height stats:
+ int height = t.m_stackStart - current;
+ if (!isGrowingDownward)
+ height = -height;
+ if (height > StackStats::s_maxStackHeight) {
+ StackStats::s_maxStackHeight = height;
+ needToLog = true;
+ }
+
+ // Update the layout checkpoint diff stats:
+ if (!isGrowingDownward)
+ diff = -diff;
+ if (diff > StackStats::s_maxLayoutCheckPointDiff) {
+ StackStats::s_maxLayoutCheckPointDiff = diff;
+ needToLog = true;
+ }
+
+ // Update the total layout checkpoint diff stats:
+ if (!isGrowingDownward)
+ totalDiff = -totalDiff;
+ if (totalDiff > StackStats::s_maxTotalLayoutCheckPointDiff) {
+ StackStats::s_maxTotalLayoutCheckPointDiff = totalDiff;
+ needToLog = true;
+ }
+
+#if ENABLE(VERBOSE_STACK_STATS)
+ needToLog = true; // always log.
+#endif
+
+ if (needToLog)
+ dataLogF(" LAYOUT %p diff %d/%.1fk/max %.1fk | reentry %d/max %d | height %.1fk/max %.1fk | stack %p size %.1fk\n",
+ current, diff, diff / 1024.0, StackStats::s_maxLayoutCheckPointDiff / 1024.0,
+ m_depth, StackStats::s_maxLayoutReentryDepth,
+ totalDiff / 1024.0, StackStats::s_maxTotalLayoutCheckPointDiff / 1024.0,
+ stack.origin(), stack.size() / 1024.0);
+}
+
+StackStats::LayoutCheckPoint::~LayoutCheckPoint()
+{
+ std::lock_guard<StaticLock> lock(StackStats::s_sharedMutex);
+
+ // Pop to the previous layout checkpoint:
+ StackStats::s_topLayoutCheckPoint = m_prev;
+ if (!m_depth)
+ StackStats::s_firstLayoutCheckPoint = 0;
+}
+
+} // namespace WTF
+
+#endif // ENABLE(STACK_STATS)
+
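// A minimal usage sketch for the checkpoints defined above (illustrative, not part of the
// diff): CheckPoint and LayoutCheckPoint are RAII markers placed on the stack at recursive
// entry points; with STACK_STATS disabled they compile to empty objects.
#include <wtf/StackStats.h>

void hypotheticalRecursiveEntryPoint()
{
    WTF::StackStats::CheckPoint checkPoint; // pushes a checkpoint; its destructor pops it
    // ... recursive work; StackStats::probe() can be sprinkled at suspected stack peaks ...
}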
diff --git a/Source/WTF/wtf/StackStats.h b/Source/WTF/wtf/StackStats.h
index 244fae8b9..f4ef57f3b 100644
--- a/Source/WTF/wtf/StackStats.h
+++ b/Source/WTF/wtf/StackStats.h
@@ -28,6 +28,7 @@
#include "ExportMacros.h"
#include <mutex>
+#include <wtf/Lock.h>
// Define this flag to enable Stack stats collection. This feature is useful
@@ -67,7 +68,6 @@ public:
LayoutCheckPoint() { }
};
- static void initialize() { }
static void probe() { }
};
@@ -109,9 +109,6 @@ public:
int m_depth;
};
- // Initializes locks and the log file. Should only be called once.
- static void initialize();
-
// Used for probing the stack at places where we suspect to be high
// points of stack usage but are NOT check points where stack recursion
// is checked.
@@ -124,7 +121,7 @@ public:
private:
// CheckPoint management:
- static std::mutex* s_sharedMutex;
+ static StaticLock s_sharedMutex;
static CheckPoint* s_topCheckPoint;
static LayoutCheckPoint* s_firstLayoutCheckPoint;
static LayoutCheckPoint* s_topLayoutCheckPoint;
diff --git a/Source/WTF/wtf/StaticConstructors.h b/Source/WTF/wtf/StaticConstructors.h
index 23e403803..d559af770 100644
--- a/Source/WTF/wtf/StaticConstructors.h
+++ b/Source/WTF/wtf/StaticConstructors.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Apple Computer, Inc.
+ * Copyright (C) 2006 Apple Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
diff --git a/Source/WTF/wtf/StdLibExtras.h b/Source/WTF/wtf/StdLibExtras.h
index 2e35031da..716661fad 100644
--- a/Source/WTF/wtf/StdLibExtras.h
+++ b/Source/WTF/wtf/StdLibExtras.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2008, 2016 Apple Inc. All Rights Reserved.
* Copyright (C) 2013 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -28,35 +28,33 @@
#define WTF_StdLibExtras_h
#include <chrono>
+#include <cstring>
#include <memory>
#include <wtf/Assertions.h>
#include <wtf/CheckedArithmetic.h>
+#include <wtf/Compiler.h>
-// Use these to declare and define a static local variable (static T;) so that
-// it is leaked so that its destructors are not called at exit. Using this
-// macro also allows workarounds a compiler bug present in Apple's version of GCC 4.0.1.
-#ifndef DEFINE_STATIC_LOCAL
-#if COMPILER(GCC) && defined(__APPLE_CC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 0 && __GNUC_PATCHLEVEL__ == 1
-#define DEFINE_STATIC_LOCAL(type, name, arguments) \
- static type* name##Ptr = new type arguments; \
- type& name = *name##Ptr
-#else
-#define DEFINE_STATIC_LOCAL(type, name, arguments) \
+// This was used to declare and define a static local variable (static T;) that was
+// deliberately leaked so that its destructor was not called at exit.
+// Newly written code should use static NeverDestroyed<T> instead.
+#ifndef DEPRECATED_DEFINE_STATIC_LOCAL
+#define DEPRECATED_DEFINE_STATIC_LOCAL(type, name, arguments) \
static type& name = *new type arguments
#endif
-#endif
// Use this macro to declare and define a debug-only global variable that may have a
// non-trivial constructor and destructor. When building with clang, this will suppress
// warnings about global constructors and exit-time destructors.
-#ifndef NDEBUG
-#if COMPILER(CLANG)
-#define DEFINE_DEBUG_ONLY_GLOBAL(type, name, arguments) \
+#define DEFINE_GLOBAL_FOR_LOGGING(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
+
+#ifndef NDEBUG
+#if COMPILER(CLANG)
+#define DEFINE_DEBUG_ONLY_GLOBAL(type, name, arguments) DEFINE_GLOBAL_FOR_LOGGING(type, name, arguments)
#else
#define DEFINE_DEBUG_ONLY_GLOBAL(type, name, arguments) \
static type name arguments;
@@ -70,10 +68,17 @@
// NULL can cause compiler problems, especially in cases of multiple inheritance.
#define OBJECT_OFFSETOF(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
+#define CAST_OFFSET(from, to) (reinterpret_cast<uintptr_t>(static_cast<to>((reinterpret_cast<from>(0x4000)))) - 0x4000)
+
// STRINGIZE: Can convert any value to quoted string, even expandable macros
#define STRINGIZE(exp) #exp
#define STRINGIZE_VALUE_OF(exp) STRINGIZE(exp)
+// WTF_CONCAT: concatenate two symbols into one, even expandable macros
+#define WTF_CONCAT_INTERNAL_DONT_USE(a, b) a ## b
+#define WTF_CONCAT(a, b) WTF_CONCAT_INTERNAL_DONT_USE(a, b)
+
+
/*
* The reinterpret_cast<Type1*>([pointer to Type2]) expressions - where
* sizeof(Type1) > sizeof(Type2) - cause the following warning on ARM with GCC:
@@ -84,7 +89,7 @@
* - https://bugs.webkit.org/show_bug.cgi?id=38045
* - http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43976
*/
-#if (CPU(ARM) || CPU(MIPS)) && COMPILER(GCC)
+#if (CPU(ARM) || CPU(MIPS)) && COMPILER(GCC_OR_CLANG)
template<typename Type>
inline bool isPointerTypeAlignmentOkay(Type* ptr)
{
@@ -115,8 +120,11 @@ inline bool isPointerTypeAlignmentOkay(Type*)
namespace WTF {
+enum CheckMoveParameterTag { CheckMoveParameter };
+
static const size_t KB = 1024;
static const size_t MB = 1024 * 1024;
+static const size_t GB = 1024 * 1024 * 1024;
inline bool isPointerAligned(void* p)
{
@@ -135,18 +143,20 @@ template<typename ToType, typename FromType>
inline ToType bitwise_cast(FromType from)
{
static_assert(sizeof(FromType) == sizeof(ToType), "bitwise_cast size of FromType and ToType must be equal!");
- union {
- FromType from;
- ToType to;
- } u;
- u.from = from;
- return u.to;
+#if COMPILER_SUPPORTS(BUILTIN_IS_TRIVIALLY_COPYABLE)
+ // Not all recent STL implementations support the std::is_trivially_copyable type trait. Work around this by only checking on toolchains which have the equivalent compiler intrinsic.
+ static_assert(__is_trivially_copyable(ToType), "bitwise_cast of non-trivially-copyable type!");
+ static_assert(__is_trivially_copyable(FromType), "bitwise_cast of non-trivially-copyable type!");
+#endif
+ typename std::remove_const<ToType>::type to { };
+ std::memcpy(&to, &from, sizeof(to));
+ return to;
}
template<typename ToType, typename FromType>
inline ToType safeCast(FromType value)
{
- ASSERT(isInBounds<ToType>(value));
+ RELEASE_ASSERT(isInBounds<ToType>(value));
return static_cast<ToType>(value);
}
@@ -166,23 +176,32 @@ inline size_t bitCount(uint64_t bits)
// Macro that returns a compile time constant with the length of an array, but gives an error if passed a non-array.
template<typename T, size_t Size> char (&ArrayLengthHelperFunction(T (&)[Size]))[Size];
// GCC needs some help to deduce a 0 length array.
-#if COMPILER(GCC)
+#if COMPILER(GCC_OR_CLANG)
template<typename T> char (&ArrayLengthHelperFunction(T (&)[0]))[0];
#endif
#define WTF_ARRAY_LENGTH(array) sizeof(::WTF::ArrayLengthHelperFunction(array))
+ALWAYS_INLINE constexpr size_t roundUpToMultipleOfImpl0(size_t remainderMask, size_t x)
+{
+ return (x + remainderMask) & ~remainderMask;
+}
+
+ALWAYS_INLINE constexpr size_t roundUpToMultipleOfImpl(size_t divisor, size_t x)
+{
+ return roundUpToMultipleOfImpl0(divisor - 1, x);
+}
+
// Efficient implementation that takes advantage of powers of two.
inline size_t roundUpToMultipleOf(size_t divisor, size_t x)
{
ASSERT(divisor && !(divisor & (divisor - 1)));
- size_t remainderMask = divisor - 1;
- return (x + remainderMask) & ~remainderMask;
+ return roundUpToMultipleOfImpl(divisor, x);
}
-template<size_t divisor> inline size_t roundUpToMultipleOf(size_t x)
+template<size_t divisor> inline constexpr size_t roundUpToMultipleOf(size_t x)
{
static_assert(divisor && !(divisor & (divisor - 1)), "divisor must be a power of two!");
- return roundUpToMultipleOf(divisor, x);
+ return roundUpToMultipleOfImpl(divisor, x);
}
enum BinarySearchMode {
@@ -276,32 +295,120 @@ inline void insertIntoBoundedVector(VectorType& vector, size_t size, const Eleme
vector[index] = element;
}
-} // namespace WTF
+// This is here instead of CompilationThread.h to prevent that header from being included
+// everywhere. The fact that this method, and that header, exist outside of JSC is a bug.
+// https://bugs.webkit.org/show_bug.cgi?id=131815
+WTF_EXPORT_PRIVATE bool isCompilationThread();
+
+template<typename Func>
+bool isStatelessLambda()
+{
+ return std::is_empty<Func>::value;
+}
-#if OS(WINCE)
-// Windows CE CRT has does not implement bsearch().
-inline void* wtf_bsearch(const void* key, const void* base, size_t count, size_t size, int (*compare)(const void *, const void *))
-{
- const char* first = static_cast<const char*>(base);
-
- while (count) {
- size_t pos = (count - 1) >> 1;
- const char* item = first + pos * size;
- int compareResult = compare(item, key);
- if (!compareResult)
- return const_cast<char*>(item);
- if (compareResult < 0) {
- count -= (pos + 1);
- first += (pos + 1) * size;
- } else
- count = pos;
+template<typename ResultType, typename Func, typename... ArgumentTypes>
+ResultType callStatelessLambda(ArgumentTypes&&... arguments)
+{
+ uint64_t data[(sizeof(Func) + sizeof(uint64_t) - 1) / sizeof(uint64_t)];
+ memset(data, 0, sizeof(data));
+ return (*bitwise_cast<Func*>(data))(std::forward<ArgumentTypes>(arguments)...);
+}
+
+template<typename T, typename U>
+bool checkAndSet(T& left, U right)
+{
+ if (left == right)
+ return false;
+ left = right;
+ return true;
+}
+
+template<typename T>
+bool findBitInWord(T word, size_t& index, size_t endIndex, bool value)
+{
+ static_assert(std::is_unsigned<T>::value, "Type used in findBitInWord must be unsigned");
+
+ word >>= index;
+
+ while (index < endIndex) {
+ if ((word & 1) == static_cast<T>(value))
+ return true;
+ index++;
+ word >>= 1;
+ }
+
+ index = endIndex;
+ return false;
+}
+
+// Visitor adapted from http://stackoverflow.com/questions/25338795/is-there-a-name-for-this-tuple-creation-idiom
+
+template <class A, class... B>
+struct Visitor : Visitor<A>, Visitor<B...> {
+ Visitor(A a, B... b)
+ : Visitor<A>(a)
+ , Visitor<B...>(b...)
+ {
+ }
+
+ using Visitor<A>::operator ();
+ using Visitor<B...>::operator ();
+};
+
+template <class A>
+struct Visitor<A> : A {
+ Visitor(A a)
+ : A(a)
+ {
}
- return 0;
+ using A::operator();
+};
+
+template <class... F>
+Visitor<F...> makeVisitor(F... f)
+{
+ return Visitor<F...>(f...);
}
-#define bsearch(key, base, count, size, compare) wtf_bsearch(key, base, count, size, compare)
-#endif
+namespace Detail
+{
+ template <typename, template <typename...> class>
+ struct IsTemplate_ : std::false_type
+ {
+ };
+
+ template <typename... Ts, template <typename...> class C>
+ struct IsTemplate_<C<Ts...>, C> : std::true_type
+ {
+ };
+}
+
+template <typename T, template <typename...> class Template>
+struct IsTemplate : public std::integral_constant<bool, Detail::IsTemplate_<T, Template>::value> {};
+
+namespace Detail
+{
+ template <template <typename...> class Base, typename Derived>
+ struct IsBaseOfTemplateImpl
+ {
+ template <typename... Args>
+ static std::true_type test(Base<Args...>*);
+ static std::false_type test(void*);
+
+ static constexpr const bool value = decltype(test(std::declval<typename std::remove_cv<Derived>::type*>()))::value;
+ };
+}
+
+template <template <typename...> class Base, typename Derived>
+struct IsBaseOfTemplate : public std::integral_constant<bool, Detail::IsBaseOfTemplateImpl<Base, Derived>::value> {};
+
+template <class T>
+struct RemoveCVAndReference {
+ typedef typename std::remove_cv<typename std::remove_reference<T>::type>::type type;
+};
+
+} // namespace WTF
// This version of placement new omits a 0 check.
enum NotNullTag { NotNull };
@@ -311,20 +418,9 @@ inline void* operator new(size_t, NotNullTag, void* location)
return location;
}
-#if (COMPILER(GCC) && !COMPILER(CLANG) && !GCC_VERSION_AT_LEAST(4, 8, 1))
-
-// Work-around for Pre-C++11 syntax in MSVC 2010, and prior as well as GCC < 4.8.1.
-namespace std {
- template<class T> struct is_trivially_destructible {
- static const bool value = std::has_trivial_destructor<T>::value;
- };
-}
-#endif
-
// This adds various C++14 features for versions of the STL that may not yet have them.
namespace std {
-// MSVC 2013 supports std::make_unique already.
-#if !defined(_MSC_VER) || _MSC_VER < 1800
+#if COMPILER(CLANG) && __cplusplus < 201400L
template<class T> struct _Unique_if {
typedef unique_ptr<T> _Single_object;
};
@@ -352,58 +448,52 @@ make_unique(size_t n)
template<class T, class... Args> typename _Unique_if<T>::_Known_bound
make_unique(Args&&...) = delete;
-#endif
-// Compile-time integer sequences
-// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3658.html
-// (Note that we only implement index_sequence, and not the more generic integer_sequence).
-template<size_t... indexes> struct index_sequence {
- static size_t size() { return sizeof...(indexes); }
-};
+// std::exchange
+template<class T, class U = T>
+T exchange(T& t, U&& newValue)
+{
+ T oldValue = std::move(t);
+ t = std::forward<U>(newValue);
-template<size_t currentIndex, size_t...indexes> struct make_index_sequence_helper;
+ return oldValue;
+}
+#endif
-template<size_t...indexes> struct make_index_sequence_helper<0, indexes...> {
- typedef std::index_sequence<indexes...> type;
-};
+template<WTF::CheckMoveParameterTag, typename T>
+ALWAYS_INLINE constexpr typename remove_reference<T>::type&& move(T&& value)
+{
+ static_assert(is_lvalue_reference<T>::value, "T is not an lvalue reference; move() is unnecessary.");
-template<size_t currentIndex, size_t...indexes> struct make_index_sequence_helper {
- typedef typename make_index_sequence_helper<currentIndex - 1, currentIndex - 1, indexes...>::type type;
-};
+ using NonRefQualifiedType = typename remove_reference<T>::type;
+ static_assert(!is_const<NonRefQualifiedType>::value, "T is const qualified.");
-template<size_t length> struct make_index_sequence : public make_index_sequence_helper<length>::type { };
+ return move(forward<T>(value));
+}
-#if COMPILER_SUPPORTS(CXX_USER_LITERALS)
-// These literals are available in C++14, so once we require C++14 compilers we can get rid of them here.
-// (User-literals need to have a leading underscore so we add it here - the "real" literals don't have underscores).
-namespace literals {
-namespace chrono_literals {
- CONSTEXPR inline chrono::seconds operator"" _s(unsigned long long s)
- {
- return chrono::seconds(static_cast<chrono::seconds::rep>(s));
- }
+} // namespace std
- CONSTEXPR chrono::milliseconds operator"" _ms(unsigned long long ms)
- {
- return chrono::milliseconds(static_cast<chrono::milliseconds::rep>(ms));
- }
-}
-}
-#endif
-}
+#define WTFMove(value) std::move<WTF::CheckMoveParameter>(value)
using WTF::KB;
using WTF::MB;
+using WTF::GB;
+using WTF::approximateBinarySearch;
+using WTF::binarySearch;
+using WTF::bitwise_cast;
+using WTF::callStatelessLambda;
+using WTF::checkAndSet;
+using WTF::findBitInWord;
using WTF::insertIntoBoundedVector;
+using WTF::isCompilationThread;
using WTF::isPointerAligned;
+using WTF::isStatelessLambda;
using WTF::is8ByteAligned;
-using WTF::binarySearch;
-using WTF::tryBinarySearch;
-using WTF::approximateBinarySearch;
-using WTF::bitwise_cast;
+using WTF::roundUpToMultipleOf;
using WTF::safeCast;
+using WTF::tryBinarySearch;
-#if COMPILER_SUPPORTS(CXX_USER_LITERALS)
+#if !COMPILER(CLANG) || __cplusplus >= 201400L
// We normally don't want to bring in entire std namespaces, but literals are an exception.
using namespace std::literals::chrono_literals;
#endif
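// Small sketches for two of the StdLibExtras changes above (illustrative, not part of the
// diff): bitwise_cast now uses memcpy (defined behavior, unlike the old union trick), and
// the templated roundUpToMultipleOf<divisor>() is constexpr, so it works in static_asserts.
#include <cstdint>
#include <wtf/StdLibExtras.h>

uint64_t doubleBits(double value)
{
    return bitwise_cast<uint64_t>(value); // sizes must match; both types trivially copyable
}

static_assert(WTF::roundUpToMultipleOf<16>(40) == 48, "(40 + 15) & ~15 == 48");

// WTFMove(x) behaves like std::move(x), but statically rejects const and rvalue arguments,
// catching "moves" that would silently copy.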
diff --git a/Source/WTF/wtf/Stopwatch.h b/Source/WTF/wtf/Stopwatch.h
new file mode 100644
index 000000000..9f4c01850
--- /dev/null
+++ b/Source/WTF/wtf/Stopwatch.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014 University of Washington. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <cmath>
+#include <wtf/CurrentTime.h>
+#include <wtf/MonotonicTime.h>
+#include <wtf/RefCounted.h>
+
+namespace WTF {
+
+class Stopwatch : public RefCounted<Stopwatch> {
+public:
+ static Ref<Stopwatch> create()
+ {
+ return adoptRef(*new Stopwatch());
+ }
+
+ void reset();
+ void start();
+ void stop();
+
+ double elapsedTime();
+ double elapsedTimeSince(MonotonicTime);
+
+ bool isActive() const { return !std::isnan(m_lastStartTime); }
+private:
+ Stopwatch() { reset(); }
+
+ double m_elapsedTime;
+ double m_lastStartTime;
+};
+
+inline void Stopwatch::reset()
+{
+ m_elapsedTime = 0.0;
+ m_lastStartTime = NAN;
+}
+
+inline void Stopwatch::start()
+{
+ ASSERT_WITH_MESSAGE(std::isnan(m_lastStartTime), "Tried to start the stopwatch, but it is already running.");
+
+ m_lastStartTime = monotonicallyIncreasingTime();
+}
+
+inline void Stopwatch::stop()
+{
+ ASSERT_WITH_MESSAGE(!std::isnan(m_lastStartTime), "Tried to stop the stopwatch, but it is not running.");
+
+ m_elapsedTime += monotonicallyIncreasingTime() - m_lastStartTime;
+ m_lastStartTime = NAN;
+}
+
+inline double Stopwatch::elapsedTime()
+{
+ if (!isActive())
+ return m_elapsedTime;
+
+ return m_elapsedTime + (monotonicallyIncreasingTime() - m_lastStartTime);
+}
+
+inline double Stopwatch::elapsedTimeSince(MonotonicTime timeStamp)
+{
+ if (!isActive())
+ return m_elapsedTime;
+
+ return m_elapsedTime + (timeStamp.secondsSinceEpoch().seconds() - m_lastStartTime);
+}
+
+} // namespace WTF
+
+using WTF::Stopwatch;
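// A minimal usage sketch for the Stopwatch added above (illustrative, not part of the
// diff): elapsed monotonic time accumulates across start()/stop() pairs and can also be
// read while the stopwatch is running.
#include <wtf/Stopwatch.h>

double measureSomething()
{
    auto stopwatch = WTF::Stopwatch::create(); // Ref<Stopwatch>, starts out reset
    stopwatch->start();
    // ... work to be measured ...
    stopwatch->stop();
    return stopwatch->elapsedTime();           // seconds
}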
diff --git a/Source/WTF/wtf/StreamBuffer.h b/Source/WTF/wtf/StreamBuffer.h
index f4b14c78c..ec99253cb 100644
--- a/Source/WTF/wtf/StreamBuffer.h
+++ b/Source/WTF/wtf/StreamBuffer.h
@@ -32,8 +32,6 @@
#define WTF_StreamBuffer_h
#include <wtf/Deque.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
namespace WTF {
@@ -61,7 +59,7 @@ public:
m_size += size;
while (size) {
if (!m_buffer.size() || m_buffer.last()->size() == BlockSize)
- m_buffer.append(adoptPtr(new Block));
+ m_buffer.append(std::make_unique<Block>());
size_t appendSize = std::min(BlockSize - m_buffer.last()->size(), size);
m_buffer.last()->append(data, appendSize);
data += appendSize;
@@ -108,7 +106,7 @@ public:
private:
size_t m_size;
size_t m_readOffset;
- Deque<OwnPtr<Block>> m_buffer;
+ Deque<std::unique_ptr<Block>> m_buffer;
};
} // namespace WTF
diff --git a/Source/WTF/wtf/StringExtras.h b/Source/WTF/wtf/StringExtras.h
index eaf0cf76a..6797d2ce1 100644
--- a/Source/WTF/wtf/StringExtras.h
+++ b/Source/WTF/wtf/StringExtras.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2006, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Electronic Arts, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,10 +11,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -35,58 +36,8 @@
#endif
#if COMPILER(MSVC)
-// FIXME: why a COMPILER check instead of OS? also, these should be HAVE checks
-inline int snprintf(char* buffer, size_t count, const char* format, ...)
-{
- int result;
- va_list args;
- va_start(args, format);
- result = _vsnprintf(buffer, count, format, args);
- va_end(args);
-
- // In the case where the string entirely filled the buffer, _vsnprintf will not
- // null-terminate it, but snprintf must.
- if (count > 0)
- buffer[count - 1] = '\0';
-
- return result;
-}
-
-inline double wtf_vsnprintf(char* buffer, size_t count, const char* format, va_list args)
-{
- int result = _vsnprintf(buffer, count, format, args);
-
- // In the case where the string entirely filled the buffer, _vsnprintf will not
- // null-terminate it, but vsnprintf must.
- if (count > 0)
- buffer[count - 1] = '\0';
-
- return result;
-}
-
-// Work around a difference in Microsoft's implementation of vsnprintf, where
-// vsnprintf does not null terminate the buffer. WebKit can rely on the null termination.
-#define vsnprintf(buffer, count, format, args) wtf_vsnprintf(buffer, count, format, args)
-
-#if OS(WINCE)
-
-inline int strnicmp(const char* string1, const char* string2, size_t count)
-{
- return _strnicmp(string1, string2, count);
-}
-
-inline int stricmp(const char* string1, const char* string2)
-{
- return _stricmp(string1, string2);
-}
-
-inline char* strdup(const char* strSource)
-{
- return _strdup(strSource);
-}
-
-#endif
+// FIXME: We should stop using these entirely and use suitable versions of equalIgnoringASCIICase instead.
inline int strncasecmp(const char* s1, const char* s2, size_t len)
{
diff --git a/Source/WTF/wtf/StringPrintStream.cpp b/Source/WTF/wtf/StringPrintStream.cpp
index 0fd6e4760..6a8881c5b 100644
--- a/Source/WTF/wtf/StringPrintStream.cpp
+++ b/Source/WTF/wtf/StringPrintStream.cpp
@@ -20,7 +20,7 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
@@ -52,32 +52,34 @@ void StringPrintStream::vprintf(const char* format, va_list argList)
{
ASSERT_WITH_SECURITY_IMPLICATION(m_next < m_size);
ASSERT(!m_buffer[m_next]);
-
+
va_list firstPassArgList;
va_copy(firstPassArgList, argList);
-
+
int numberOfBytesNotIncludingTerminatorThatWouldHaveBeenWritten =
vsnprintf(m_buffer + m_next, m_size - m_next, format, firstPassArgList);
-
+
+ va_end(firstPassArgList);
+
int numberOfBytesThatWouldHaveBeenWritten =
numberOfBytesNotIncludingTerminatorThatWouldHaveBeenWritten + 1;
-
+
if (m_next + numberOfBytesThatWouldHaveBeenWritten <= m_size) {
m_next += numberOfBytesNotIncludingTerminatorThatWouldHaveBeenWritten;
return; // This means that vsnprintf() succeeded.
}
-
+
increaseSize(m_next + numberOfBytesThatWouldHaveBeenWritten);
-
+
int numberOfBytesNotIncludingTerminatorThatWereWritten =
vsnprintf(m_buffer + m_next, m_size - m_next, format, argList);
-
+
int numberOfBytesThatWereWritten = numberOfBytesNotIncludingTerminatorThatWereWritten + 1;
-
+
ASSERT_UNUSED(numberOfBytesThatWereWritten, m_next + numberOfBytesThatWereWritten <= m_size);
-
+
m_next += numberOfBytesNotIncludingTerminatorThatWereWritten;
-
+
ASSERT_WITH_SECURITY_IMPLICATION(m_next < m_size);
ASSERT(!m_buffer[m_next]);
}
@@ -100,14 +102,20 @@ String StringPrintStream::toString()
return String::fromUTF8(m_buffer, m_next);
}
+String StringPrintStream::toStringWithLatin1Fallback()
+{
+ ASSERT(m_next == strlen(m_buffer));
+ return String::fromUTF8WithLatin1Fallback(m_buffer, m_next);
+}
+
void StringPrintStream::increaseSize(size_t newSize)
{
ASSERT_WITH_SECURITY_IMPLICATION(newSize > m_size);
ASSERT(newSize > sizeof(m_inlineBuffer));
-
+
// Use exponential resizing to reduce thrashing.
m_size = newSize << 1;
-
+
// Use fastMalloc instead of fastRealloc because we know that for the sizes we're using,
// fastRealloc will just do malloc+free anyway. Also, this simplifies the code since
// we can't realloc the inline buffer.
diff --git a/Source/WTF/wtf/StringPrintStream.h b/Source/WTF/wtf/StringPrintStream.h
index 3bbb3fbe2..f5de7362c 100644
--- a/Source/WTF/wtf/StringPrintStream.h
+++ b/Source/WTF/wtf/StringPrintStream.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -37,10 +37,13 @@ public:
WTF_EXPORT_PRIVATE StringPrintStream();
WTF_EXPORT_PRIVATE virtual ~StringPrintStream();
- virtual void vprintf(const char* format, va_list) override WTF_ATTRIBUTE_PRINTF(2, 0);
+ WTF_EXPORT_PRIVATE void vprintf(const char* format, va_list) override WTF_ATTRIBUTE_PRINTF(2, 0);
+
+ size_t length() const { return m_next; }
WTF_EXPORT_PRIVATE CString toCString();
WTF_EXPORT_PRIVATE String toString();
+ WTF_EXPORT_PRIVATE String toStringWithLatin1Fallback();
WTF_EXPORT_PRIVATE void reset();
private:
@@ -53,57 +56,20 @@ private:
};
// Stringify any type T that has a WTF::printInternal(PrintStream&, const T&)
-template<typename T>
-CString toCString(const T& value)
-{
- StringPrintStream stream;
- stream.print(value);
- return stream.toCString();
-}
-template<typename T1, typename T2>
-CString toCString(const T1& value1, const T2& value2)
-{
- StringPrintStream stream;
- stream.print(value1, value2);
- return stream.toCString();
-}
-template<typename T1, typename T2, typename T3>
-CString toCString(const T1& value1, const T2& value2, const T3& value3)
-{
- StringPrintStream stream;
- stream.print(value1, value2, value3);
- return stream.toCString();
-}
-
-template<typename T1, typename T2, typename T3, typename T4>
-CString toCString(const T1& value1, const T2& value2, const T3& value3, const T4& value4)
-{
- StringPrintStream stream;
- stream.print(value1, value2, value3, value4);
- return stream.toCString();
-}
-
-template<typename T1, typename T2, typename T3, typename T4, typename T5>
-CString toCString(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5)
-{
- StringPrintStream stream;
- stream.print(value1, value2, value3, value4, value5);
- return stream.toCString();
-}
-template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
-CString toCString(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6)
+template<typename... Types>
+CString toCString(const Types&... values)
{
StringPrintStream stream;
- stream.print(value1, value2, value3, value4, value5, value6);
+ stream.print(values...);
return stream.toCString();
}
-template<typename T>
-String toString(const T& value)
+template<typename... Types>
+String toString(const Types&... values)
{
StringPrintStream stream;
- stream.print(value);
+ stream.print(values...);
return stream.toString();
}
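// A minimal usage sketch for the variadic toCString()/toString() above (illustrative, not
// part of the diff): any mix of values printable via WTF::printInternal() can be formatted
// in one call, replacing the removed fixed-arity overloads.
#include <wtf/StringPrintStream.h>
#include <wtf/text/CString.h>

CString describe(int count, double ratio)
{
    return WTF::toCString("count=", count, ", ratio=", ratio);
}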
diff --git a/Source/WTF/wtf/SynchronizedFixedQueue.h b/Source/WTF/wtf/SynchronizedFixedQueue.h
new file mode 100644
index 000000000..15f97f90e
--- /dev/null
+++ b/Source/WTF/wtf/SynchronizedFixedQueue.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/Condition.h>
+#include <wtf/Deque.h>
+#include <wtf/HashSet.h>
+#include <wtf/Lock.h>
+#include <wtf/Locker.h>
+
+namespace WTF {
+
+template<typename T, size_t BufferSize>
+class SynchronizedFixedQueue {
+public:
+ SynchronizedFixedQueue()
+ {
+        static_assert(!((BufferSize - 1) & BufferSize), "BufferSize must be a power of 2.");
+ }
+
+ void open()
+ {
+ LockHolder lockHolder(m_mutex);
+ if (m_open)
+ return;
+
+ // Restore the queue to its original state.
+ m_open = true;
+ m_queue.clear();
+ }
+
+ void close()
+ {
+ LockHolder lockHolder(m_mutex);
+ if (!m_open)
+ return;
+
+ // Wake all the sleeping threads up with a closing state.
+ m_open = false;
+ m_condition.notifyAll();
+ }
+
+ bool isOpen()
+ {
+ LockHolder lockHolder(m_mutex);
+ return m_open;
+ }
+
+ bool enqueue(const T& value)
+ {
+ LockHolder lockHolder(m_mutex);
+
+ // Wait for an empty place to be available in the queue.
+ m_condition.wait(m_mutex, [this]() { return !m_open || m_queue.size() < BufferSize; });
+
+ // The queue is closing, exit immediately.
+ if (!m_open)
+ return false;
+
+ // Add the item in the queue.
+ m_queue.append(value);
+
+ // Notify the other threads that an item was added to the queue.
+ m_condition.notifyAll();
+ return true;
+ }
+
+ bool dequeue(T& value)
+ {
+ LockHolder lockHolder(m_mutex);
+
+ // Wait for an item to be added.
+ m_condition.wait(m_mutex, [this]() { return !m_open || m_queue.size(); });
+
+ // The queue is closing, exit immediately.
+ if (!m_open)
+ return false;
+
+ // Get a copy from m_queue.first and then remove it.
+ value = m_queue.first();
+ m_queue.removeFirst();
+
+ // Notify the other threads that an item was removed from the queue.
+ m_condition.notifyAll();
+ return true;
+ }
+
+private:
+ Lock m_mutex;
+ Condition m_condition;
+
+ bool m_open { true };
+ Deque<T, BufferSize> m_queue;
+};
+
+}
+
+using WTF::SynchronizedFixedQueue;
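
A brief usage sketch for the queue added above, with hypothetical produce()/consume() functions meant to run on separate threads; the points illustrated are the blocking enqueue()/dequeue() pair and the way close() wakes waiters (note that once closed, dequeue() returns false even if items are still queued).

    #include <wtf/SynchronizedFixedQueue.h>

    // BufferSize must be a power of two, as the static_assert above enforces.
    static SynchronizedFixedQueue<int, 16> queue;

    void produce() // Hypothetical producer, run on one thread.
    {
        for (int i = 0; i < 100; ++i) {
            if (!queue.enqueue(i)) // Blocks while the queue is full; returns false if it was closed.
                return;
        }
        queue.close(); // Wakes any thread blocked in dequeue().
    }

    void consume() // Hypothetical consumer, run on another thread.
    {
        int value;
        while (queue.dequeue(value)) {
            // Blocks while the queue is empty; process `value` here.
        }
        // dequeue() returned false: the queue has been closed.
    }
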
diff --git a/Source/WTF/wtf/SystemTracing.h b/Source/WTF/wtf/SystemTracing.h
new file mode 100644
index 000000000..968fd678c
--- /dev/null
+++ b/Source/WTF/wtf/SystemTracing.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if USE(APPLE_INTERNAL_SDK)
+#include <System/sys/kdebug.h>
+#define HAVE_KDEBUG_H 1
+#endif
+
+// No namespaces because this file has to be includable from C and Objective-C.
+
+// Reserved component code. Do not change this.
+#define WEBKIT_COMPONENT 47
+
+// Trace point codes can be up to 14 bits (0-16383).
+// When adding or changing these codes, update Tools/Tracing/SystemTracePoints.plist to match.
+enum TracePointCode {
+ WTFRange = 0,
+
+ JavaScriptRange = 2500,
+ VMEntryScopeStart,
+ VMEntryScopeEnd,
+
+ WebCoreRange = 5000,
+ StyleRecalcStart,
+ StyleRecalcEnd,
+ LayoutStart,
+ LayoutEnd,
+ PaintViewStart,
+ PaintViewEnd,
+ PaintLayerStart,
+ PaintLayerEnd,
+ RAFDisplayLinkScheduled,
+ RAFDisplayLinkFired,
+ RAFCallbackStart,
+ RAFCallbackEnd,
+
+ WebKitRange = 10000,
+ WebKit2Range = 12000,
+
+ RAFDidUpdateStart,
+ RAFDidUpdateEnd,
+ RAFBackingStoreFlushStart,
+ RAFBackingStoreFlushEnd,
+ RAFBuildTransactionStart,
+ RAFBuildTransactionEnd,
+
+ UIProcessRange = 14000,
+
+ RAFCommitLayerTreeStart,
+ RAFCommitLayerTreeEnd,
+ RAFDidRefreshDisplayStart,
+ RAFDidRefreshDisplayEnd,
+};
+
+#ifdef __cplusplus
+
+namespace WTF {
+
+inline void TracePoint(TracePointCode code, uint64_t data1 = 0, uint64_t data2 = 0, uint64_t data3 = 0, uint64_t data4 = 0)
+{
+#if HAVE(KDEBUG_H)
+ kdebug_trace(ARIADNEDBG_CODE(WEBKIT_COMPONENT, code), data1, data2, data3, data4);
+#else
+ UNUSED_PARAM(code);
+ UNUSED_PARAM(data1);
+ UNUSED_PARAM(data2);
+ UNUSED_PARAM(data3);
+ UNUSED_PARAM(data4);
+#endif
+}
+
+class TraceScope {
+public:
+
+ TraceScope(TracePointCode entryCode, TracePointCode exitCode)
+ : m_exitCode(exitCode)
+ {
+ TracePoint(entryCode);
+ }
+
+ ~TraceScope()
+ {
+ TracePoint(m_exitCode);
+ }
+
+private:
+ TracePointCode m_exitCode;
+};
+
+} // namespace WTF
+
+using WTF::TraceScope;
+using WTF::TracePoint;
+
+#endif // __cplusplus
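
A sketch of how the scoped trace points above might be used; the surrounding function is hypothetical, and on builds without the internal kdebug header every call compiles to a no-op.

    #include <wtf/SystemTracing.h>

    void paintExample() // Hypothetical caller; only the tracing calls are the point.
    {
        TraceScope scope(PaintViewStart, PaintViewEnd); // Emits PaintViewStart now, PaintViewEnd at scope exit.

        TracePoint(PaintLayerStart);
        // ... paint one layer ...
        TracePoint(PaintLayerEnd, /* data1 */ 1);       // Up to four optional payload words, all defaulting to 0.
    }
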
diff --git a/Source/WTF/wtf/TCPackedCache.h b/Source/WTF/wtf/TCPackedCache.h
deleted file mode 100644
index 0464f8fdc..000000000
--- a/Source/WTF/wtf/TCPackedCache.h
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright (c) 2007, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Geoff Pike
-//
-// This file provides a minimal cache that can hold a <key, value> pair
-// with little if any wasted space. The types of the key and value
-// must be unsigned integral types or at least have unsigned semantics
-// for >>, casting, and similar operations.
-//
-// Synchronization is not provided. However, the cache is implemented
-// as an array of cache entries whose type is chosen at compile time.
-// If a[i] is atomic on your hardware for the chosen array type then
-// raciness will not necessarily lead to bugginess. The cache entries
-// must be large enough to hold a partial key and a value packed
-// together. The partial keys are bit strings of length
-// kKeybits - kHashbits, and the values are bit strings of length kValuebits.
-//
-// In an effort to use minimal space, every cache entry represents
-// some <key, value> pair; the class provides no way to mark a cache
-// entry as empty or uninitialized. In practice, you may want to have
-// reserved keys or values to get around this limitation. For example, in
-// tcmalloc's PageID-to-sizeclass cache, a value of 0 is used as
-// "unknown sizeclass."
-//
-// Usage Considerations
-// --------------------
-//
-// kHashbits controls the size of the cache. The best value for
-// kHashbits will of course depend on the application. Perhaps try
-// tuning the value of kHashbits by measuring different values on your
-// favorite benchmark. Also remember not to be a pig; other
-// programs that need resources may suffer if you are.
-//
-// The main uses for this class will be when performance is
-// critical and there's a convenient type to hold the cache's
-// entries. As described above, the number of bits required
-// for a cache entry is (kKeybits - kHashbits) + kValuebits. Suppose
-// kKeybits + kValuebits is 43. Then it probably makes sense to
-// chose kHashbits >= 11 so that cache entries fit in a uint32.
-//
-// On the other hand, suppose kKeybits = kValuebits = 64. Then
-// using this class may be less worthwhile. You'll probably
-// be using 128 bits for each entry anyway, so maybe just pick
-// a hash function, H, and use an array indexed by H(key):
-// void Put(K key, V value) { a_[H(key)] = pair<K, V>(key, value); }
-// V GetOrDefault(K key, V default) { const pair<K, V> &p = a_[H(key)]; ... }
-// etc.
-//
-// Further Details
-// ---------------
-//
-// For caches used only by one thread, the following is true:
-// 1. For a cache c,
-// (c.Put(key, value), c.GetOrDefault(key, 0)) == value
-// and
-// (c.Put(key, value), <...>, c.GetOrDefault(key, 0)) == value
-// if the elided code contains no c.Put calls.
-//
-// 2. Has(key) will return false if no <key, value> pair with that key
-// has ever been Put. However, a newly initialized cache will have
-// some <key, value> pairs already present. When you create a new
-// cache, you must specify an "initial value." The initialization
-// procedure is equivalent to Clear(initial_value), which is
-// equivalent to Put(k, initial_value) for all keys k from 0 to
-// 2^kHashbits - 1.
-//
-// 3. If key and key' differ then the only way Put(key, value) may
-// cause Has(key') to change is that Has(key') may change from true to
-// false. Furthermore, a Put() call that doesn't change Has(key')
-// doesn't change GetOrDefault(key', ...) either.
-//
-// Implementation details:
-//
-// This is a direct-mapped cache with 2^kHashbits entries;
-// the hash function simply takes the low bits of the key.
-// So, we don't have to store the low bits of the key in the entries.
-// Instead, an entry is the high bits of a key and a value, packed
-// together. E.g., a 20 bit key and a 7 bit value only require
-// a uint16 for each entry if kHashbits >= 11.
-//
-// Alternatives to this scheme will be added as needed.
-
-#ifndef TCMALLOC_PACKED_CACHE_INL_H__
-#define TCMALLOC_PACKED_CACHE_INL_H__
-
-#ifndef WTF_CHANGES
-#include "base/basictypes.h" // for COMPILE_ASSERT
-#include "base/logging.h" // for DCHECK
-#endif
-
-#ifndef DCHECK_EQ
-#define DCHECK_EQ(val1, val2) ASSERT((val1) == (val2))
-#endif
-
-// A safe way of doing "(1 << n) - 1" -- without worrying about overflow
-// Note this will all be resolved to a constant expression at compile-time
-#define N_ONES_(IntType, N) \
- ( (N) == 0 ? 0 : ((static_cast<IntType>(1) << ((N)-1))-1 + \
- (static_cast<IntType>(1) << ((N)-1))) )
-
-// The types K and V provide upper bounds on the number of valid keys
-// and values, but we explicitly require the keys to be less than
-// 2^kKeybits and the values to be less than 2^kValuebits. The size of
-// the table is controlled by kHashbits, and the type of each entry in
-// the cache is T. See also the big comment at the top of the file.
-template <int kKeybits, typename T>
-class PackedCache {
- public:
- typedef uintptr_t K;
- typedef size_t V;
- static const size_t kHashbits = 12;
- static const size_t kValuebits = 8;
-
- explicit PackedCache(V initial_value) {
- COMPILE_ASSERT(kKeybits <= sizeof(K) * 8, key_size);
- COMPILE_ASSERT(kValuebits <= sizeof(V) * 8, value_size);
- COMPILE_ASSERT(kHashbits <= kKeybits, hash_function);
- COMPILE_ASSERT(kKeybits - kHashbits + kValuebits <= kTbits,
- entry_size_must_be_big_enough);
- Clear(initial_value);
- }
-
- void Put(K key, V value) {
- DCHECK_EQ(key, key & kKeyMask);
- DCHECK_EQ(value, value & kValueMask);
- array_[Hash(key)] = static_cast<T>(KeyToUpper(key) | value);
- }
-
- bool Has(K key) const {
- DCHECK_EQ(key, key & kKeyMask);
- return KeyMatch(array_[Hash(key)], key);
- }
-
- V GetOrDefault(K key, V default_value) const {
- // As with other code in this class, we touch array_ as few times
- // as we can. Assuming entries are read atomically (e.g., their
- // type is uintptr_t on most hardware) then certain races are
- // harmless.
- DCHECK_EQ(key, key & kKeyMask);
- T entry = array_[Hash(key)];
- return KeyMatch(entry, key) ? EntryToValue(entry) : default_value;
- }
-
- void Clear(V value) {
- DCHECK_EQ(value, value & kValueMask);
- for (int i = 0; i < 1 << kHashbits; i++) {
- array_[i] = static_cast<T>(value);
- }
- }
-
- private:
- // We are going to pack a value and the upper part of a key into
- // an entry of type T. The UPPER type is for the upper part of a key,
- // after the key has been masked and shifted for inclusion in an entry.
- typedef T UPPER;
-
- static V EntryToValue(T t) { return t & kValueMask; }
-
- static UPPER EntryToUpper(T t) { return t & kUpperMask; }
-
- // If v is a V and u is an UPPER then you can create an entry by
- // doing u | v. kHashbits determines where in a K to find the upper
- // part of the key, and kValuebits determines where in the entry to put
- // it.
- static UPPER KeyToUpper(K k) {
- const int shift = kHashbits - kValuebits;
- // Assume kHashbits >= kValuebits. It would be easy to lift this assumption.
- return static_cast<T>(k >> shift) & kUpperMask;
- }
-
- // This is roughly the inverse of KeyToUpper(). Some of the key has been
- // thrown away, since KeyToUpper() masks off the low bits of the key.
- static K UpperToPartialKey(UPPER u) {
- DCHECK_EQ(u, u & kUpperMask);
- const int shift = kHashbits - kValuebits;
- // Assume kHashbits >= kValuebits. It would be easy to lift this assumption.
- return static_cast<K>(u) << shift;
- }
-
- static size_t Hash(K key) {
- return static_cast<size_t>(key) & N_ONES_(size_t, kHashbits);
- }
-
- // Does the entry's partial key match the relevant part of the given key?
- static bool KeyMatch(T entry, K key) {
- return ((KeyToUpper(key) ^ entry) & kUpperMask) == 0;
- }
-
- static const size_t kTbits = 8 * sizeof(T);
- static const int kUpperbits = kKeybits - kHashbits;
-
- // For masking a K.
- static const K kKeyMask = N_ONES_(K, kKeybits);
-
- // For masking a T.
- static const T kUpperMask = N_ONES_(T, kUpperbits) << kValuebits;
-
- // For masking a V or a T.
- static const V kValueMask = N_ONES_(V, kValuebits);
-
- T array_[1 << kHashbits];
-};
-
-#undef N_ONES_
-
-#endif // TCMALLOC_PACKED_CACHE_INL_H__
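
The removed header's packing scheme, restated as standalone arithmetic using the worked numbers from its comment (an illustration of the technique, not the deleted class's exact bit layout):

    #include <cstdint>

    // Direct-mapped <key, value> cache entry packing: with a 20-bit key, 12 hash
    // bits and a 7-bit value, an entry needs (20 - 12) + 7 = 15 bits, so a
    // uint16_t slot suffices.
    const int kKeybits = 20, kHashbits = 12, kValuebits = 7;

    static uint32_t slot(uint32_t key)      { return key & ((1u << kHashbits) - 1); } // table index
    static uint32_t upperBits(uint32_t key) { return key >> kHashbits; }              // bits not implied by the index
    static uint16_t pack(uint32_t key, uint32_t value)
    {
        return static_cast<uint16_t>((upperBits(key) << kValuebits) | value);
    }
    static bool matches(uint16_t entry, uint32_t key) { return (entry >> kValuebits) == upperBits(key); }
    static uint32_t valueOf(uint16_t entry)           { return entry & ((1u << kValuebits) - 1); }

    // Put: table[slot(key)] = pack(key, value).
    // Get: entry = table[slot(key)]; return matches(entry, key) ? valueOf(entry) : defaultValue.
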
diff --git a/Source/WTF/wtf/TCPageMap.h b/Source/WTF/wtf/TCPageMap.h
deleted file mode 100644
index 94016a6c6..000000000
--- a/Source/WTF/wtf/TCPageMap.h
+++ /dev/null
@@ -1,361 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <opensource@google.com>
-//
-// A data structure used by the caching malloc. It maps from page# to
-// a pointer that contains info about that page. We use two
-// representations: one for 32-bit addresses, and another for 64 bit
-// addresses. Both representations provide the same interface. The
-// first representation is implemented as a flat array, the seconds as
-// a three-level radix tree that strips away approximately 1/3rd of
-// the bits every time.
-//
-// The BITS parameter should be the number of bits required to hold
-// a page number. E.g., with 32 bit pointers and 4K pages (i.e.,
-// page offset fits in lower 12 bits), BITS == 20.
-
-#ifndef TCMALLOC_PAGEMAP_H__
-#define TCMALLOC_PAGEMAP_H__
-
-#include <stdint.h>
-#include <string.h>
-#include <wtf/Assertions.h>
-
-// Single-level array
-template <int BITS>
-class TCMalloc_PageMap1 {
- private:
- void** array_;
-
- public:
- typedef uintptr_t Number;
-
- void init(void* (*allocator)(size_t)) {
- array_ = reinterpret_cast<void**>((*allocator)(sizeof(void*) << BITS));
- memset(array_, 0, sizeof(void*) << BITS);
- }
-
- // Ensure that the map contains initialized entries "x .. x+n-1".
- // Returns true if successful, false if we could not allocate memory.
- bool Ensure(Number, size_t) {
- // Nothing to do since flat array was allocate at start
- return true;
- }
-
- void PreallocateMoreMemory() {}
-
- // REQUIRES "k" is in range "[0,2^BITS-1]".
- // REQUIRES "k" has been ensured before.
- //
- // Return the current value for KEY. Returns "Value()" if not
- // yet set.
- void* get(Number k) const {
- return array_[k];
- }
-
- // REQUIRES "k" is in range "[0,2^BITS-1]".
- // REQUIRES "k" has been ensured before.
- //
- // Sets the value for KEY.
- void set(Number k, void* v) {
- array_[k] = v;
- }
-};
-
-// Two-level radix tree
-template <int BITS>
-class TCMalloc_PageMap2 {
- private:
- // Put 32 entries in the root and (2^BITS)/32 entries in each leaf.
- static const int ROOT_BITS = 5;
- static const int ROOT_LENGTH = 1 << ROOT_BITS;
-
- static const int LEAF_BITS = BITS - ROOT_BITS;
- static const int LEAF_LENGTH = 1 << LEAF_BITS;
-
- // Leaf node
- struct Leaf {
- void* values[LEAF_LENGTH];
- };
-
- Leaf* root_[ROOT_LENGTH]; // Pointers to 32 child nodes
- void* (*allocator_)(size_t); // Memory allocator
-
- public:
- typedef uintptr_t Number;
-
- void init(void* (*allocator)(size_t)) {
- allocator_ = allocator;
- memset(root_, 0, sizeof(root_));
- }
-
- void* get(Number k) const {
- ASSERT(k >> BITS == 0);
- const Number i1 = k >> LEAF_BITS;
- const Number i2 = k & (LEAF_LENGTH-1);
- return root_[i1]->values[i2];
- }
-
- void set(Number k, void* v) {
- ASSERT(k >> BITS == 0);
- const Number i1 = k >> LEAF_BITS;
- const Number i2 = k & (LEAF_LENGTH-1);
- root_[i1]->values[i2] = v;
- }
-
- bool Ensure(Number start, size_t n) {
- for (Number key = start; key <= start + n - 1; ) {
- const Number i1 = key >> LEAF_BITS;
-
- // Make 2nd level node if necessary
- if (root_[i1] == NULL) {
- Leaf* leaf = reinterpret_cast<Leaf*>((*allocator_)(sizeof(Leaf)));
- if (leaf == NULL) return false;
- memset(leaf, 0, sizeof(*leaf));
- root_[i1] = leaf;
- }
-
- // Advance key past whatever is covered by this leaf node
- key = ((key >> LEAF_BITS) + 1) << LEAF_BITS;
- }
- return true;
- }
-
- void PreallocateMoreMemory() {
- // Allocate enough to keep track of all possible pages
- Ensure(0, 1 << BITS);
- }
-
-#ifdef WTF_CHANGES
- template<class Visitor, class MemoryReader>
- void visitValues(Visitor& visitor, const MemoryReader& reader)
- {
- const Number leafIndexMask = LEAF_LENGTH - 1;
-
- const Number maxKey = (1l << BITS) - 1;
- const Number invalidIndex = maxKey;
- Number previousRootIndex = invalidIndex;
-
- Leaf* leaf = 0;
-
- for (Number key = 0; key < maxKey; ) {
- const Number rootIndex = key >> LEAF_BITS;
- const Number leafIndex = key & leafIndexMask;
-
- if (rootIndex != previousRootIndex) {
- if (!root_[rootIndex]) {
- // There's no node at this index. Move on to the next index at the root level,
- // clearing the leaf index so that we start from the beginning of the next node.
- key += 1 << LEAF_BITS;
- key &= ~leafIndexMask;
- continue;
- }
-
- leaf = reader(root_[rootIndex]);
- previousRootIndex = rootIndex;
- }
-
- key += visitor.visit(leaf->values[leafIndex]);
- }
- }
-
- template<class Visitor, class MemoryReader>
- void visitAllocations(Visitor& visitor, const MemoryReader&) {
- for (int i = 0; i < ROOT_LENGTH; i++) {
- if (root_[i])
- visitor.visit(root_[i], sizeof(Leaf));
- }
- }
-#endif
-};
-
-// Three-level radix tree
-template <int BITS>
-class TCMalloc_PageMap3 {
- private:
- // How many bits should we consume at each interior level
- static const int INTERIOR_BITS = (BITS + 2) / 3; // Round-up
- static const int INTERIOR_LENGTH = 1 << INTERIOR_BITS;
-
- // How many bits should we consume at leaf level
- static const int LEAF_BITS = BITS - 2*INTERIOR_BITS;
- static const int LEAF_LENGTH = 1 << LEAF_BITS;
-
- // Interior node
- struct Node {
- Node* ptrs[INTERIOR_LENGTH];
- };
-
- // Leaf node
- struct Leaf {
- void* values[LEAF_LENGTH];
- };
-
- Node* root_; // Root of radix tree
- void* (*allocator_)(size_t); // Memory allocator
-
- Node* NewNode() {
- Node* result = reinterpret_cast<Node*>((*allocator_)(sizeof(Node)));
- if (result != NULL) {
- memset(result, 0, sizeof(*result));
- }
- return result;
- }
-
- public:
- typedef uintptr_t Number;
-
- void init(void* (*allocator)(size_t)) {
- allocator_ = allocator;
- root_ = NewNode();
- }
-
- void* get(Number k) const {
- ASSERT(k >> BITS == 0);
- const Number i1 = k >> (LEAF_BITS + INTERIOR_BITS);
- const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1);
- const Number i3 = k & (LEAF_LENGTH-1);
- return reinterpret_cast<Leaf*>(root_->ptrs[i1]->ptrs[i2])->values[i3];
- }
-
- void set(Number k, void* v) {
- ASSERT(k >> BITS == 0);
- const Number i1 = k >> (LEAF_BITS + INTERIOR_BITS);
- const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1);
- const Number i3 = k & (LEAF_LENGTH-1);
- reinterpret_cast<Leaf*>(root_->ptrs[i1]->ptrs[i2])->values[i3] = v;
- }
-
- bool Ensure(Number start, size_t n) {
- for (Number key = start; key <= start + n - 1; ) {
- const Number i1 = key >> (LEAF_BITS + INTERIOR_BITS);
- const Number i2 = (key >> LEAF_BITS) & (INTERIOR_LENGTH-1);
-
- // Make 2nd level node if necessary
- if (root_->ptrs[i1] == NULL) {
- Node* n = NewNode();
- if (n == NULL) return false;
- root_->ptrs[i1] = n;
- }
-
- // Make leaf node if necessary
- if (root_->ptrs[i1]->ptrs[i2] == NULL) {
- Leaf* leaf = reinterpret_cast<Leaf*>((*allocator_)(sizeof(Leaf)));
- if (leaf == NULL) return false;
- memset(leaf, 0, sizeof(*leaf));
- root_->ptrs[i1]->ptrs[i2] = reinterpret_cast<Node*>(leaf);
- }
-
- // Advance key past whatever is covered by this leaf node
- key = ((key >> LEAF_BITS) + 1) << LEAF_BITS;
- }
- return true;
- }
-
- void PreallocateMoreMemory() {
- }
-
-#ifdef WTF_CHANGES
- template<class Visitor, class MemoryReader>
- void visitValues(Visitor& visitor, const MemoryReader& reader) {
- const Number intermediateIndexMask = (INTERIOR_LENGTH - 1) << LEAF_BITS;
- const Number leafIndexMask = LEAF_LENGTH - 1;
-
- const Number maxKey = (1l << BITS) - 1;
- const Number invalidIndex = maxKey;
- Number previousRootIndex = invalidIndex;
- Number previousIntermediateIndex = invalidIndex;
-
- Node* intermediateNode = 0;
- Leaf* leaf = 0;
-
- Node* root = reader(root_);
- for (Number key = 0; key < maxKey; ) {
- const Number rootIndex = key >> (LEAF_BITS + INTERIOR_BITS);
- const Number intermediateIndex = (key & intermediateIndexMask) >> LEAF_BITS;
- const Number leafIndex = key & leafIndexMask;
-
- if (rootIndex != previousRootIndex) {
- if (!root->ptrs[rootIndex]) {
- // There's no node at this index. Move on to the next index at the root level, clearing the
- // intermediate and leaf indices so that we start from the beginning of that next node.
- key += 1 << (LEAF_BITS + INTERIOR_BITS);
- key &= ~(leafIndexMask | intermediateIndexMask);
- continue;
- }
-
- intermediateNode = reader(root->ptrs[rootIndex]);
- previousRootIndex = rootIndex;
-
- // Invalidate the previous intermediate index since we've moved on to a different node.
- previousIntermediateIndex = invalidIndex;
- }
-
- if (intermediateIndex != previousIntermediateIndex) {
- if (!intermediateNode->ptrs[intermediateIndex]) {
- // There's no node at this index. Move on to the next index at the intermediate level,
- // clearing the leaf index so that we start from the beginning of the next node.
- key += 1 << LEAF_BITS;
- key &= ~leafIndexMask;
- continue;
- }
-
- leaf = reader(reinterpret_cast<Leaf*>(intermediateNode->ptrs[intermediateIndex]));
- previousIntermediateIndex = intermediateIndex;
- }
-
- key += visitor.visit(leaf->values[leafIndex]);
- }
- }
-
- template<class Visitor, class MemoryReader>
- void visitAllocations(Visitor& visitor, const MemoryReader& reader) {
- visitor.visit(root_, sizeof(Node));
-
- Node* root = reader(root_);
- for (int i = 0; i < INTERIOR_LENGTH; i++) {
- if (!root->ptrs[i])
- continue;
-
- visitor.visit(root->ptrs[i], sizeof(Node));
- Node* n = reader(root->ptrs[i]);
- for (int j = 0; j < INTERIOR_LENGTH; j++) {
- if (!n->ptrs[j])
- continue;
-
- visitor.visit(n->ptrs[j], sizeof(Leaf));
- }
- }
- }
-#endif
-};
-
-#endif // TCMALLOC_PAGEMAP_H__
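
A minimal two-level radix map in the spirit of the removed TCMalloc_PageMap2, kept to the same ROOT_BITS split; the visitor machinery and multi-key Ensure loop are omitted, and the allocator is plain malloc rather than an injected function pointer.

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    // Two-level radix map: the top ROOT_BITS of the page number index a small
    // root array, the remaining LEAF_BITS index a lazily allocated leaf.
    template<int BITS>
    class SimplePageMap2 {
        static const int ROOT_BITS = 5;
        static const int LEAF_BITS = BITS - ROOT_BITS;
        static const int LEAF_LENGTH = 1 << LEAF_BITS;

        struct Leaf { void* values[LEAF_LENGTH]; };
        Leaf* root_[1 << ROOT_BITS] = { };

    public:
        bool ensure(uintptr_t k) // Ensures the leaf covering key k exists.
        {
            uintptr_t i1 = k >> LEAF_BITS;
            if (!root_[i1]) {
                Leaf* leaf = static_cast<Leaf*>(malloc(sizeof(Leaf)));
                if (!leaf)
                    return false;
                memset(leaf, 0, sizeof(*leaf));
                root_[i1] = leaf;
            }
            return true;
        }

        void* get(uintptr_t k) const { return root_[k >> LEAF_BITS]->values[k & (LEAF_LENGTH - 1)]; }
        void set(uintptr_t k, void* v) { root_[k >> LEAF_BITS]->values[k & (LEAF_LENGTH - 1)] = v; }
    };

    // Usage, matching the removed header's example: 32-bit addresses and 4K pages
    // give BITS == 20, i.e. SimplePageMap2<20> map; map.ensure(page); map.set(page, info);
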
diff --git a/Source/WTF/wtf/TCSpinLock.h b/Source/WTF/wtf/TCSpinLock.h
deleted file mode 100644
index d8eddcfb2..000000000
--- a/Source/WTF/wtf/TCSpinLock.h
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright (c) 2005, 2006, Google Inc.
-// Copyright (c) 2010, Patrick Gansterer <paroga@paroga.com>
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <opensource@google.com>
-
-#ifndef TCMALLOC_INTERNAL_SPINLOCK_H__
-#define TCMALLOC_INTERNAL_SPINLOCK_H__
-
-#include <wtf/Atomics.h>
-#if OS(UNIX)
-#include <sched.h>
-#endif
-
-#if ENABLE(COMPARE_AND_SWAP)
-
-static void TCMalloc_SlowLock(unsigned* lockword);
-
-// The following is a struct so that it can be initialized at compile time
-struct TCMalloc_SpinLock {
- void Lock() {
- if (!WTF::weakCompareAndSwap(&lockword_, 0, 1))
- TCMalloc_SlowLock(&lockword_);
- WTF::memoryBarrierAfterLock();
- }
-
- void Unlock() {
- WTF::memoryBarrierBeforeUnlock();
- lockword_ = 0;
- }
-
- // Report if we think the lock can be held by this thread.
- // When the lock is truly held by the invoking thread
- // we will always return true.
- // Indended to be used as CHECK(lock.IsHeld());
- bool IsHeld() const {
- return lockword_ != 0;
- }
-
- void Init() { lockword_ = 0; }
- void Finalize() { }
-
- unsigned lockword_;
-};
-
-#define SPINLOCK_INITIALIZER { 0 }
-
-static void TCMalloc_SlowLock(unsigned* lockword) {
- do {
-#if OS(WINDOWS)
- Sleep(0);
-#else
- sched_yield();
-#endif
- } while (!WTF::weakCompareAndSwap(lockword, 0, 1));
-}
-
-#else
-
-#include <pthread.h>
-
-// Portable version
-struct TCMalloc_SpinLock {
- pthread_mutex_t private_lock_;
-
- inline void Init() {
- if (pthread_mutex_init(&private_lock_, NULL) != 0) CRASH();
- }
- inline void Finalize() {
- if (pthread_mutex_destroy(&private_lock_) != 0) CRASH();
- }
- inline void Lock() {
- if (pthread_mutex_lock(&private_lock_) != 0) CRASH();
- }
- inline void Unlock() {
- if (pthread_mutex_unlock(&private_lock_) != 0) CRASH();
- }
- bool IsHeld() {
- if (pthread_mutex_trylock(&private_lock_))
- return true;
-
- Unlock();
- return false;
- }
-};
-
-#define SPINLOCK_INITIALIZER { PTHREAD_MUTEX_INITIALIZER }
-
-#endif
-
-// Corresponding locker object that arranges to acquire a spinlock for
-// the duration of a C++ scope.
-class TCMalloc_SpinLockHolder {
- private:
- TCMalloc_SpinLock* lock_;
- public:
- inline explicit TCMalloc_SpinLockHolder(TCMalloc_SpinLock* l)
- : lock_(l) { l->Lock(); }
- inline ~TCMalloc_SpinLockHolder() { lock_->Unlock(); }
-};
-
-// Short-hands for convenient use by tcmalloc.cc
-typedef TCMalloc_SpinLock SpinLock;
-typedef TCMalloc_SpinLockHolder SpinLockHolder;
-
-#endif // TCMALLOC_INTERNAL_SPINLOCK_H__
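
The same lock-then-yield pattern as the removed TCMalloc_SpinLock, sketched with std::atomic instead of WTF::weakCompareAndSwap (an illustration of the technique, not a drop-in replacement):

    #include <atomic>
    #include <thread>

    struct SpinLockSketch {
        std::atomic<unsigned> lockword { 0 };

        void lock()
        {
            unsigned expected = 0;
            // Fast path: one compare-and-swap with acquire ordering on success.
            while (!lockword.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
                expected = 0;
                std::this_thread::yield(); // Slow path: yield and retry, like TCMalloc_SlowLock.
            }
        }

        void unlock()
        {
            lockword.store(0, std::memory_order_release); // Barrier before unlock, then clear.
        }
    };
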
diff --git a/Source/WTF/wtf/TCSystemAlloc.cpp b/Source/WTF/wtf/TCSystemAlloc.cpp
deleted file mode 100644
index 8f5c0525e..000000000
--- a/Source/WTF/wtf/TCSystemAlloc.cpp
+++ /dev/null
@@ -1,513 +0,0 @@
-// Copyright (c) 2005, 2007, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat
-
-#include "config.h"
-#if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC)
-#include "TCSystemAlloc.h"
-
-#include "Assertions.h"
-#include "CheckedArithmetic.h"
-#include "TCSpinLock.h"
-#include "VMTags.h"
-#include <algorithm>
-#include <stdint.h>
-
-#if OS(WINDOWS)
-#include "windows.h"
-#else
-#include <errno.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#endif
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-using namespace std;
-
-// Structure for discovering alignment
-union MemoryAligner {
- void* p;
- double d;
- size_t s;
-};
-
-static SpinLock spinlock = SPINLOCK_INITIALIZER;
-
-// Page size is initialized on demand
-static size_t pagesize = 0;
-
-// Configuration parameters.
-//
-// if use_devmem is true, either use_sbrk or use_mmap must also be true.
-// For 2.2 kernels, it looks like the sbrk address space (500MBish) and
-// the mmap address space (1300MBish) are disjoint, so we need both allocators
-// to get as much virtual memory as possible.
-#ifndef WTF_CHANGES
-static bool use_devmem = false;
-static bool use_sbrk = false;
-#endif
-
-#if HAVE(MMAP)
-static bool use_mmap = true;
-#endif
-
-#if HAVE(VIRTUALALLOC)
-static bool use_VirtualAlloc = true;
-#endif
-
-// Flags to keep us from retrying allocators that failed.
-static bool devmem_failure = false;
-static bool sbrk_failure = false;
-static bool mmap_failure = false;
-static bool VirtualAlloc_failure = false;
-
-#ifndef WTF_CHANGES
-DEFINE_int32(malloc_devmem_start, 0,
- "Physical memory starting location in MB for /dev/mem allocation."
- " Setting this to 0 disables /dev/mem allocation");
-DEFINE_int32(malloc_devmem_limit, 0,
- "Physical memory limit location in MB for /dev/mem allocation."
- " Setting this to 0 means no limit.");
-#endif
-
-#ifndef WTF_CHANGES
-
-static void* TrySbrk(size_t size, size_t *actual_size, size_t alignment) {
- size = ((size + alignment - 1) / alignment) * alignment;
-
- // could theoretically return the "extra" bytes here, but this
- // is simple and correct.
- if (actual_size)
- *actual_size = size;
-
- void* result = sbrk(size);
- if (result == reinterpret_cast<void*>(-1)) {
- sbrk_failure = true;
- return NULL;
- }
-
- // Is it aligned?
- uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
- if ((ptr & (alignment-1)) == 0) return result;
-
- // Try to get more memory for alignment
- size_t extra = alignment - (ptr & (alignment-1));
- void* r2 = sbrk(extra);
- if (reinterpret_cast<uintptr_t>(r2) == (ptr + size)) {
- // Contiguous with previous result
- return reinterpret_cast<void*>(ptr + extra);
- }
-
- // Give up and ask for "size + alignment - 1" bytes so
- // that we can find an aligned region within it.
- result = sbrk(size + alignment - 1);
- if (result == reinterpret_cast<void*>(-1)) {
- sbrk_failure = true;
- return NULL;
- }
- ptr = reinterpret_cast<uintptr_t>(result);
- if ((ptr & (alignment-1)) != 0) {
- ptr += alignment - (ptr & (alignment-1));
- }
- return reinterpret_cast<void*>(ptr);
-}
-
-#endif /* ifndef(WTF_CHANGES) */
-
-#if HAVE(MMAP)
-
-static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) {
- // Enforce page alignment
- if (pagesize == 0) pagesize = getpagesize();
- if (alignment < pagesize) alignment = pagesize;
- size = ((size + alignment - 1) / alignment) * alignment;
-
- // could theoretically return the "extra" bytes here, but this
- // is simple and correct.
- if (actual_size)
- *actual_size = size;
-
- // Ask for extra memory if alignment > pagesize
- size_t extra = 0;
- if (alignment > pagesize) {
- extra = alignment - pagesize;
- }
- Checked<size_t> mapSize = Checked<size_t>(size) + extra + 2 * pagesize;
- void* result = mmap(NULL, mapSize.unsafeGet(),
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS,
- VM_TAG_FOR_TCMALLOC_MEMORY, 0);
- if (result == reinterpret_cast<void*>(MAP_FAILED)) {
- mmap_failure = true;
- return NULL;
- }
- mmap(result, pagesize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_TCMALLOC_MEMORY, 0);
- mmap(static_cast<char*>(result) + (mapSize - pagesize).unsafeGet(), pagesize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_TCMALLOC_MEMORY, 0);
- result = static_cast<char*>(result) + pagesize;
- // Adjust the return memory so it is aligned
- uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
- size_t adjust = 0;
- if ((ptr & (alignment - 1)) != 0) {
- adjust = alignment - (ptr & (alignment - 1));
- }
-
- // Return the unused memory to the system
- if (adjust > 0) {
- munmap(reinterpret_cast<void*>(ptr), adjust);
- }
- if (adjust < extra) {
- munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
- }
-
- ptr += adjust;
- return reinterpret_cast<void*>(ptr);
-}
-
-#endif /* HAVE(MMAP) */
-
-#if HAVE(VIRTUALALLOC)
-
-static void* TryVirtualAlloc(size_t size, size_t *actual_size, size_t alignment) {
- // Enforce page alignment
- if (pagesize == 0) {
- SYSTEM_INFO system_info;
- GetSystemInfo(&system_info);
- pagesize = system_info.dwPageSize;
- }
-
- if (alignment < pagesize) alignment = pagesize;
- size = ((size + alignment - 1) / alignment) * alignment;
-
- // could theoretically return the "extra" bytes here, but this
- // is simple and correct.
- if (actual_size)
- *actual_size = size;
-
- // Ask for extra memory if alignment > pagesize
- size_t extra = 0;
- if (alignment > pagesize) {
- extra = alignment - pagesize;
- }
- void* result = VirtualAlloc(NULL, size + extra,
- MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
- PAGE_READWRITE);
-
- if (result == NULL) {
- VirtualAlloc_failure = true;
- return NULL;
- }
-
- // Adjust the return memory so it is aligned
- uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
- size_t adjust = 0;
- if ((ptr & (alignment - 1)) != 0) {
- adjust = alignment - (ptr & (alignment - 1));
- }
-
- // Return the unused memory to the system - we'd like to release but the best we can do
- // is decommit, since Windows only lets you free the whole allocation.
- if (adjust > 0) {
- VirtualFree(reinterpret_cast<void*>(ptr), adjust, MEM_DECOMMIT);
- }
- if (adjust < extra) {
- VirtualFree(reinterpret_cast<void*>(ptr + adjust + size), extra-adjust, MEM_DECOMMIT);
- }
-
- ptr += adjust;
- return reinterpret_cast<void*>(ptr);
-}
-
-#endif /* HAVE(MMAP) */
-
-#ifndef WTF_CHANGES
-static void* TryDevMem(size_t size, size_t *actual_size, size_t alignment) {
- static bool initialized = false;
- static off_t physmem_base; // next physical memory address to allocate
- static off_t physmem_limit; // maximum physical address allowed
- static int physmem_fd; // file descriptor for /dev/mem
-
- // Check if we should use /dev/mem allocation. Note that it may take
- // a while to get this flag initialized, so meanwhile we fall back to
- // the next allocator. (It looks like 7MB gets allocated before
- // this flag gets initialized -khr.)
- if (FLAGS_malloc_devmem_start == 0) {
- // NOTE: not a devmem_failure - we'd like TCMalloc_SystemAlloc to
- // try us again next time.
- return NULL;
- }
-
- if (!initialized) {
- physmem_fd = open("/dev/mem", O_RDWR);
- if (physmem_fd < 0) {
- devmem_failure = true;
- return NULL;
- }
- physmem_base = FLAGS_malloc_devmem_start*1024LL*1024LL;
- physmem_limit = FLAGS_malloc_devmem_limit*1024LL*1024LL;
- initialized = true;
- }
-
- // Enforce page alignment
- if (pagesize == 0) pagesize = getpagesize();
- if (alignment < pagesize) alignment = pagesize;
- size = ((size + alignment - 1) / alignment) * alignment;
-
- // could theoretically return the "extra" bytes here, but this
- // is simple and correct.
- if (actual_size)
- *actual_size = size;
-
- // Ask for extra memory if alignment > pagesize
- size_t extra = 0;
- if (alignment > pagesize) {
- extra = alignment - pagesize;
- }
-
- // check to see if we have any memory left
- if (physmem_limit != 0 && physmem_base + size + extra > physmem_limit) {
- devmem_failure = true;
- return NULL;
- }
- void *result = mmap(0, size + extra, PROT_READ | PROT_WRITE,
- MAP_SHARED, physmem_fd, physmem_base);
- if (result == reinterpret_cast<void*>(MAP_FAILED)) {
- devmem_failure = true;
- return NULL;
- }
- uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
-
- // Adjust the return memory so it is aligned
- size_t adjust = 0;
- if ((ptr & (alignment - 1)) != 0) {
- adjust = alignment - (ptr & (alignment - 1));
- }
-
- // Return the unused virtual memory to the system
- if (adjust > 0) {
- munmap(reinterpret_cast<void*>(ptr), adjust);
- }
- if (adjust < extra) {
- munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
- }
-
- ptr += adjust;
- physmem_base += adjust + size;
-
- return reinterpret_cast<void*>(ptr);
-}
-#endif
-
-void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) {
- // Discard requests that overflow
- if (size + alignment < size) return NULL;
-
- SpinLockHolder lock_holder(&spinlock);
-
- // Enforce minimum alignment
- if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner);
-
- // Try twice, once avoiding allocators that failed before, and once
- // more trying all allocators even if they failed before.
- for (int i = 0; i < 2; i++) {
-
-#ifndef WTF_CHANGES
- if (use_devmem && !devmem_failure) {
- void* result = TryDevMem(size, actual_size, alignment);
- if (result != NULL) return result;
- }
-
- if (use_sbrk && !sbrk_failure) {
- void* result = TrySbrk(size, actual_size, alignment);
- if (result != NULL) return result;
- }
-#endif
-
-#if HAVE(MMAP)
- if (use_mmap && !mmap_failure) {
- void* result = TryMmap(size, actual_size, alignment);
- if (result != NULL) return result;
- }
-#endif
-
-#if HAVE(VIRTUALALLOC)
- if (use_VirtualAlloc && !VirtualAlloc_failure) {
- void* result = TryVirtualAlloc(size, actual_size, alignment);
- if (result != NULL) return result;
- }
-#endif
-
- // nothing worked - reset failure flags and try again
- devmem_failure = false;
- sbrk_failure = false;
- mmap_failure = false;
- VirtualAlloc_failure = false;
- }
- return NULL;
-}
-
-#if HAVE(MADV_FREE_REUSE)
-
-void TCMalloc_SystemRelease(void* start, size_t length)
-{
- int madviseResult;
-
- while ((madviseResult = madvise(start, length, MADV_FREE_REUSABLE)) == -1 && errno == EAGAIN) { }
-
- // Although really advisory, if madvise fail, we want to know about it.
- ASSERT_UNUSED(madviseResult, madviseResult != -1);
-}
-
-#elif HAVE(MADV_FREE) || HAVE(MADV_DONTNEED)
-
-void TCMalloc_SystemRelease(void* start, size_t length)
-{
- // MADV_FREE clears the modified bit on pages, which allows
- // them to be discarded immediately.
-#if HAVE(MADV_FREE)
- const int advice = MADV_FREE;
-#else
- const int advice = MADV_DONTNEED;
-#endif
- if (pagesize == 0) pagesize = getpagesize();
- const size_t pagemask = pagesize - 1;
-
- size_t new_start = reinterpret_cast<size_t>(start);
- size_t end = new_start + length;
- size_t new_end = end;
-
- // Round up the starting address and round down the ending address
- // to be page aligned:
- new_start = (new_start + pagesize - 1) & ~pagemask;
- new_end = new_end & ~pagemask;
-
- ASSERT((new_start & pagemask) == 0);
- ASSERT((new_end & pagemask) == 0);
- ASSERT(new_start >= reinterpret_cast<size_t>(start));
- ASSERT(new_end <= end);
-
- if (new_end > new_start) {
- // Note -- ignoring most return codes, because if this fails it
- // doesn't matter...
- while (madvise(reinterpret_cast<char*>(new_start), new_end - new_start,
- advice) == -1 &&
- errno == EAGAIN) {
- // NOP
- }
- }
-}
-
-#elif HAVE(MMAP)
-
-void TCMalloc_SystemRelease(void* start, size_t length)
-{
- void* newAddress = mmap(start, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
- // If the mmap failed then that's ok, we just won't return the memory to the system.
- ASSERT_UNUSED(newAddress, newAddress == start || newAddress == reinterpret_cast<void*>(MAP_FAILED));
-}
-
-#elif HAVE(VIRTUALALLOC)
-
-void TCMalloc_SystemRelease(void* start, size_t length)
-{
- if (VirtualFree(start, length, MEM_DECOMMIT))
- return;
-
- // The decommit may fail if the memory region consists of allocations
- // from more than one call to VirtualAlloc. In this case, fall back to
- // using VirtualQuery to retrieve the allocation boundaries and decommit
- // them each individually.
-
- char* ptr = static_cast<char*>(start);
- char* end = ptr + length;
- MEMORY_BASIC_INFORMATION info;
- while (ptr < end) {
- size_t resultSize = VirtualQuery(ptr, &info, sizeof(info));
- ASSERT_UNUSED(resultSize, resultSize == sizeof(info));
-
- size_t decommitSize = min<size_t>(info.RegionSize, end - ptr);
- BOOL success = VirtualFree(ptr, decommitSize, MEM_DECOMMIT);
- ASSERT_UNUSED(success, success);
- ptr += decommitSize;
- }
-}
-
-#else
-
-// Platforms that don't support returning memory use an empty inline version of TCMalloc_SystemRelease
-// declared in TCSystemAlloc.h
-
-#endif
-
-#if HAVE(MADV_FREE_REUSE)
-
-void TCMalloc_SystemCommit(void* start, size_t length)
-{
- while (madvise(start, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
-}
-
-#elif HAVE(VIRTUALALLOC)
-
-void TCMalloc_SystemCommit(void* start, size_t length)
-{
- if (VirtualAlloc(start, length, MEM_COMMIT, PAGE_READWRITE) == start)
- return;
-
- // The commit may fail if the memory region consists of allocations
- // from more than one call to VirtualAlloc. In this case, fall back to
- // using VirtualQuery to retrieve the allocation boundaries and commit them
- // each individually.
-
- char* ptr = static_cast<char*>(start);
- char* end = ptr + length;
- MEMORY_BASIC_INFORMATION info;
- while (ptr < end) {
- size_t resultSize = VirtualQuery(ptr, &info, sizeof(info));
- ASSERT_UNUSED(resultSize, resultSize == sizeof(info));
-
- size_t commitSize = min<size_t>(info.RegionSize, end - ptr);
- void* newAddress = VirtualAlloc(ptr, commitSize, MEM_COMMIT, PAGE_READWRITE);
- ASSERT_UNUSED(newAddress, newAddress == ptr);
- ptr += commitSize;
- }
-}
-
-#else
-
-// Platforms that don't need to explicitly commit memory use an empty inline version of TCMalloc_SystemCommit
-// declared in TCSystemAlloc.h
-
-#endif
-
-#endif // #if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC)
-
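
The over-allocate-and-trim trick used by the removed TryMmap/TryVirtualAlloc paths, sketched for POSIX mmap only; the guard pages, failure flags, and size rounding of the original are left out.

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>
    #include <cstdint>

    // Allocate `size` bytes aligned to `alignment` (a power of two) by mapping
    // extra bytes when alignment exceeds the page size, then unmapping the
    // misaligned head and the leftover tail.
    static void* allocAligned(size_t size, size_t alignment)
    {
        size_t pagesize = sysconf(_SC_PAGESIZE);
        size_t extra = alignment > pagesize ? alignment - pagesize : 0;

        void* raw = mmap(nullptr, size + extra, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (raw == MAP_FAILED)
            return nullptr;

        uintptr_t ptr = reinterpret_cast<uintptr_t>(raw);
        size_t adjust = (ptr & (alignment - 1)) ? alignment - (ptr & (alignment - 1)) : 0;

        if (adjust)                          // Trim the misaligned head.
            munmap(raw, adjust);
        if (adjust < extra)                  // Trim whatever is left of the tail.
            munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);

        return reinterpret_cast<void*>(ptr + adjust);
    }
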
diff --git a/Source/WTF/wtf/TCSystemAlloc.h b/Source/WTF/wtf/TCSystemAlloc.h
deleted file mode 100644
index 1c677889c..000000000
--- a/Source/WTF/wtf/TCSystemAlloc.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (c) 2005, 2007, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat
-//
-// Routine that uses sbrk/mmap to allocate memory from the system.
-// Useful for implementing malloc.
-
-#ifndef TCMALLOC_SYSTEM_ALLOC_H__
-#define TCMALLOC_SYSTEM_ALLOC_H__
-
-// REQUIRES: "alignment" is a power of two or "0" to indicate default alignment
-//
-// Allocate and return "N" bytes of zeroed memory.
-//
-// If actual_bytes is NULL then the returned memory is exactly the
-// requested size. If actual bytes is non-NULL then the allocator
-// may optionally return more bytes than asked for (i.e. return an
-// entire "huge" page if a huge page allocator is in use).
-//
-// The returned pointer is a multiple of "alignment" if non-zero.
-//
-// Returns NULL when out of memory.
-extern void* TCMalloc_SystemAlloc(size_t bytes, size_t *actual_bytes,
- size_t alignment = 0);
-
-// This call is a hint to the operating system that the pages
-// contained in the specified range of memory will not be used for a
-// while, and can be released for use by other processes or the OS.
-// Pages which are released in this way may be destroyed (zeroed) by
-// the OS. The benefit of this function is that it frees memory for
-// use by the system, the cost is that the pages are faulted back into
-// the address space next time they are touched, which can impact
-// performance. (Only pages fully covered by the memory region will
-// be released, partial pages will not.)
-extern void TCMalloc_SystemRelease(void* start, size_t length);
-
-extern void TCMalloc_SystemCommit(void* start, size_t length);
-
-#if !HAVE(MADV_FREE_REUSE) && !HAVE(MADV_DONTNEED) && !HAVE(MMAP) && !HAVE(VIRTUALALLOC)
-inline void TCMalloc_SystemRelease(void*, size_t) { }
-#endif
-
-#if !HAVE(VIRTUALALLOC) && !HAVE(MADV_FREE_REUSE)
-inline void TCMalloc_SystemCommit(void*, size_t) { }
-#endif
-
-#endif /* TCMALLOC_SYSTEM_ALLOC_H__ */
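
The "only pages fully covered by the region" contract described above amounts to rounding the start up and the end down to page boundaries before advising the kernel; a sketch under that assumption:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>
    #include <cstdint>

    // Release only the whole pages inside [start, start + length); partial pages
    // at either end are left untouched.
    static void releaseFullPages(void* start, size_t length)
    {
        size_t pagesize = sysconf(_SC_PAGESIZE);
        uintptr_t begin = (reinterpret_cast<uintptr_t>(start) + pagesize - 1) & ~(pagesize - 1);
        uintptr_t end = (reinterpret_cast<uintptr_t>(start) + length) & ~(pagesize - 1);

        if (end > begin)
            madvise(reinterpret_cast<void*>(begin), end - begin, MADV_DONTNEED);
    }
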
diff --git a/Source/WTF/wtf/ThreadFunctionInvocation.h b/Source/WTF/wtf/ThreadFunctionInvocation.h
index b9268e95b..943fd6d79 100644
--- a/Source/WTF/wtf/ThreadFunctionInvocation.h
+++ b/Source/WTF/wtf/ThreadFunctionInvocation.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WTF/wtf/ThreadIdentifierDataPthreads.cpp b/Source/WTF/wtf/ThreadIdentifierDataPthreads.cpp
index bd62288f8..0bebfe93b 100644
--- a/Source/WTF/wtf/ThreadIdentifierDataPthreads.cpp
+++ b/Source/WTF/wtf/ThreadIdentifierDataPthreads.cpp
@@ -56,7 +56,8 @@ ThreadIdentifierData::~ThreadIdentifierData()
void ThreadIdentifierData::initializeOnce()
{
- if (pthread_key_create(&m_key, destruct))
+ int error = pthread_key_create(&m_key, destruct);
+ if (error)
CRASH();
}
@@ -71,6 +72,10 @@ ThreadIdentifier ThreadIdentifierData::identifier()
void ThreadIdentifierData::initialize(ThreadIdentifier id)
{
ASSERT(!identifier());
+ // Ideally we'd have this as a release assert everywhere, but that would hurt performance.
+ // Having this release assert here means that we will catch "didn't call
+ // WTF::initializeThreading() soon enough" bugs in release mode.
+ RELEASE_ASSERT(m_key != PTHREAD_KEYS_MAX);
pthread_setspecific(m_key, new ThreadIdentifierData(id));
}
diff --git a/Source/WTF/wtf/ThreadSafeRefCounted.h b/Source/WTF/wtf/ThreadSafeRefCounted.h
index cf9eb3db3..892a058a9 100644
--- a/Source/WTF/wtf/ThreadSafeRefCounted.h
+++ b/Source/WTF/wtf/ThreadSafeRefCounted.h
@@ -1,66 +1,31 @@
/*
- * Copyright (C) 2007, 2008, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2007, 2008, 2010, 2013, 2014 Apple Inc. All rights reserved.
* Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- *
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Note: The implementations of InterlockedIncrement and InterlockedDecrement are based
- * on atomic_increment and atomic_exchange_and_add from the Boost C++ Library. The license
- * is virtually identical to the Apple license above but is included here for completeness.
- *
- * Boost Software License - Version 1.0 - August 17th, 2003
- *
- * Permission is hereby granted, free of charge, to any person or organization
- * obtaining a copy of the software and accompanying documentation covered by
- * this license (the "Software") to use, reproduce, display, distribute,
- * execute, and transmit the Software, and to prepare derivative works of the
- * Software, and to permit third-parties to whom the Software is furnished to
- * do so, all subject to the following:
- *
- * The copyright notices in the Software and this entire statement, including
- * the above license grant, this restriction and the following disclaimer,
- * must be included in all copies of the Software, in whole or in part, and
- * all derivative works of the Software, unless such copies or derivative
- * works are solely in the form of machine-executable object code generated by
- * a source language processor.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
- * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
- * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ThreadSafeRefCounted_h
-#define ThreadSafeRefCounted_h
+#pragma once
#include <atomic>
-#include <wtf/DynamicAnnotations.h>
#include <wtf/FastMalloc.h>
#include <wtf/Noncopyable.h>
@@ -70,59 +35,46 @@ class ThreadSafeRefCountedBase {
WTF_MAKE_NONCOPYABLE(ThreadSafeRefCountedBase);
WTF_MAKE_FAST_ALLOCATED;
public:
- ThreadSafeRefCountedBase(int initialRefCount = 1)
- : m_refCount(initialRefCount)
- {
- }
+ ThreadSafeRefCountedBase() = default;
- void ref()
+ void ref() const
{
++m_refCount;
}
- bool hasOneRef()
+ bool hasOneRef() const
{
return refCount() == 1;
}
- int refCount() const
+ unsigned refCount() const
{
return m_refCount;
}
protected:
// Returns whether the pointer should be freed or not.
- bool derefBase()
+ bool derefBase() const
{
- WTF_ANNOTATE_HAPPENS_BEFORE(&m_refCount);
- if (--m_refCount <= 0) {
- WTF_ANNOTATE_HAPPENS_AFTER(&m_refCount);
- return true;
- }
-
- return false;
+ return !--m_refCount;
}
private:
- std::atomic<int> m_refCount;
+ mutable std::atomic<unsigned> m_refCount { 1 };
};
template<class T> class ThreadSafeRefCounted : public ThreadSafeRefCountedBase {
public:
- void deref()
+ void deref() const
{
if (derefBase())
- delete static_cast<T*>(this);
+ delete static_cast<const T*>(this);
}
protected:
- ThreadSafeRefCounted()
- {
- }
+ ThreadSafeRefCounted() = default;
};
} // namespace WTF
using WTF::ThreadSafeRefCounted;
-
-#endif // ThreadSafeRefCounted_h
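
A usage sketch for the modernized class above; SharedPayload is a hypothetical type, and raw ref()/deref() calls stand in for the RefPtr wrappers that real call sites would normally use.

    #include <wtf/ThreadSafeRefCounted.h>

    // ThreadSafeRefCounted<T> supplies the atomic refcount, ref(), and a deref()
    // that deletes `this` when the count reaches zero.
    class SharedPayload : public ThreadSafeRefCounted<SharedPayload> {
    public:
        int value { 0 };
    };

    void example()
    {
        SharedPayload* payload = new SharedPayload; // Refcount starts at 1.
        payload->ref();                             // Now 2: safe to hand to another thread.
        payload->deref();                           // Back to 1.
        payload->deref();                           // Hits 0: the object deletes itself.
    }
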
diff --git a/Source/WTF/wtf/ThreadSpecific.h b/Source/WTF/wtf/ThreadSpecific.h
index 025737df2..f0904fcf6 100644
--- a/Source/WTF/wtf/ThreadSpecific.h
+++ b/Source/WTF/wtf/ThreadSpecific.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2016 Apple Inc. All rights reserved.
* Copyright (C) 2009 Jian Li <jianli@chromium.org>
* Copyright (C) 2012 Patrick Gansterer <paroga@paroga.com>
*
@@ -12,7 +12,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -42,6 +42,7 @@
#ifndef WTF_ThreadSpecific_h
#define WTF_ThreadSpecific_h
+#include <wtf/MainThread.h>
#include <wtf/Noncopyable.h>
#include <wtf/StdLibExtras.h>
@@ -53,13 +54,18 @@
namespace WTF {
-#if OS(WINDOWS)
-// ThreadSpecificThreadExit should be called each time when a thread is detached.
-// This is done automatically for threads created with WTF::createThread.
-void ThreadSpecificThreadExit();
+#if OS(WINDOWS) && CPU(X86)
+#define THREAD_SPECIFIC_CALL __stdcall
+#else
+#define THREAD_SPECIFIC_CALL
#endif
-template<typename T> class ThreadSpecific {
+enum class CanBeGCThread {
+ False,
+ True
+};
+
+template<typename T, CanBeGCThread canBeGCThread = CanBeGCThread::False> class ThreadSpecific {
WTF_MAKE_NONCOPYABLE(ThreadSpecific);
public:
ThreadSpecific();
@@ -73,10 +79,6 @@ public:
#endif
private:
-#if OS(WINDOWS)
- friend void ThreadSpecificThreadExit();
-#endif
-
// Not implemented. It's technically possible to destroy a thread specific key, but one would need
// to make sure that all values have been destroyed already (usually, that all threads that used it
// have exited). It's unlikely that any user of this call will be in that situation - and having
@@ -85,18 +87,15 @@ private:
T* get();
void set(T*);
- void static destroy(void* ptr);
+ void static THREAD_SPECIFIC_CALL destroy(void* ptr);
struct Data {
WTF_MAKE_NONCOPYABLE(Data);
public:
- Data(T* value, ThreadSpecific<T>* owner) : value(value), owner(owner) {}
+ Data(T* value, ThreadSpecific<T, canBeGCThread>* owner) : value(value), owner(owner) {}
T* value;
- ThreadSpecific<T>* owner;
-#if OS(WINDOWS)
- void (*destructor)(void*);
-#endif
+ ThreadSpecific<T, canBeGCThread>* owner;
};
#if USE(PTHREADS)
@@ -134,94 +133,113 @@ inline void* threadSpecificGet(ThreadSpecificKey key)
return pthread_getspecific(key);
}
-template<typename T>
-inline ThreadSpecific<T>::ThreadSpecific()
+template<typename T, CanBeGCThread canBeGCThread>
+inline ThreadSpecific<T, canBeGCThread>::ThreadSpecific()
{
int error = pthread_key_create(&m_key, destroy);
if (error)
CRASH();
}
-template<typename T>
-inline T* ThreadSpecific<T>::get()
+template<typename T, CanBeGCThread canBeGCThread>
+inline T* ThreadSpecific<T, canBeGCThread>::get()
{
Data* data = static_cast<Data*>(pthread_getspecific(m_key));
- return data ? data->value : 0;
+ if (data)
+ return data->value;
+ RELEASE_ASSERT(canBeGCThread == CanBeGCThread::True || !mayBeGCThread());
+ return nullptr;
}
-template<typename T>
-inline void ThreadSpecific<T>::set(T* ptr)
+template<typename T, CanBeGCThread canBeGCThread>
+inline void ThreadSpecific<T, canBeGCThread>::set(T* ptr)
{
+ RELEASE_ASSERT(canBeGCThread == CanBeGCThread::True || !mayBeGCThread());
ASSERT(!get());
pthread_setspecific(m_key, new Data(ptr, this));
}
#elif OS(WINDOWS)
-// TLS_OUT_OF_INDEXES is not defined on WinCE.
-#ifndef TLS_OUT_OF_INDEXES
-#define TLS_OUT_OF_INDEXES 0xffffffff
-#endif
-
-// The maximum number of TLS keys that can be created. For simplification, we assume that:
+// The maximum number of FLS keys that can be created. For simplification, we assume that:
// 1) Once the instance of ThreadSpecific<> is created, it will not be destructed until the program dies.
// 2) We do not need to hold many instances of ThreadSpecific<> data. This fixed number should be more than enough.
-const int kMaxTlsKeySize = 256;
+const int kMaxFlsKeySize = 128;
+
+WTF_EXPORT_PRIVATE long& flsKeyCount();
+WTF_EXPORT_PRIVATE DWORD* flsKeys();
-WTF_EXPORT_PRIVATE long& tlsKeyCount();
-WTF_EXPORT_PRIVATE DWORD* tlsKeys();
+typedef DWORD ThreadSpecificKey;
-class PlatformThreadSpecificKey;
-typedef PlatformThreadSpecificKey* ThreadSpecificKey;
+inline void threadSpecificKeyCreate(ThreadSpecificKey* key, void (THREAD_SPECIFIC_CALL *destructor)(void *))
+{
+ DWORD flsKey = FlsAlloc(destructor);
+ if (flsKey == FLS_OUT_OF_INDEXES)
+ CRASH();
-WTF_EXPORT_PRIVATE void threadSpecificKeyCreate(ThreadSpecificKey*, void (*)(void *));
-WTF_EXPORT_PRIVATE void threadSpecificKeyDelete(ThreadSpecificKey);
-WTF_EXPORT_PRIVATE void threadSpecificSet(ThreadSpecificKey, void*);
-WTF_EXPORT_PRIVATE void* threadSpecificGet(ThreadSpecificKey);
+ *key = flsKey;
+}
+
+inline void threadSpecificKeyDelete(ThreadSpecificKey key)
+{
+ FlsFree(key);
+}
+
+inline void threadSpecificSet(ThreadSpecificKey key, void* data)
+{
+ FlsSetValue(key, data);
+}
+
+inline void* threadSpecificGet(ThreadSpecificKey key)
+{
+ return FlsGetValue(key);
+}
-template<typename T>
-inline ThreadSpecific<T>::ThreadSpecific()
+template<typename T, CanBeGCThread canBeGCThread>
+inline ThreadSpecific<T, canBeGCThread>::ThreadSpecific()
: m_index(-1)
{
- DWORD tlsKey = TlsAlloc();
- if (tlsKey == TLS_OUT_OF_INDEXES)
+ DWORD flsKey = FlsAlloc(destroy);
+ if (flsKey == FLS_OUT_OF_INDEXES)
CRASH();
- m_index = InterlockedIncrement(&tlsKeyCount()) - 1;
- if (m_index >= kMaxTlsKeySize)
+ m_index = InterlockedIncrement(&flsKeyCount()) - 1;
+ if (m_index >= kMaxFlsKeySize)
CRASH();
- tlsKeys()[m_index] = tlsKey;
+ flsKeys()[m_index] = flsKey;
}
-template<typename T>
-inline ThreadSpecific<T>::~ThreadSpecific()
+template<typename T, CanBeGCThread canBeGCThread>
+inline ThreadSpecific<T, canBeGCThread>::~ThreadSpecific()
{
- // Does not invoke destructor functions. They will be called from ThreadSpecificThreadExit when the thread is detached.
- TlsFree(tlsKeys()[m_index]);
+ FlsFree(flsKeys()[m_index]);
}
-template<typename T>
-inline T* ThreadSpecific<T>::get()
+template<typename T, CanBeGCThread canBeGCThread>
+inline T* ThreadSpecific<T, canBeGCThread>::get()
{
- Data* data = static_cast<Data*>(TlsGetValue(tlsKeys()[m_index]));
- return data ? data->value : 0;
+ Data* data = static_cast<Data*>(FlsGetValue(flsKeys()[m_index]));
+ if (data)
+ return data->value;
+ RELEASE_ASSERT(canBeGCThread == CanBeGCThread::True || !mayBeGCThread());
+ return nullptr;
}
-template<typename T>
-inline void ThreadSpecific<T>::set(T* ptr)
+template<typename T, CanBeGCThread canBeGCThread>
+inline void ThreadSpecific<T, canBeGCThread>::set(T* ptr)
{
+ RELEASE_ASSERT(canBeGCThread == CanBeGCThread::True || !mayBeGCThread());
ASSERT(!get());
Data* data = new Data(ptr, this);
- data->destructor = &ThreadSpecific<T>::destroy;
- TlsSetValue(tlsKeys()[m_index], data);
+ FlsSetValue(flsKeys()[m_index], data);
}
#else
#error ThreadSpecific is not implemented for this platform.
#endif
-template<typename T>
-inline void ThreadSpecific<T>::destroy(void* ptr)
+template<typename T, CanBeGCThread canBeGCThread>
+inline void THREAD_SPECIFIC_CALL ThreadSpecific<T, canBeGCThread>::destroy(void* ptr)
{
Data* data = static_cast<Data*>(ptr);
@@ -237,7 +255,7 @@ inline void ThreadSpecific<T>::destroy(void* ptr)
#if USE(PTHREADS)
pthread_setspecific(data->owner->m_key, 0);
#elif OS(WINDOWS)
- TlsSetValue(tlsKeys()[data->owner->m_index], 0);
+ FlsSetValue(flsKeys()[data->owner->m_index], 0);
#else
#error ThreadSpecific is not implemented for this platform.
#endif
@@ -245,14 +263,14 @@ inline void ThreadSpecific<T>::destroy(void* ptr)
delete data;
}
-template<typename T>
-inline bool ThreadSpecific<T>::isSet()
+template<typename T, CanBeGCThread canBeGCThread>
+inline bool ThreadSpecific<T, canBeGCThread>::isSet()
{
return !!get();
}
-template<typename T>
-inline ThreadSpecific<T>::operator T*()
+template<typename T, CanBeGCThread canBeGCThread>
+inline ThreadSpecific<T, canBeGCThread>::operator T*()
{
T* ptr = static_cast<T*>(get());
if (!ptr) {
@@ -265,21 +283,21 @@ inline ThreadSpecific<T>::operator T*()
return ptr;
}
-template<typename T>
-inline T* ThreadSpecific<T>::operator->()
+template<typename T, CanBeGCThread canBeGCThread>
+inline T* ThreadSpecific<T, canBeGCThread>::operator->()
{
return operator T*();
}
-template<typename T>
-inline T& ThreadSpecific<T>::operator*()
+template<typename T, CanBeGCThread canBeGCThread>
+inline T& ThreadSpecific<T, canBeGCThread>::operator*()
{
return *operator T*();
}
#if USE(WEB_THREAD)
-template<typename T>
-inline void ThreadSpecific<T>::replace(T* newPtr)
+template<typename T, CanBeGCThread canBeGCThread>
+inline void ThreadSpecific<T, canBeGCThread>::replace(T* newPtr)
{
ASSERT(newPtr);
Data* data = static_cast<Data*>(pthread_getspecific(m_key));
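A short sketch of the per-thread storage pattern this template serves (Counter and incrementForCurrentThread() are made up; only ThreadSpecific itself comes from this header):

    #include <wtf/ThreadSpecific.h>

    struct Counter {
        int value { 0 };
    };

    // Keys are never destroyed (see the comment above), so the ThreadSpecific
    // object itself is intentionally leaked. In real code it would be created
    // exactly once, e.g. during initializeThreading().
    static WTF::ThreadSpecific<Counter>* counterSlot;

    void incrementForCurrentThread()
    {
        if (!counterSlot)
            counterSlot = new WTF::ThreadSpecific<Counter>;
        // operator->() lazily constructs this thread's Counter on first access.
        // Pass CanBeGCThread::True as the second template argument only if the
        // slot may legitimately be touched from a GC thread.
        ++(*counterSlot)->value;
    }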
diff --git a/Source/WTF/wtf/ThreadSpecificWin.cpp b/Source/WTF/wtf/ThreadSpecificWin.cpp
index 9b70bbbca..ad7cf86d4 100644
--- a/Source/WTF/wtf/ThreadSpecificWin.cpp
+++ b/Source/WTF/wtf/ThreadSpecificWin.cpp
@@ -24,117 +24,22 @@
#if OS(WINDOWS)
-#include "StdLibExtras.h"
-#include "ThreadingPrimitives.h"
-#include <wtf/DoublyLinkedList.h>
-
#if !USE(PTHREADS)
namespace WTF {
-static DoublyLinkedList<PlatformThreadSpecificKey>& destructorsList()
-{
- static DoublyLinkedList<PlatformThreadSpecificKey> staticList;
- return staticList;
-}
-
-static Mutex& destructorsMutex()
-{
- static Mutex staticMutex;
- return staticMutex;
-}
-
-class PlatformThreadSpecificKey : public DoublyLinkedListNode<PlatformThreadSpecificKey> {
-public:
- friend class DoublyLinkedListNode<PlatformThreadSpecificKey>;
-
- PlatformThreadSpecificKey(void (*destructor)(void *))
- : m_destructor(destructor)
- {
- m_tlsKey = TlsAlloc();
- if (m_tlsKey == TLS_OUT_OF_INDEXES)
- CRASH();
- }
-
- ~PlatformThreadSpecificKey()
- {
- TlsFree(m_tlsKey);
- }
-
- void setValue(void* data) { TlsSetValue(m_tlsKey, data); }
- void* value() { return TlsGetValue(m_tlsKey); }
-
- void callDestructor()
- {
- if (void* data = value())
- m_destructor(data);
- }
-
-private:
- void (*m_destructor)(void *);
- DWORD m_tlsKey;
- PlatformThreadSpecificKey* m_prev;
- PlatformThreadSpecificKey* m_next;
-};
-
-long& tlsKeyCount()
+long& flsKeyCount()
{
static long count;
return count;
}
-DWORD* tlsKeys()
+DWORD* flsKeys()
{
- static DWORD keys[kMaxTlsKeySize];
+ static DWORD keys[kMaxFlsKeySize];
return keys;
}
-void threadSpecificKeyCreate(ThreadSpecificKey* key, void (*destructor)(void *))
-{
- // Use the original malloc() instead of fastMalloc() to use this function in FastMalloc code.
- *key = static_cast<PlatformThreadSpecificKey*>(::malloc(sizeof(PlatformThreadSpecificKey)));
- new (*key) PlatformThreadSpecificKey(destructor);
-
- MutexLocker locker(destructorsMutex());
- destructorsList().push(*key);
-}
-
-void threadSpecificKeyDelete(ThreadSpecificKey key)
-{
- MutexLocker locker(destructorsMutex());
- destructorsList().remove(key);
- key->~PlatformThreadSpecificKey();
- ::free(key);
-}
-
-void threadSpecificSet(ThreadSpecificKey key, void* data)
-{
- key->setValue(data);
-}
-
-void* threadSpecificGet(ThreadSpecificKey key)
-{
- return key->value();
-}
-
-void ThreadSpecificThreadExit()
-{
- for (long i = 0; i < tlsKeyCount(); i++) {
- // The layout of ThreadSpecific<T>::Data does not depend on T. So we are safe to do the static cast to ThreadSpecific<int> in order to access its data member.
- ThreadSpecific<int>::Data* data = static_cast<ThreadSpecific<int>::Data*>(TlsGetValue(tlsKeys()[i]));
- if (data)
- data->destructor(data);
- }
-
- MutexLocker locker(destructorsMutex());
- PlatformThreadSpecificKey* key = destructorsList().head();
- while (key) {
- PlatformThreadSpecificKey* nextKey = key->next();
- key->callDestructor();
- key = nextKey;
- }
-}
-
} // namespace WTF
#endif // !USE(PTHREADS)
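The bookkeeping removed above is unnecessary with fiber-local storage because the callback registered with FlsAlloc runs automatically when a thread exits (and when the slot is freed). A standalone Win32 sketch of that behavior, not WTF code; the int payload is arbitrary:

    #include <windows.h>

    static void WINAPI destroyValue(void* ptr)
    {
        // Invoked automatically for each thread's value at thread exit.
        delete static_cast<int*>(ptr);
    }

    void example()
    {
        DWORD key = FlsAlloc(destroyValue);
        if (key == FLS_OUT_OF_INDEXES)
            return;
        FlsSetValue(key, new int(42));
        // No explicit ThreadSpecificThreadExit()-style cleanup call is required.
    }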
diff --git a/Source/WTF/wtf/Threading.cpp b/Source/WTF/wtf/Threading.cpp
index 723a94ee4..c1dc35019 100644
--- a/Source/WTF/wtf/Threading.cpp
+++ b/Source/WTF/wtf/Threading.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -25,33 +25,56 @@
#include "config.h"
#include "Threading.h"
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
-#include <string.h>
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <wtf/text/StringView.h>
+
+#if HAVE(QOS_CLASSES)
+#include <bmalloc/bmalloc.h>
+#endif
namespace WTF {
struct NewThreadContext {
WTF_MAKE_FAST_ALLOCATED;
public:
- NewThreadContext(ThreadFunction entryPoint, void* data, const char* name)
- : entryPoint(entryPoint)
- , data(data)
- , name(name)
- {
- }
-
- ThreadFunction entryPoint;
- void* data;
const char* name;
-
+ std::function<void()> entryPoint;
Mutex creationMutex;
};
+const char* normalizeThreadName(const char* threadName)
+{
+#if HAVE(PTHREAD_SETNAME_NP)
+ return threadName;
+#else
+ // This name can be com.apple.WebKit.ProcessLauncher or com.apple.CoreIPC.ReceiveQueue.
+    // We use such names as the thread name, but both are longer than the platform's
+    // thread-name length limit: 32 characters on Windows and 16 on Linux.
+ StringView result(threadName);
+ size_t size = result.reverseFind('.');
+ if (size != notFound)
+ result = result.substring(size + 1);
+
+#if OS(WINDOWS)
+ constexpr const size_t kVisualStudioThreadNameLimit = 32 - 1;
+ if (result.length() > kVisualStudioThreadNameLimit)
+ result = result.right(kVisualStudioThreadNameLimit);
+#elif OS(LINUX)
+ constexpr const size_t kLinuxThreadNameLimit = 16 - 1;
+ if (result.length() > kLinuxThreadNameLimit)
+ result = result.right(kLinuxThreadNameLimit);
+#endif
+ ASSERT(result.characters8()[result.length()] == '\0');
+ return reinterpret_cast<const char*>(result.characters8());
+#endif
+}
+
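For illustration only (these inputs are assumptions, not taken from the patch): "com.apple.CoreIPC.ReceiveQueue" is cut at the last dot to "ReceiveQueue", which fits both limits; a hypothetical last component such as "ProcessLauncherWorkerThread" (27 characters) would additionally be trimmed on Linux to its right-most 15 characters, "herWorkerThread", while staying untouched on Windows (limit 31).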
static void threadEntryPoint(void* contextData)
{
- NewThreadContext* context = reinterpret_cast<NewThreadContext*>(contextData);
+ NewThreadContext* context = static_cast<NewThreadContext*>(contextData);
// Block until our creating thread has completed any extra setup work, including
// establishing ThreadIdentifier.
@@ -61,24 +84,17 @@ static void threadEntryPoint(void* contextData)
initializeCurrentThreadInternal(context->name);
- // Grab the info that we need out of the context, then deallocate it.
- ThreadFunction entryPoint = context->entryPoint;
- void* data = context->data;
+ auto entryPoint = WTFMove(context->entryPoint);
+
+    // Delete the context before invoking the thread's entry point.
delete context;
- entryPoint(data);
+ entryPoint();
}
-ThreadIdentifier createThread(ThreadFunction entryPoint, void* data, const char* name)
+ThreadIdentifier createThread(const char* name, std::function<void()> entryPoint)
{
- // Visual Studio has a 31-character limit on thread names. Longer names will
- // be truncated silently, but we'd like callers to know about the limit.
-#if !LOG_DISABLED && PLATFORM(WIN)
- if (name && strlen(name) > 31)
- LOG_ERROR("Thread name \"%s\" is longer than 31 characters and will be truncated by Visual Studio", name);
-#endif
-
- NewThreadContext* context = new NewThreadContext(entryPoint, data, name);
+ NewThreadContext* context = new NewThreadContext { name, WTFMove(entryPoint), { } };
// Prevent the thread body from executing until we've established the thread identifier.
MutexLocker locker(context->creationMutex);
@@ -86,60 +102,49 @@ ThreadIdentifier createThread(ThreadFunction entryPoint, void* data, const char*
return createThreadInternal(threadEntryPoint, context, name);
}
-#if PLATFORM(MAC) || PLATFORM(WIN)
-
-// For ABI compatibility with Safari on Mac / Windows: Safari uses the private
-// createThread() and waitForThreadCompletion() functions directly and we need
-// to keep the old ABI compatibility until it's been rebuilt.
-
-typedef void* (*ThreadFunctionWithReturnValue)(void* argument);
-
-WTF_EXPORT_PRIVATE ThreadIdentifier createThread(ThreadFunctionWithReturnValue entryPoint, void* data, const char* name);
-
-struct ThreadFunctionWithReturnValueInvocation {
- ThreadFunctionWithReturnValueInvocation(ThreadFunctionWithReturnValue function, void* data)
- : function(function)
- , data(data)
- {
- }
-
- ThreadFunctionWithReturnValue function;
- void* data;
-};
-
-static void compatEntryPoint(void* param)
+ThreadIdentifier createThread(ThreadFunction entryPoint, void* data, const char* name)
{
- // Balanced by .release() in createThread.
- auto invocation = std::unique_ptr<ThreadFunctionWithReturnValueInvocation>(static_cast<ThreadFunctionWithReturnValueInvocation*>(param));
- invocation->function(invocation->data);
+ return createThread(name, [entryPoint, data] {
+ entryPoint(data);
+ });
}
-ThreadIdentifier createThread(ThreadFunctionWithReturnValue entryPoint, void* data, const char* name)
+void setCurrentThreadIsUserInteractive(int relativePriority)
{
- auto invocation = std::make_unique<ThreadFunctionWithReturnValueInvocation>(entryPoint, data);
-
- // Balanced by std::unique_ptr constructor in compatEntryPoint.
- return createThread(compatEntryPoint, invocation.release(), name);
+#if HAVE(QOS_CLASSES)
+ ASSERT(relativePriority <= 0);
+ ASSERT(relativePriority >= QOS_MIN_RELATIVE_PRIORITY);
+ pthread_set_qos_class_self_np(adjustedQOSClass(QOS_CLASS_USER_INTERACTIVE), relativePriority);
+#else
+ UNUSED_PARAM(relativePriority);
+#endif
}
-WTF_EXPORT_PRIVATE int waitForThreadCompletion(ThreadIdentifier, void**);
-
-int waitForThreadCompletion(ThreadIdentifier threadID, void**)
+void setCurrentThreadIsUserInitiated(int relativePriority)
{
- return waitForThreadCompletion(threadID);
+#if HAVE(QOS_CLASSES)
+ ASSERT(relativePriority <= 0);
+ ASSERT(relativePriority >= QOS_MIN_RELATIVE_PRIORITY);
+ pthread_set_qos_class_self_np(adjustedQOSClass(QOS_CLASS_USER_INITIATED), relativePriority);
+#else
+ UNUSED_PARAM(relativePriority);
+#endif
}
-// This function is deprecated but needs to be kept around for backward
-// compatibility. Use the 3-argument version of createThread above.
-
-WTF_EXPORT_PRIVATE ThreadIdentifier createThread(ThreadFunctionWithReturnValue entryPoint, void* data);
+#if HAVE(QOS_CLASSES)
+static qos_class_t globalMaxQOSclass { QOS_CLASS_UNSPECIFIED };
-ThreadIdentifier createThread(ThreadFunctionWithReturnValue entryPoint, void* data)
+void setGlobalMaxQOSClass(qos_class_t maxClass)
{
- auto invocation = std::make_unique<ThreadFunctionWithReturnValueInvocation>(entryPoint, data);
+ bmalloc::api::setScavengerThreadQOSClass(maxClass);
+ globalMaxQOSclass = maxClass;
+}
- // Balanced by adoptPtr() in compatEntryPoint.
- return createThread(compatEntryPoint, invocation.release(), 0);
+qos_class_t adjustedQOSClass(qos_class_t originalClass)
+{
+ if (globalMaxQOSclass != QOS_CLASS_UNSPECIFIED)
+ return std::min(originalClass, globalMaxQOSclass);
+ return originalClass;
}
#endif
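A minimal sketch of the new lambda-based thread creation together with the QOS helpers declared above (the thread name and body are made up; on platforms without QOS_CLASSES the helpers are no-ops, as shown):

    #include <wtf/Threading.h>

    void example()
    {
        ThreadIdentifier worker = createThread("com.example.Worker", [] {
            // Raise this thread's priority class where the platform supports it.
            setCurrentThreadIsUserInitiated();
            // ... thread body ...
        });
        waitForThreadCompletion(worker);
    }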
diff --git a/Source/WTF/wtf/Threading.h b/Source/WTF/wtf/Threading.h
index 31f9d2f34..4cd1593e8 100644
--- a/Source/WTF/wtf/Threading.h
+++ b/Source/WTF/wtf/Threading.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007, 2008, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2007, 2008, 2010, 2014 Apple Inc. All rights reserved.
* Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
*
* Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -25,42 +25,16 @@
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Note: The implementations of InterlockedIncrement and InterlockedDecrement are based
- * on atomic_increment and atomic_exchange_and_add from the Boost C++ Library. The license
- * is virtually identical to the Apple license above but is included here for completeness.
- *
- * Boost Software License - Version 1.0 - August 17th, 2003
- *
- * Permission is hereby granted, free of charge, to any person or organization
- * obtaining a copy of the software and accompanying documentation covered by
- * this license (the "Software") to use, reproduce, display, distribute,
- * execute, and transmit the Software, and to prepare derivative works of the
- * Software, and to permit third-parties to whom the Software is furnished to
- * do so, all subject to the following:
- *
- * The copyright notices in the Software and this entire statement, including
- * the above license grant, this restriction and the following disclaimer,
- * must be included in all copies of the Software, in whole or in part, and
- * all derivative works of the Software, unless such copies or derivative
- * works are solely in the form of machine-executable object code generated by
- * a source language processor.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
- * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
- * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
*/
#ifndef Threading_h
#define Threading_h
-#include <wtf/Platform.h>
+// FIXME: Not sure why there are so many includes here.
+// Is this intended to be convenience so that others don't have to include the individual files?
+// Nothing in this header depends on Assertions, Atomics, Locker, Noncopyable, ThreadSafeRefCounted, or ThreadingPrimitives.
+#include <functional>
#include <stdint.h>
#include <wtf/Assertions.h>
#include <wtf/Atomics.h>
@@ -81,6 +55,19 @@ WTF_EXPORT_PRIVATE void initializeThreading();
// Returns 0 if thread creation failed.
// The thread name must be a literal since on some platforms it's passed in to the thread.
+WTF_EXPORT_PRIVATE ThreadIdentifier createThread(const char* threadName, std::function<void()>);
+
+// Mark the current thread as requiring UI responsiveness.
+// relativePriority is a value in the range [-15, 0] where a lower value indicates a lower priority.
+WTF_EXPORT_PRIVATE void setCurrentThreadIsUserInteractive(int relativePriority = 0);
+WTF_EXPORT_PRIVATE void setCurrentThreadIsUserInitiated(int relativePriority = 0);
+
+WTF_EXPORT_PRIVATE ThreadIdentifier currentThread();
+WTF_EXPORT_PRIVATE void changeThreadPriority(ThreadIdentifier, int);
+WTF_EXPORT_PRIVATE int waitForThreadCompletion(ThreadIdentifier);
+WTF_EXPORT_PRIVATE void detachThread(ThreadIdentifier);
+
+// Deprecated function-pointer-based thread creation.
WTF_EXPORT_PRIVATE ThreadIdentifier createThread(ThreadFunction, void*, const char* threadName);
// Internal platform-specific createThread implementation.
@@ -90,16 +77,25 @@ ThreadIdentifier createThreadInternal(ThreadFunction, void*, const char* threadN
// Helpful for platforms where the thread name must be set from within the thread.
void initializeCurrentThreadInternal(const char* threadName);
-WTF_EXPORT_PRIVATE ThreadIdentifier currentThread();
-WTF_EXPORT_PRIVATE int waitForThreadCompletion(ThreadIdentifier);
-WTF_EXPORT_PRIVATE void detachThread(ThreadIdentifier);
+const char* normalizeThreadName(const char* threadName);
+
+#if HAVE(QOS_CLASSES)
+WTF_EXPORT_PRIVATE void setGlobalMaxQOSClass(qos_class_t);
+WTF_EXPORT_PRIVATE qos_class_t adjustedQOSClass(qos_class_t);
+#endif
} // namespace WTF
using WTF::ThreadIdentifier;
using WTF::createThread;
using WTF::currentThread;
+using WTF::changeThreadPriority;
using WTF::detachThread;
using WTF::waitForThreadCompletion;
+#if HAVE(QOS_CLASSES)
+using WTF::setGlobalMaxQOSClass;
+using WTF::adjustedQOSClass;
+#endif
+
#endif // Threading_h
diff --git a/Source/WTF/wtf/ThreadingPrimitives.h b/Source/WTF/wtf/ThreadingPrimitives.h
index 58df2162d..422923191 100644
--- a/Source/WTF/wtf/ThreadingPrimitives.h
+++ b/Source/WTF/wtf/ThreadingPrimitives.h
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,8 +31,6 @@
#ifndef ThreadingPrimitives_h
#define ThreadingPrimitives_h
-#include <wtf/Platform.h>
-
#include <wtf/Assertions.h>
#include <wtf/FastMalloc.h>
#include <wtf/Locker.h>
diff --git a/Source/WTF/wtf/ThreadingPthreads.cpp b/Source/WTF/wtf/ThreadingPthreads.cpp
index dab2d447f..646164a53 100644
--- a/Source/WTF/wtf/ThreadingPthreads.cpp
+++ b/Source/WTF/wtf/ThreadingPthreads.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2007, 2009, 2015 Apple Inc. All rights reserved.
* Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
* Copyright (C) 2011 Research In Motion Limited. All rights reserved.
*
@@ -12,7 +12,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -39,13 +39,13 @@
#include "dtoa/cached-powers.h"
#include "HashMap.h"
#include "RandomNumberSeed.h"
-#include "StackStats.h"
#include "StdLibExtras.h"
#include "ThreadFunctionInvocation.h"
#include "ThreadIdentifierDataPthreads.h"
#include "ThreadSpecific.h"
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
+#include <wtf/DataLog.h>
+#include <wtf/NeverDestroyed.h>
+#include <wtf/RawPointer.h>
#include <wtf/WTFThreadData.h>
#include <errno.h>
@@ -55,8 +55,8 @@
#include <sys/time.h>
#endif
-#if OS(MAC_OS_X)
-#include <objc/objc-auto.h>
+#if OS(LINUX)
+#include <sys/prctl.h>
#endif
namespace WTF {
@@ -104,7 +104,7 @@ void threadWasJoined(ThreadIdentifier);
static Mutex& threadMapMutex()
{
- DEFINE_STATIC_LOCAL(Mutex, mutex, ());
+ static NeverDestroyed<Mutex> mutex;
return mutex;
}
@@ -124,15 +124,13 @@ void initializeThreading()
threadMapMutex();
initializeRandomNumberGenerator();
ThreadIdentifierData::initializeOnce();
- StackStats::initialize();
wtfThreadData();
- s_dtoaP5Mutex = new Mutex;
initializeDates();
}
static ThreadMap& threadMap()
{
- DEFINE_STATIC_LOCAL(ThreadMap, map, ());
+ static NeverDestroyed<ThreadMap> map;
return map;
}
@@ -175,7 +173,14 @@ ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, con
{
auto invocation = std::make_unique<ThreadFunctionInvocation>(entryPoint, data);
pthread_t threadHandle;
- if (pthread_create(&threadHandle, 0, wtfThreadEntryPoint, invocation.get())) {
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+#if HAVE(QOS_CLASSES)
+ pthread_attr_set_qos_class_np(&attr, adjustedQOSClass(QOS_CLASS_USER_INITIATED), 0);
+#endif
+ int error = pthread_create(&threadHandle, &attr, wtfThreadEntryPoint, invocation.get());
+ pthread_attr_destroy(&attr);
+ if (error) {
LOG_ERROR("Failed to create pthread at entry point %p with data %p", wtfThreadEntryPoint, invocation.get());
return 0;
}
@@ -190,21 +195,39 @@ ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, con
void initializeCurrentThreadInternal(const char* threadName)
{
#if HAVE(PTHREAD_SETNAME_NP)
- pthread_setname_np(threadName);
+ pthread_setname_np(normalizeThreadName(threadName));
+#elif OS(LINUX)
+ prctl(PR_SET_NAME, normalizeThreadName(threadName));
#else
UNUSED_PARAM(threadName);
#endif
-#if OS(MAC_OS_X)
- // All threads that potentially use APIs above the BSD layer must be registered with the Objective-C
- // garbage collector in case API implementations use garbage-collected memory.
- objc_registerThreadWithCollector();
-#endif
-
ThreadIdentifier id = identifierByPthreadHandle(pthread_self());
ASSERT(id);
ThreadIdentifierData::initialize(id);
}
+
+void changeThreadPriority(ThreadIdentifier threadID, int delta)
+{
+ pthread_t pthreadHandle;
+ ASSERT(threadID);
+
+ {
+ MutexLocker locker(threadMapMutex());
+ pthreadHandle = pthreadHandleForIdentifierWithLockAlreadyHeld(threadID);
+ ASSERT(pthreadHandle);
+ }
+
+ int policy;
+ struct sched_param param;
+
+ if (pthread_getschedparam(pthreadHandle, &policy, &param))
+ return;
+
+ param.sched_priority += delta;
+
+ pthread_setschedparam(pthreadHandle, policy, &param);
+}
int waitForThreadCompletion(ThreadIdentifier threadID)
{
diff --git a/Source/WTF/wtf/ThreadingWin.cpp b/Source/WTF/wtf/ThreadingWin.cpp
index e37c77e07..54b43efef 100644
--- a/Source/WTF/wtf/ThreadingWin.cpp
+++ b/Source/WTF/wtf/ThreadingWin.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2007, 2008, 2015 Apple Inc. All rights reserved.
* Copyright (C) 2009 Google Inc. All rights reserved.
* Copyright (C) 2009 Torch Mobile, Inc. All rights reserved.
*
@@ -12,7 +12,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -94,23 +94,15 @@
#include "MainThread.h"
#include "ThreadFunctionInvocation.h"
+#include <process.h>
#include <windows.h>
#include <wtf/CurrentTime.h>
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
+#include <wtf/NeverDestroyed.h>
#include <wtf/RandomNumberSeed.h>
#include <wtf/WTFThreadData.h>
-#if !USE(PTHREADS) && OS(WINDOWS)
-#include "ThreadSpecific.h"
-#endif
-
-#if !OS(WINCE)
-#include <process.h>
-#endif
-
#if HAVE(ERRNO_H)
#include <errno.h>
#endif
@@ -137,7 +129,7 @@ void initializeCurrentThreadInternal(const char* szThreadName)
#else
THREADNAME_INFO info;
info.dwType = 0x1000;
- info.szName = szThreadName;
+ info.szName = normalizeThreadName(szThreadName);
info.dwThreadID = GetCurrentThreadId();
info.dwFlags = 0;
@@ -150,7 +142,7 @@ void initializeCurrentThreadInternal(const char* szThreadName)
static Mutex& threadMapMutex()
{
- static Mutex mutex;
+ static NeverDestroyed<Mutex> mutex;
return mutex;
}
@@ -170,13 +162,12 @@ void initializeThreading()
threadMapMutex();
initializeRandomNumberGenerator();
wtfThreadData();
- s_dtoaP5Mutex = new Mutex;
initializeDates();
}
static HashMap<DWORD, HANDLE>& threadMap()
{
- static HashMap<DWORD, HANDLE> map;
+ static NeverDestroyed<HashMap<DWORD, HANDLE>> map;
return map;
}
@@ -202,14 +193,9 @@ static void clearThreadHandleForIdentifier(ThreadIdentifier id)
static unsigned __stdcall wtfThreadEntryPoint(void* param)
{
- OwnPtr<ThreadFunctionInvocation> invocation = adoptPtr(static_cast<ThreadFunctionInvocation*>(param));
+ std::unique_ptr<ThreadFunctionInvocation> invocation(static_cast<ThreadFunctionInvocation*>(param));
invocation->function(invocation->data);
-#if !USE(PTHREADS) && OS(WINDOWS)
- // Do the TLS cleanup.
- ThreadSpecificThreadExit();
-#endif
-
return 0;
}
@@ -217,18 +203,10 @@ ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, con
{
unsigned threadIdentifier = 0;
ThreadIdentifier threadID = 0;
- OwnPtr<ThreadFunctionInvocation> invocation = adoptPtr(new ThreadFunctionInvocation(entryPoint, data));
-#if OS(WINCE)
- // This is safe on WINCE, since CRT is in the core and innately multithreaded.
- // On desktop Windows, need to use _beginthreadex (not available on WinCE) if using any CRT functions
- HANDLE threadHandle = CreateThread(0, 0, (LPTHREAD_START_ROUTINE)wtfThreadEntryPoint, invocation.get(), 0, (LPDWORD)&threadIdentifier);
-#else
+ auto invocation = std::make_unique<ThreadFunctionInvocation>(entryPoint, data);
HANDLE threadHandle = reinterpret_cast<HANDLE>(_beginthreadex(0, 0, wtfThreadEntryPoint, invocation.get(), 0, &threadIdentifier));
-#endif
if (!threadHandle) {
-#if OS(WINCE)
- LOG_ERROR("Failed to create thread at entry point %p with data %p: %ld", entryPoint, data, ::GetLastError());
-#elif !HAVE(ERRNO_H)
+#if !HAVE(ERRNO_H)
LOG_ERROR("Failed to create thread at entry point %p with data %p.", entryPoint, data);
#else
LOG_ERROR("Failed to create thread at entry point %p with data %p: %ld", entryPoint, data, errno);
@@ -237,7 +215,7 @@ ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, con
}
// The thread will take ownership of invocation.
- ThreadFunctionInvocation* leakedInvocation = invocation.leakPtr();
+ ThreadFunctionInvocation* leakedInvocation = invocation.release();
UNUSED_PARAM(leakedInvocation);
threadID = static_cast<ThreadIdentifier>(threadIdentifier);
@@ -246,6 +224,17 @@ ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, con
return threadID;
}
+void changeThreadPriority(ThreadIdentifier threadID, int delta)
+{
+ ASSERT(threadID);
+
+ HANDLE threadHandle = threadHandleForIdentifier(threadID);
+ if (!threadHandle)
+ LOG_ERROR("ThreadIdentifier %u does not correspond to an active thread", threadID);
+
+ SetThreadPriority(threadHandle, THREAD_PRIORITY_NORMAL + delta);
+}
+
int waitForThreadCompletion(ThreadIdentifier threadID)
{
ASSERT(threadID);
@@ -296,6 +285,7 @@ void Mutex::lock()
++m_mutex.m_recursionCount;
}
+#pragma warning(suppress: 26115)
bool Mutex::tryLock()
{
// This method is modeled after the behavior of pthread_mutex_trylock,
diff --git a/Source/WTF/wtf/TimeWithDynamicClockType.cpp b/Source/WTF/wtf/TimeWithDynamicClockType.cpp
new file mode 100644
index 000000000..789099f67
--- /dev/null
+++ b/Source/WTF/wtf/TimeWithDynamicClockType.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "TimeWithDynamicClockType.h"
+
+#include "Condition.h"
+#include "Lock.h"
+#include "PrintStream.h"
+#include <cfloat>
+#include <cmath>
+#include <wtf/DataLog.h>
+
+namespace WTF {
+
+TimeWithDynamicClockType TimeWithDynamicClockType::now(ClockType type)
+{
+ switch (type) {
+ case ClockType::Wall:
+ return WallTime::now();
+ case ClockType::Monotonic:
+ return MonotonicTime::now();
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return TimeWithDynamicClockType();
+}
+
+TimeWithDynamicClockType TimeWithDynamicClockType::nowWithSameClock() const
+{
+ return now(clockType());
+}
+
+WallTime TimeWithDynamicClockType::wallTime() const
+{
+ RELEASE_ASSERT(m_type == ClockType::Wall);
+ return WallTime::fromRawSeconds(m_value);
+}
+
+MonotonicTime TimeWithDynamicClockType::monotonicTime() const
+{
+ RELEASE_ASSERT(m_type == ClockType::Monotonic);
+ return MonotonicTime::fromRawSeconds(m_value);
+}
+
+WallTime TimeWithDynamicClockType::approximateWallTime() const
+{
+ switch (m_type) {
+ case ClockType::Wall:
+ return wallTime();
+ case ClockType::Monotonic:
+ return monotonicTime().approximateWallTime();
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return WallTime();
+}
+
+MonotonicTime TimeWithDynamicClockType::approximateMonotonicTime() const
+{
+ switch (m_type) {
+ case ClockType::Wall:
+ return wallTime().approximateMonotonicTime();
+ case ClockType::Monotonic:
+ return monotonicTime();
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return MonotonicTime();
+}
+
+Seconds TimeWithDynamicClockType::operator-(const TimeWithDynamicClockType& other) const
+{
+ RELEASE_ASSERT(m_type == other.m_type);
+ return Seconds(m_value - other.m_value);
+}
+
+bool TimeWithDynamicClockType::operator<(const TimeWithDynamicClockType& other) const
+{
+ RELEASE_ASSERT(m_type == other.m_type);
+ return m_value < other.m_value;
+}
+
+bool TimeWithDynamicClockType::operator>(const TimeWithDynamicClockType& other) const
+{
+ RELEASE_ASSERT(m_type == other.m_type);
+ return m_value > other.m_value;
+}
+
+bool TimeWithDynamicClockType::operator<=(const TimeWithDynamicClockType& other) const
+{
+ RELEASE_ASSERT(m_type == other.m_type);
+ return m_value <= other.m_value;
+}
+
+bool TimeWithDynamicClockType::operator>=(const TimeWithDynamicClockType& other) const
+{
+ RELEASE_ASSERT(m_type == other.m_type);
+ return m_value >= other.m_value;
+}
+
+void TimeWithDynamicClockType::dump(PrintStream& out) const
+{
+ out.print(m_type, "(", m_value, " sec)");
+}
+
+void sleep(const TimeWithDynamicClockType& time)
+{
+ Lock fakeLock;
+ Condition fakeCondition;
+ LockHolder fakeLocker(fakeLock);
+ fakeCondition.waitUntil(fakeLock, time);
+}
+
+bool hasElapsed(const TimeWithDynamicClockType& time)
+{
+ // Avoid doing now().
+ if (!(time > time.withSameClockAndRawSeconds(0)))
+ return true;
+ if (std::isinf(time.secondsSinceEpoch().value()))
+ return false;
+
+ return time <= time.nowWithSameClock();
+}
+
+} // namespace WTF
+
+
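A sketch of the three cases handled by hasElapsed() above (the concrete deadlines are assumptions; MonotonicTime and Seconds come from the headers this file includes):

    #include <limits>
    #include <wtf/TimeWithDynamicClockType.h>

    void example()
    {
        // 1) Deadlines at or before raw time zero count as elapsed without reading any clock.
        bool a = hasElapsed(MonotonicTime::fromRawSeconds(0));                                     // true
        // 2) An infinite deadline never elapses.
        bool b = hasElapsed(MonotonicTime::fromRawSeconds(std::numeric_limits<double>::infinity())); // false
        // 3) Otherwise the deadline is compared with now() on its own clock.
        bool c = hasElapsed(MonotonicTime::now() - Seconds(1));                                    // true: one second ago
        (void)a; (void)b; (void)c;
    }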
diff --git a/Source/WTF/wtf/TimeWithDynamicClockType.h b/Source/WTF/wtf/TimeWithDynamicClockType.h
new file mode 100644
index 000000000..0f25c4e9a
--- /dev/null
+++ b/Source/WTF/wtf/TimeWithDynamicClockType.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_TimeWithDynamicClockType_h
+#define WTF_TimeWithDynamicClockType_h
+
+#include <wtf/ClockType.h>
+#include <wtf/MonotonicTime.h>
+#include <wtf/WallTime.h>
+
+namespace WTF {
+
+class PrintStream;
+
+class TimeWithDynamicClockType {
+public:
+ TimeWithDynamicClockType() { }
+
+ TimeWithDynamicClockType(WallTime time)
+ : m_value(time.secondsSinceEpoch().value())
+ , m_type(ClockType::Wall)
+ {
+ }
+
+ TimeWithDynamicClockType(MonotonicTime time)
+ : m_value(time.secondsSinceEpoch().value())
+ , m_type(ClockType::Monotonic)
+ {
+ }
+
+ static TimeWithDynamicClockType fromRawSeconds(double value, ClockType type)
+ {
+ TimeWithDynamicClockType result;
+ result.m_value = value;
+ result.m_type = type;
+ return result;
+ }
+
+ Seconds secondsSinceEpoch() const { return Seconds(m_value); }
+ ClockType clockType() const { return m_type; }
+
+ WTF_EXPORT_PRIVATE static TimeWithDynamicClockType now(ClockType);
+
+ WTF_EXPORT_PRIVATE TimeWithDynamicClockType nowWithSameClock() const;
+
+ TimeWithDynamicClockType withSameClockAndRawSeconds(double value) const
+ {
+ return TimeWithDynamicClockType::fromRawSeconds(value, clockType());
+ }
+
+ // Asserts that the time is of the type you want.
+ WTF_EXPORT_PRIVATE WallTime wallTime() const;
+ WTF_EXPORT_PRIVATE MonotonicTime monotonicTime() const;
+
+ WTF_EXPORT_PRIVATE WallTime approximateWallTime() const;
+ WTF_EXPORT_PRIVATE MonotonicTime approximateMonotonicTime() const;
+
+ explicit operator bool() const { return !!m_value; }
+
+ TimeWithDynamicClockType operator+(Seconds other) const
+ {
+ return withSameClockAndRawSeconds(m_value + other.value());
+ }
+
+ TimeWithDynamicClockType operator-(Seconds other) const
+ {
+ return withSameClockAndRawSeconds(m_value - other.value());
+ }
+
+    // Time is a scalar, and negating a scalar can arise from algebraic
+    // transformations, so we allow it.
+ TimeWithDynamicClockType operator-() const
+ {
+ return withSameClockAndRawSeconds(-m_value);
+ }
+
+ TimeWithDynamicClockType operator+=(Seconds other)
+ {
+ return *this = *this + other;
+ }
+
+ TimeWithDynamicClockType operator-=(Seconds other)
+ {
+ return *this = *this - other;
+ }
+
+ WTF_EXPORT_PRIVATE Seconds operator-(const TimeWithDynamicClockType&) const;
+
+ bool operator==(const TimeWithDynamicClockType& other) const
+ {
+ return m_value == other.m_value
+ && m_type == other.m_type;
+ }
+
+ bool operator!=(const TimeWithDynamicClockType& other) const
+ {
+ return !(*this == other);
+ }
+
+ // To do relative comparisons, you must be using times with the same clock type.
+ WTF_EXPORT_PRIVATE bool operator<(const TimeWithDynamicClockType&) const;
+ WTF_EXPORT_PRIVATE bool operator>(const TimeWithDynamicClockType&) const;
+ WTF_EXPORT_PRIVATE bool operator<=(const TimeWithDynamicClockType&) const;
+ WTF_EXPORT_PRIVATE bool operator>=(const TimeWithDynamicClockType&) const;
+
+ WTF_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+private:
+ double m_value { 0 };
+ ClockType m_type { ClockType::Wall };
+};
+
+WTF_EXPORT_PRIVATE void sleep(const TimeWithDynamicClockType&);
+
+WTF_EXPORT_PRIVATE bool hasElapsed(const TimeWithDynamicClockType&);
+
+} // namespace WTF
+
+using WTF::TimeWithDynamicClockType;
+using WTF::hasElapsed;
+using WTF::sleep;
+
+#endif // WTF_TimeWithDynamicClockType_h
diff --git a/Source/WTF/wtf/TinyLRUCache.h b/Source/WTF/wtf/TinyLRUCache.h
new file mode 100644
index 000000000..91b31f9dd
--- /dev/null
+++ b/Source/WTF/wtf/TinyLRUCache.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TinyLRUCache_h
+#define TinyLRUCache_h
+
+#include <wtf/NeverDestroyed.h>
+#include <wtf/Vector.h>
+
+namespace WTF {
+
+template<typename KeyType, typename ValueType>
+struct TinyLRUCachePolicy {
+ static bool isKeyNull(const KeyType&) { return false; }
+ static ValueType createValueForNullKey() { return { }; }
+ static ValueType createValueForKey(const KeyType&) { return { }; }
+};
+
+template<typename KeyType, typename ValueType, size_t capacity = 4, typename Policy = TinyLRUCachePolicy<KeyType, ValueType>>
+class TinyLRUCache {
+public:
+ const ValueType& get(const KeyType& key)
+ {
+ if (Policy::isKeyNull(key)) {
+ static NeverDestroyed<ValueType> valueForNull = Policy::createValueForNullKey();
+ return valueForNull;
+ }
+
+ for (size_t i = 0; i < m_cache.size(); ++i) {
+ if (m_cache[i].first != key)
+ continue;
+
+ if (i == m_cache.size() - 1)
+ return m_cache[i].second;
+
+ // If the entry is not the last one, move it to the end of the cache.
+ Entry entry = WTFMove(m_cache[i]);
+ m_cache.remove(i);
+ m_cache.append(WTFMove(entry));
+ return m_cache[m_cache.size() - 1].second;
+ }
+
+ // m_cache[0] is the LRU entry, so remove it.
+ if (m_cache.size() == capacity)
+ m_cache.remove(0);
+
+ m_cache.append(std::make_pair(key, Policy::createValueForKey(key)));
+ return m_cache.last().second;
+ }
+
+private:
+ typedef std::pair<KeyType, ValueType> Entry;
+ typedef Vector<Entry, capacity> Cache;
+ Cache m_cache;
+};
+
+}
+
+using WTF::TinyLRUCache;
+using WTF::TinyLRUCachePolicy;
+
+#endif // TinyLRUCache_h
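A minimal sketch of how the cache is meant to be used: specialize TinyLRUCachePolicy for the key/value pair, then call get() (the int-to-square policy below is a made-up example):

    #include <wtf/TinyLRUCache.h>

    namespace WTF {

    template<>
    struct TinyLRUCachePolicy<int, int> {
        static bool isKeyNull(const int& key) { return !key; } // treat 0 as the null key
        static int createValueForNullKey() { return 0; }
        static int createValueForKey(const int& key) { return key * key; }
    };

    } // namespace WTF

    int cachedSquare(int n)
    {
        // Holds the 4 most recently used entries; m_cache[0] is evicted when full.
        static TinyLRUCache<int, int> cache;
        return cache.get(n);
    }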
diff --git a/Source/WTF/wtf/TinyPtrSet.h b/Source/WTF/wtf/TinyPtrSet.h
new file mode 100644
index 000000000..f67e9447e
--- /dev/null
+++ b/Source/WTF/wtf/TinyPtrSet.h
@@ -0,0 +1,521 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TinyPtrSet_h
+#define TinyPtrSet_h
+
+#include <wtf/Assertions.h>
+#include <wtf/FastMalloc.h>
+
+namespace JSC { namespace DFG {
+class StructureAbstractValue;
+} } // namespace JSC::DFG
+
+namespace WTF {
+
+// FIXME: This currently only works for types that are pointer-like: they should have the size
+// of a pointer and like a pointer they should not have assignment operators, copy constructors,
+// non-trivial default constructors, and non-trivial destructors. It may be possible to lift all
+// of these restrictions. If we succeeded then this should be renamed to just TinySet.
+// https://bugs.webkit.org/show_bug.cgi?id=145741
+
+template<typename T>
+class TinyPtrSet {
+ static_assert(sizeof(T) == sizeof(void*), "It's in the title of the class.");
+public:
+ TinyPtrSet()
+ : m_pointer(0)
+ {
+ setEmpty();
+ }
+
+ TinyPtrSet(T element)
+ : m_pointer(0)
+ {
+ set(element);
+ }
+
+ ALWAYS_INLINE TinyPtrSet(const TinyPtrSet& other)
+ : m_pointer(0)
+ {
+ copyFrom(other);
+ }
+
+ ALWAYS_INLINE TinyPtrSet& operator=(const TinyPtrSet& other)
+ {
+ if (this == &other)
+ return *this;
+ deleteListIfNecessary();
+ copyFrom(other);
+ return *this;
+ }
+
+ ~TinyPtrSet()
+ {
+ deleteListIfNecessary();
+ }
+
+ void clear()
+ {
+ deleteListIfNecessary();
+ setEmpty();
+ }
+
+ // Returns the only entry if the array has exactly one entry.
+ T onlyEntry() const
+ {
+ if (isThin())
+ return singleEntry();
+ OutOfLineList* list = this->list();
+ if (list->m_length != 1)
+ return T();
+ return list->list()[0];
+ }
+
+ bool isEmpty() const
+ {
+ bool result = isThin() && !singleEntry();
+ if (result)
+ ASSERT(m_pointer != reservedValue);
+ return result;
+ }
+
+ // Returns true if the value was added, or false if the value was already there.
+ bool add(T value)
+ {
+ ASSERT(value);
+ if (isThin()) {
+ if (singleEntry() == value)
+ return false;
+ if (!singleEntry()) {
+ set(value);
+ return true;
+ }
+
+ OutOfLineList* list = OutOfLineList::create(defaultStartingSize);
+ list->m_length = 2;
+ list->list()[0] = singleEntry();
+ list->list()[1] = value;
+ set(list);
+ return true;
+ }
+
+ return addOutOfLine(value);
+ }
+
+ bool remove(T value)
+ {
+ if (isThin()) {
+ if (singleEntry() == value) {
+ setEmpty();
+ return true;
+ }
+ return false;
+ }
+
+ OutOfLineList* list = this->list();
+ for (unsigned i = 0; i < list->m_length; ++i) {
+ if (list->list()[i] != value)
+ continue;
+ list->list()[i] = list->list()[--list->m_length];
+ if (!list->m_length) {
+ OutOfLineList::destroy(list);
+ setEmpty();
+ }
+ return true;
+ }
+ return false;
+ }
+
+ bool contains(T value) const
+ {
+ if (isThin())
+ return singleEntry() == value;
+ return containsOutOfLine(value);
+ }
+
+ bool merge(const TinyPtrSet& other)
+ {
+ if (other.isThin()) {
+ if (other.singleEntry())
+ return add(other.singleEntry());
+ return false;
+ }
+
+ OutOfLineList* list = other.list();
+ if (list->m_length >= 2) {
+ if (isThin()) {
+ OutOfLineList* myNewList = OutOfLineList::create(
+ list->m_length + !!singleEntry());
+ if (singleEntry()) {
+ myNewList->m_length = 1;
+ myNewList->list()[0] = singleEntry();
+ }
+ set(myNewList);
+ }
+ bool changed = false;
+ for (unsigned i = 0; i < list->m_length; ++i)
+ changed |= addOutOfLine(list->list()[i]);
+ return changed;
+ }
+
+ ASSERT(list->m_length);
+ return add(list->list()[0]);
+ }
+
+ template<typename Functor>
+ void forEach(const Functor& functor) const
+ {
+ if (isThin()) {
+ if (!singleEntry())
+ return;
+ functor(singleEntry());
+ return;
+ }
+
+ OutOfLineList* list = this->list();
+ for (unsigned i = 0; i < list->m_length; ++i)
+ functor(list->list()[i]);
+ }
+
+ template<typename Functor>
+ void genericFilter(const Functor& functor)
+ {
+ if (isThin()) {
+ if (!singleEntry())
+ return;
+ if (functor(singleEntry()))
+ return;
+ clear();
+ return;
+ }
+
+ OutOfLineList* list = this->list();
+ for (unsigned i = 0; i < list->m_length; ++i) {
+ if (functor(list->list()[i]))
+ continue;
+ list->list()[i--] = list->list()[--list->m_length];
+ }
+ if (!list->m_length)
+ clear();
+ }
+
+ void filter(const TinyPtrSet& other)
+ {
+ if (other.isThin()) {
+ if (!other.singleEntry() || !contains(other.singleEntry()))
+ clear();
+ else {
+ clear();
+ set(other.singleEntry());
+ }
+ return;
+ }
+
+ genericFilter([&] (T value) { return other.containsOutOfLine(value); });
+ }
+
+ void exclude(const TinyPtrSet& other)
+ {
+ if (other.isThin()) {
+ if (other.singleEntry())
+ remove(other.singleEntry());
+ return;
+ }
+
+ genericFilter([&] (T value) { return !other.containsOutOfLine(value); });
+ }
+
+ bool isSubsetOf(const TinyPtrSet& other) const
+ {
+ if (isThin()) {
+ if (!singleEntry())
+ return true;
+ return other.contains(singleEntry());
+ }
+
+ if (other.isThin()) {
+ if (!other.singleEntry())
+ return false;
+ OutOfLineList* list = this->list();
+ if (list->m_length >= 2)
+ return false;
+ if (list->list()[0] == other.singleEntry())
+ return true;
+ return false;
+ }
+
+ OutOfLineList* list = this->list();
+ for (unsigned i = 0; i < list->m_length; ++i) {
+ if (!other.containsOutOfLine(list->list()[i]))
+ return false;
+ }
+ return true;
+ }
+
+ bool isSupersetOf(const TinyPtrSet& other) const
+ {
+ return other.isSubsetOf(*this);
+ }
+
+ bool overlaps(const TinyPtrSet& other) const
+ {
+ if (isThin()) {
+ if (!singleEntry())
+ return false;
+ return other.contains(singleEntry());
+ }
+
+ if (other.isThin()) {
+ if (!other.singleEntry())
+ return false;
+ return containsOutOfLine(other.singleEntry());
+ }
+
+ OutOfLineList* list = this->list();
+ for (unsigned i = 0; i < list->m_length; ++i) {
+ if (other.containsOutOfLine(list->list()[i]))
+ return true;
+ }
+ return false;
+ }
+
+ size_t size() const
+ {
+ if (isThin())
+ return !!singleEntry();
+ return list()->m_length;
+ }
+
+ T at(size_t i) const
+ {
+ if (isThin()) {
+ ASSERT(!i);
+ ASSERT(singleEntry());
+ return singleEntry();
+ }
+ ASSERT(i < list()->m_length);
+ return list()->list()[i];
+ }
+
+ T operator[](size_t i) const { return at(i); }
+
+ T last() const
+ {
+ if (isThin()) {
+ ASSERT(singleEntry());
+ return singleEntry();
+ }
+ return list()->list()[list()->m_length - 1];
+ }
+
+ class iterator {
+ public:
+ iterator()
+ : m_set(nullptr)
+ , m_index(0)
+ {
+ }
+
+ iterator(const TinyPtrSet* set, size_t index)
+ : m_set(set)
+ , m_index(index)
+ {
+ }
+
+ T operator*() const { return m_set->at(m_index); }
+ iterator& operator++()
+ {
+ m_index++;
+ return *this;
+ }
+ bool operator==(const iterator& other) const { return m_index == other.m_index; }
+ bool operator!=(const iterator& other) const { return !(*this == other); }
+
+ private:
+ const TinyPtrSet* m_set;
+ size_t m_index;
+ };
+
+ iterator begin() const { return iterator(this, 0); }
+ iterator end() const { return iterator(this, size()); }
+
+ bool operator==(const TinyPtrSet& other) const
+ {
+ if (size() != other.size())
+ return false;
+ return isSubsetOf(other);
+ }
+
+private:
+ friend class JSC::DFG::StructureAbstractValue;
+
+ static const uintptr_t fatFlag = 1;
+ static const uintptr_t reservedFlag = 2;
+ static const uintptr_t flags = fatFlag | reservedFlag;
+ static const uintptr_t reservedValue = 4;
+
+ static const unsigned defaultStartingSize = 4;
+
+ bool addOutOfLine(T value)
+ {
+ OutOfLineList* list = this->list();
+ for (unsigned i = 0; i < list->m_length; ++i) {
+ if (list->list()[i] == value)
+ return false;
+ }
+
+ if (list->m_length < list->m_capacity) {
+ list->list()[list->m_length++] = value;
+ return true;
+ }
+
+ OutOfLineList* newList = OutOfLineList::create(list->m_capacity * 2);
+ newList->m_length = list->m_length + 1;
+ for (unsigned i = list->m_length; i--;)
+ newList->list()[i] = list->list()[i];
+ newList->list()[list->m_length] = value;
+ OutOfLineList::destroy(list);
+ set(newList);
+ return true;
+ }
+
+ bool containsOutOfLine(T value) const
+ {
+ OutOfLineList* list = this->list();
+ for (unsigned i = 0; i < list->m_length; ++i) {
+ if (list->list()[i] == value)
+ return true;
+ }
+ return false;
+ }
+
+ ALWAYS_INLINE void copyFrom(const TinyPtrSet& other)
+ {
+ if (other.isThin() || other.m_pointer == reservedValue) {
+ bool value = getReservedFlag();
+ m_pointer = other.m_pointer;
+ setReservedFlag(value);
+ return;
+ }
+ copyFromOutOfLine(other);
+ }
+
+ NEVER_INLINE void copyFromOutOfLine(const TinyPtrSet& other)
+ {
+ ASSERT(!other.isThin() && other.m_pointer != reservedValue);
+ OutOfLineList* otherList = other.list();
+ OutOfLineList* myList = OutOfLineList::create(otherList->m_length);
+ myList->m_length = otherList->m_length;
+ for (unsigned i = otherList->m_length; i--;)
+ myList->list()[i] = otherList->list()[i];
+ set(myList);
+ }
+
+ class OutOfLineList {
+ public:
+ static OutOfLineList* create(unsigned capacity)
+ {
+ return new (NotNull, fastMalloc(sizeof(OutOfLineList) + capacity * sizeof(T))) OutOfLineList(0, capacity);
+ }
+
+ static void destroy(OutOfLineList* list)
+ {
+ fastFree(list);
+ }
+
+ T* list() { return bitwise_cast<T*>(this + 1); }
+
+ OutOfLineList(unsigned length, unsigned capacity)
+ : m_length(length)
+ , m_capacity(capacity)
+ {
+ }
+
+ unsigned m_length;
+ unsigned m_capacity;
+ };
+
+ ALWAYS_INLINE void deleteListIfNecessary()
+ {
+ if (!isThin()) {
+ ASSERT(m_pointer != reservedValue);
+ OutOfLineList::destroy(list());
+ }
+ }
+
+ bool isThin() const { return !(m_pointer & fatFlag); }
+
+ void* pointer() const
+ {
+ return bitwise_cast<void*>(m_pointer & ~flags);
+ }
+
+ T singleEntry() const
+ {
+ ASSERT(isThin());
+ return bitwise_cast<T>(pointer());
+ }
+
+ OutOfLineList* list() const
+ {
+ ASSERT(!isThin());
+ return static_cast<OutOfLineList*>(pointer());
+ }
+
+ void set(T value)
+ {
+ set(bitwise_cast<uintptr_t>(value), true);
+ }
+ void set(OutOfLineList* list)
+ {
+ set(bitwise_cast<uintptr_t>(list), false);
+ }
+ void setEmpty()
+ {
+ set(0, true);
+ }
+ void set(uintptr_t pointer, bool singleEntry)
+ {
+ m_pointer = pointer | (singleEntry ? 0 : fatFlag) | (m_pointer & reservedFlag);
+ }
+ bool getReservedFlag() const { return m_pointer & reservedFlag; }
+ void setReservedFlag(bool value)
+ {
+ if (value)
+ m_pointer |= reservedFlag;
+ else
+ m_pointer &= ~reservedFlag;
+ }
+
+ uintptr_t m_pointer;
+};
+
+} // namespace WTF
+
+using WTF::TinyPtrSet;
+
+#endif // TinyPtrSet_h
+
diff --git a/Source/WTF/wtf/TypeCasts.h b/Source/WTF/wtf/TypeCasts.h
new file mode 100644
index 000000000..18b940c49
--- /dev/null
+++ b/Source/WTF/wtf/TypeCasts.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TypeCasts_h
+#define TypeCasts_h
+
+#include <type_traits>
+
+namespace WTF {
+
+template <typename ExpectedType, typename ArgType, bool isBaseType = std::is_base_of<ExpectedType, ArgType>::value>
+struct TypeCastTraits {
+ static bool isOfType(ArgType&)
+ {
+ // If you're hitting this assertion, it is likely because you used
+ // is<>() or downcast<>() with a type that doesn't have the needed
+        // TypeCastTraits specialization. Please use the following macros
+        // to add that specialization:
+ // SPECIALIZE_TYPE_TRAITS_BEGIN() / SPECIALIZE_TYPE_TRAITS_END()
+ static_assert(std::is_void<ExpectedType>::value, "Missing TypeCastTraits specialization");
+ return false;
+ }
+};
+
+// Template specialization for the case where ExpectedType is a base of ArgType,
+// so we can return true unconditionally.
+template <typename ExpectedType, typename ArgType>
+struct TypeCastTraits<ExpectedType, ArgType, true /* isBaseType */> {
+ static bool isOfType(ArgType&) { return true; }
+};
+
+// Type checking function, to use before casting with downcast<>().
+template <typename ExpectedType, typename ArgType>
+inline bool is(ArgType& source)
+{
+ static_assert(std::is_base_of<ArgType, ExpectedType>::value, "Unnecessary type check");
+ return TypeCastTraits<const ExpectedType, const ArgType>::isOfType(source);
+}
+
+template <typename ExpectedType, typename ArgType>
+inline bool is(ArgType* source)
+{
+ static_assert(std::is_base_of<ArgType, ExpectedType>::value, "Unnecessary type check");
+ return source && TypeCastTraits<const ExpectedType, const ArgType>::isOfType(*source);
+}
+
+// Update T's constness to match Reference's.
+template <typename Reference, typename T>
+struct match_constness {
+ typedef typename std::conditional<std::is_const<Reference>::value, typename std::add_const<T>::type, typename std::remove_const<T>::type>::type type;
+};
+
+// Safe downcasting functions.
+template<typename Target, typename Source>
+inline typename match_constness<Source, Target>::type& downcast(Source& source)
+{
+ static_assert(!std::is_same<Source, Target>::value, "Unnecessary cast to same type");
+ static_assert(std::is_base_of<Source, Target>::value, "Should be a downcast");
+ ASSERT_WITH_SECURITY_IMPLICATION(is<Target>(source));
+ return static_cast<typename match_constness<Source, Target>::type&>(source);
+}
+template<typename Target, typename Source>
+inline typename match_constness<Source, Target>::type* downcast(Source* source)
+{
+ static_assert(!std::is_same<Source, Target>::value, "Unnecessary cast to same type");
+ static_assert(std::is_base_of<Source, Target>::value, "Should be a downcast");
+ ASSERT_WITH_SECURITY_IMPLICATION(!source || is<Target>(*source));
+ return static_cast<typename match_constness<Source, Target>::type*>(source);
+}
+
+// Add support for type checking / casting using is<>() / downcast<>() helpers for a specific class.
+#define SPECIALIZE_TYPE_TRAITS_BEGIN(ClassName) \
+namespace WTF { \
+template <typename ArgType> \
+class TypeCastTraits<const ClassName, ArgType, false /* isBaseType */> { \
+public: \
+ static bool isOfType(ArgType& source) { return isType(source); } \
+private:
+
+#define SPECIALIZE_TYPE_TRAITS_END() \
+}; \
+}
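+
+// A minimal usage sketch (hypothetical class names, not part of this header):
+//
+//     SPECIALIZE_TYPE_TRAITS_BEGIN(DerivedNode)
+//         static bool isType(const Node& node) { return node.isDerivedNode(); }
+//     SPECIALIZE_TYPE_TRAITS_END()
+//
+//     if (is<DerivedNode>(node))
+//         downcast<DerivedNode>(node).derivedNodeOnlyFunction();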
+
+} // namespace WTF
+
+using WTF::TypeCastTraits;
+using WTF::is;
+using WTF::downcast;
+
+#endif // TypeCasts_h
diff --git a/Source/WTF/wtf/UniStdExtras.cpp b/Source/WTF/wtf/UniStdExtras.cpp
new file mode 100644
index 000000000..4e33576e6
--- /dev/null
+++ b/Source/WTF/wtf/UniStdExtras.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 Igalia S.L.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UniStdExtras.h"
+
+#include <errno.h>
+#include <fcntl.h>
+
+namespace WTF {
+
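+// Marks the descriptor as close-on-exec (FD_CLOEXEC) so it is not inherited
+// across exec(); the fcntl() calls are retried while they fail with EINTR.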
+bool setCloseOnExec(int fileDescriptor)
+{
+ int returnValue = -1;
+ do {
+ int flags = fcntl(fileDescriptor, F_GETFD);
+ if (flags != -1)
+ returnValue = fcntl(fileDescriptor, F_SETFD, flags | FD_CLOEXEC);
+ } while (returnValue == -1 && errno == EINTR);
+
+ return returnValue != -1;
+}
+
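+// Duplicates a descriptor with close-on-exec already set. Where F_DUPFD_CLOEXEC
+// is available this is done atomically; otherwise the code falls back to dup()
+// followed by setCloseOnExec(), closing the duplicate again if that step fails.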
+int dupCloseOnExec(int fileDescriptor)
+{
+ int duplicatedFileDescriptor = -1;
+#ifdef F_DUPFD_CLOEXEC
+ while ((duplicatedFileDescriptor = fcntl(fileDescriptor, F_DUPFD_CLOEXEC, 0)) == -1 && errno == EINTR) { }
+ if (duplicatedFileDescriptor != -1)
+ return duplicatedFileDescriptor;
+
+#endif
+
+ while ((duplicatedFileDescriptor = dup(fileDescriptor)) == -1 && errno == EINTR) { }
+ if (duplicatedFileDescriptor == -1)
+ return -1;
+
+ if (!setCloseOnExec(duplicatedFileDescriptor)) {
+ closeWithRetry(duplicatedFileDescriptor);
+ return -1;
+ }
+
+ return duplicatedFileDescriptor;
+}
+
+} // namespace WTF
diff --git a/Source/WTF/wtf/UniStdExtras.h b/Source/WTF/wtf/UniStdExtras.h
index 14acff1e7..c3edcf235 100644
--- a/Source/WTF/wtf/UniStdExtras.h
+++ b/Source/WTF/wtf/UniStdExtras.h
@@ -31,6 +31,9 @@
namespace WTF {
+bool setCloseOnExec(int fileDescriptor);
+int dupCloseOnExec(int fileDescriptor);
+
inline int closeWithRetry(int fileDescriptor)
{
int ret;
@@ -50,5 +53,7 @@ inline int closeWithRetry(int fileDescriptor)
} // namespace WTF
using WTF::closeWithRetry;
+using WTF::setCloseOnExec;
+using WTF::dupCloseOnExec;
#endif // UniStdExtras_h
diff --git a/Source/WTF/wtf/UniqueRef.h b/Source/WTF/wtf/UniqueRef.h
new file mode 100644
index 000000000..2039950d9
--- /dev/null
+++ b/Source/WTF/wtf/UniqueRef.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <memory>
+#include <wtf/Assertions.h>
+
+namespace WTF {
+
+template<typename T> class UniqueRef;
+
+template<typename T, class... Args>
+UniqueRef<T> makeUniqueRef(Args&&... args)
+{
+ return UniqueRef<T>(*new T(std::forward<Args>(args)...));
+}
+
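+// Usage sketch (Widget is a hypothetical type): UniqueRef owns its object like
+// std::unique_ptr but is expected to always hold one (every accessor asserts
+// non-null), so callers can use it without a null check:
+//
+//     UniqueRef<Widget> widget = makeUniqueRef<Widget>();
+//     widget->doSomething();
+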
+template<typename T>
+class UniqueRef {
+public:
+ template <typename U>
+ UniqueRef(UniqueRef<U>&& other)
+ : m_ref(WTFMove(other.m_ref))
+ {
+ ASSERT(m_ref);
+ }
+
+ T& get() { ASSERT(m_ref); return *m_ref; }
+ const T& get() const { ASSERT(m_ref); return *m_ref; }
+
+ T* operator&() { ASSERT(m_ref); return m_ref.get(); }
+ const T* operator&() const { ASSERT(m_ref); return m_ref.get(); }
+
+ T* operator->() { ASSERT(m_ref); return m_ref.get(); }
+ const T* operator->() const { ASSERT(m_ref); return m_ref.get(); }
+
+ operator T&() { ASSERT(m_ref); return *m_ref; }
+ operator const T&() const { ASSERT(m_ref); return *m_ref; }
+
+private:
+ template<class U, class... Args> friend UniqueRef<U> makeUniqueRef(Args&&...);
+ template<class U> friend class UniqueRef;
+
+ UniqueRef(T& other)
+ : m_ref(&other)
+ {
+ ASSERT(m_ref);
+ }
+
+ std::unique_ptr<T> m_ref;
+};
+
+} // namespace WTF
+
+using WTF::UniqueRef;
+using WTF::makeUniqueRef;
diff --git a/Source/WTF/wtf/VMTags.h b/Source/WTF/wtf/VMTags.h
index 117bc3721..aef05d2b5 100644
--- a/Source/WTF/wtf/VMTags.h
+++ b/Source/WTF/wtf/VMTags.h
@@ -56,19 +56,12 @@
#define VM_TAG_FOR_COLLECTOR_MEMORY VM_MAKE_TAG(63)
#endif // defined(VM_MEMORY_JAVASCRIPT_CORE)
-#if defined(VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS)
-#define VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY VM_MAKE_TAG(VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS)
-#else
-#define VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY VM_MAKE_TAG(69)
-#endif // defined(VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS)
-
#else // OS(DARWIN)
#define VM_TAG_FOR_TCMALLOC_MEMORY -1
#define VM_TAG_FOR_COLLECTOR_MEMORY -1
#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY -1
#define VM_TAG_FOR_REGISTERFILE_MEMORY -1
-#define VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY -1
#endif // OS(DARWIN)
diff --git a/Source/WTF/wtf/Variant.h b/Source/WTF/wtf/Variant.h
new file mode 100644
index 000000000..6d4a7399c
--- /dev/null
+++ b/Source/WTF/wtf/Variant.h
@@ -0,0 +1,2079 @@
+// Copyright (c) 2015, Just Software Solutions Ltd
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or
+// without modification, are permitted provided that the
+// following conditions are met:
+//
+// 1. Redistributions of source code must retain the above
+// copyright notice, this list of conditions and the following
+// disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials
+// provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of
+// its contributors may be used to endorse or promote products
+// derived from this software without specific prior written
+// permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+// CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Copied from https://bitbucket.org/anthonyw/variant/src (5bce47fa788648f79e5ea1d77b0eef2e8f0b2999)
+
+// Modified to make it compile with exceptions disabled.
+
+#pragma once
+
+#include <functional>
+#include <limits.h>
+#include <new>
+#include <stddef.h>
+#include <stdexcept>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <wtf/Compiler.h>
+#include <wtf/StdLibExtras.h>
+
+#if COMPILER(MSVC)
+#pragma warning(push)
+#pragma warning(disable:4245)
+#pragma warning(disable:4521)
+#pragma warning(disable:4522)
+#pragma warning(disable:4814)
+#endif
+
+#if !COMPILER(CLANG) || WTF_CPP_STD_VER >= 14
+
+namespace WTF {
+
+#if COMPILER_SUPPORTS(EXCEPTIONS)
+#define __THROW_EXCEPTION(__exception) throw __exception;
+#define __NOEXCEPT noexcept
+#define __NOEXCEPT_(__exception) noexcept(__exception)
+#else
+#define __THROW_EXCEPTION(__exception) do { (void)__exception; CRASH(); } while (0);
+#define __NOEXCEPT
+#define __NOEXCEPT_(...)
+#endif
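+
+// When exceptions are unavailable, __THROW_EXCEPTION evaluates its argument and
+// then crashes instead of throwing, and the __NOEXCEPT macros expand to nothing.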
+
+struct __in_place_private{
+ template<typename>
+ struct __type_holder;
+
+ template<size_t>
+ struct __value_holder;
+};
+
+
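+// The in_place tags are implemented as references to overloads of the in_place()
+// function; each distinct parameter type yields a distinct tag type, so
+// in_place_t, in_place_type_t<_Type> and in_place_index_t<_Index> can be used
+// for overload disambiguation without ever constructing a tag object.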
+struct in_place_tag {
+ in_place_tag() = delete;
+};
+
+using in_place_t = in_place_tag(&)(__in_place_private&);
+
+template <class _Type>
+using in_place_type_t = in_place_tag(&)(__in_place_private::__type_holder<_Type>&);
+
+template <size_t _Index>
+using in_place_index_t = in_place_tag(&)(__in_place_private::__value_holder<_Index>&);
+
+in_place_tag in_place(__in_place_private&);
+
+template <class _Type>
+in_place_tag in_place(__in_place_private::__type_holder<_Type> &) {
+ __THROW_EXCEPTION(__in_place_private());
+}
+
+template <size_t _Index>
+in_place_tag in_place(__in_place_private::__value_holder<_Index> &) {
+ __THROW_EXCEPTION(__in_place_private());
+}
+
+class bad_variant_access: public std::logic_error{
+public:
+ explicit bad_variant_access(const std::string& what_arg):
+ std::logic_error(what_arg)
+ {}
+ explicit bad_variant_access(const char* what_arg):
+ std::logic_error(what_arg)
+ {}
+};
+
+template<typename T>
+NO_RETURN_DUE_TO_CRASH inline T __throw_bad_variant_access(const char* what_arg){
+ __THROW_EXCEPTION(bad_variant_access(what_arg))
+}
+
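+// Pack-indexing helpers: __type_index finds the position of a type within the
+// parameter pack, __indexed_type maps an index back to the type at that
+// position, and __next_index advances an index, yielding -1 past the end.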
+template<ptrdiff_t _Offset,typename _Type,typename ... _Types>
+struct __type_index_helper;
+
+template<ptrdiff_t _Offset,typename _Type,typename _Head,typename ... _Rest>
+struct __type_index_helper<_Offset,_Type,_Head,_Rest...>{
+ static constexpr ptrdiff_t __value=
+ __type_index_helper<_Offset+1,_Type,_Rest...>::__value;
+};
+
+template<ptrdiff_t _Offset,typename _Type,typename ... _Rest>
+struct __type_index_helper<_Offset,_Type,_Type,_Rest...>{
+ static constexpr ptrdiff_t __value=_Offset;
+};
+
+template<typename _Type,typename ... _Types>
+struct __type_index{
+ static constexpr ptrdiff_t __value=
+ __type_index_helper<0,_Type,_Types...>::__value;
+};
+
+template<ptrdiff_t _Index,typename ... _Types>
+struct __indexed_type;
+
+template<typename _Head,typename ... _Rest>
+struct __indexed_type<0,_Head,_Rest...>{
+ typedef _Head __type;
+};
+
+template<typename _Head,typename ... _Rest>
+struct __indexed_type<-1,_Head,_Rest...>{
+ typedef void __type;
+};
+
+template<ptrdiff_t _Index,typename _Head,typename ... _Rest>
+struct __indexed_type<_Index,_Head,_Rest...>{
+ typedef typename __indexed_type<_Index-1,_Rest...>::__type __type;
+};
+
+template<ptrdiff_t _Index,typename ..._Types>
+struct __next_index{
+ static constexpr ptrdiff_t __value=
+ (_Index>=ptrdiff_t(sizeof...(_Types)-1))?-1:_Index+1;
+};
+
+template<typename ... _Types>
+class Variant;
+
+template<typename>
+struct variant_size;
+
+template <typename _Type>
+struct variant_size<const _Type> : variant_size<_Type> {};
+
+template <typename _Type>
+struct variant_size<volatile _Type> : variant_size<_Type> {};
+
+template <typename _Type>
+struct variant_size<const volatile _Type> : variant_size<_Type> {};
+
+template <typename... _Types>
+struct variant_size<Variant<_Types...>>
+ : std::integral_constant<size_t, sizeof...(_Types)> {};
+
+template<size_t _Index,typename _Type>
+struct variant_alternative;
+
+template<size_t _Index,typename _Type>
+using variant_alternative_t=typename variant_alternative<_Index,_Type>::type;
+
+template <size_t _Index, typename _Type>
+struct variant_alternative<_Index, const _Type>{
+ using type=std::add_const_t<variant_alternative_t<_Index,_Type>>;
+};
+
+template <size_t _Index, typename _Type>
+struct variant_alternative<_Index, volatile _Type>{
+ using type=std::add_volatile_t<variant_alternative_t<_Index,_Type>>;
+};
+
+template <size_t _Index, typename _Type>
+struct variant_alternative<_Index, volatile const _Type>{
+ using type=std::add_volatile_t<std::add_const_t<variant_alternative_t<_Index,_Type>>>;
+};
+
+template<size_t _Index,typename ... _Types>
+struct variant_alternative<_Index,Variant<_Types...>>{
+ using type=typename __indexed_type<_Index,_Types...>::__type;
+};
+
+constexpr size_t variant_npos=-1;
+
+template<typename _Type,typename ... _Types>
+constexpr _Type& get(Variant<_Types...>&);
+
+template<typename _Type,typename ... _Types>
+constexpr _Type const& get(Variant<_Types...> const&);
+
+template<typename _Type,typename ... _Types>
+constexpr _Type&& get(Variant<_Types...>&&);
+
+template<typename _Type,typename ... _Types>
+constexpr const _Type&& get(Variant<_Types...> const&&);
+
+template<ptrdiff_t _Index,typename ... _Types>
+constexpr typename __indexed_type<_Index,_Types...>::__type& get(Variant<_Types...>&);
+
+template<ptrdiff_t _Index,typename ... _Types>
+constexpr typename __indexed_type<_Index,_Types...>::__type&& get(Variant<_Types...>&&);
+
+template<ptrdiff_t _Index,typename ... _Types>
+constexpr typename __indexed_type<_Index,_Types...>::__type const& get(
+ Variant<_Types...> const&);
+
+template <ptrdiff_t _Index, typename... _Types>
+constexpr const typename __indexed_type<_Index, _Types...>::__type &&
+get(Variant<_Types...> const &&);
+
+template<typename _Type,typename ... _Types>
+constexpr std::add_pointer_t<_Type> get_if(Variant<_Types...>&);
+
+template<typename _Type,typename ... _Types>
+constexpr std::add_pointer_t<_Type const> get_if(Variant<_Types...> const&);
+
+template<ptrdiff_t _Index,typename ... _Types>
+constexpr std::add_pointer_t<typename __indexed_type<_Index,_Types...>::__type> get_if(Variant<_Types...>&);
+
+template<ptrdiff_t _Index,typename ... _Types>
+constexpr std::add_pointer_t<typename __indexed_type<_Index,_Types...>::__type const> get_if(
+ Variant<_Types...> const&);
+
+template<ptrdiff_t _Index,typename ... _Types>
+struct __variant_accessor;
+
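+// __discriminator_type selects the smallest signed integer type that can hold
+// the number of alternatives; Variant uses it for its stored index, with -1
+// reserved for the valueless state.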
+template<size_t __count,
+ bool __larger_than_char=(__count>SCHAR_MAX),
+ bool __larger_than_short=(__count>SHRT_MAX),
+ bool __larger_than_int=(__count>INT_MAX)>
+struct __discriminator_type{
+ typedef signed char __type;
+};
+
+template<size_t __count>
+struct __discriminator_type<__count,true,false,false>{
+ typedef signed short __type;
+};
+
+template<size_t __count>
+struct __discriminator_type<__count,true,true,false>{
+ typedef int __type;
+};
+template<size_t __count>
+struct __discriminator_type<__count,true,true,true>{
+ typedef signed long __type;
+};
+
+template<typename _Type>
+struct __stored_type{
+ typedef _Type __type;
+};
+
+template<typename _Type>
+struct __stored_type<_Type&>{
+ typedef _Type* __type;
+};
+
+template<typename ... _Types>
+struct __all_trivially_destructible;
+
+template<>
+struct __all_trivially_destructible<> {
+ static constexpr bool __value=true;
+};
+
+template<typename _Type>
+struct __all_trivially_destructible<_Type> {
+ static constexpr bool __value=
+ std::is_trivially_destructible<typename __stored_type<_Type>::__type>::value;
+};
+
+template<typename _Head,typename ... _Rest>
+struct __all_trivially_destructible<_Head,_Rest...> {
+ static constexpr bool __value=
+ __all_trivially_destructible<_Head>::__value &&
+ __all_trivially_destructible<_Rest...>::__value;
+};
+
+template<typename _Target,typename ... _Args>
+struct __storage_nothrow_constructible{
+ static const bool __value=
+ std::is_nothrow_constructible<_Target, _Args...>::value;
+};
+
+template<typename ... _Types>
+struct __storage_nothrow_move_constructible;
+
+template<>
+struct __storage_nothrow_move_constructible<> {
+ static constexpr bool __value=true;
+};
+
+template<typename _Type>
+struct __storage_nothrow_move_constructible<_Type> {
+ static constexpr bool __value=
+ std::is_nothrow_move_constructible<
+ typename __stored_type<_Type>::__type>::value;
+};
+
+template<typename _Head,typename ... _Rest>
+struct __storage_nothrow_move_constructible<_Head,_Rest...> {
+ static constexpr bool __value=
+ __storage_nothrow_move_constructible<_Head>::__value &&
+ __storage_nothrow_move_constructible<_Rest...>::__value;
+};
+
+template<ptrdiff_t _Index,typename ... _Types>
+struct __other_storage_nothrow_move_constructible;
+
+template<typename _Head,typename ... _Rest>
+struct __other_storage_nothrow_move_constructible<0,_Head,_Rest...>{
+ static const bool __value=__storage_nothrow_move_constructible<_Rest...>::__value;
+};
+
+template<typename _Head,typename ... _Rest>
+struct __other_storage_nothrow_move_constructible<-1,_Head,_Rest...>{
+ static const bool __value=
+ __storage_nothrow_move_constructible<_Head,_Rest...>::__value;
+};
+
+template<ptrdiff_t _Index,typename _Head,typename ... _Rest>
+struct __other_storage_nothrow_move_constructible<_Index,_Head,_Rest...>{
+ static const bool __value=
+ __storage_nothrow_move_constructible<_Head>::__value &&
+ __other_storage_nothrow_move_constructible<_Index-1,_Rest...>::__value;
+};
+
+template<ptrdiff_t _Index,typename ... _Types>
+struct __backup_storage_required{
+ static const bool __value=
+ !__storage_nothrow_move_constructible<
+ typename __indexed_type<_Index,_Types...>::__type>::__value &&
+ !__other_storage_nothrow_move_constructible<_Index,_Types...>::__value;
+};
+
+template<ptrdiff_t _Index,ptrdiff_t _Count,typename ... _Types>
+struct __any_backup_storage_required_impl{
+ static const bool __value=
+ __backup_storage_required<_Index,_Types...>::__value ||
+ __any_backup_storage_required_impl<_Index+1,_Count-1,_Types...>::__value;
+};
+
+template<ptrdiff_t _Index,typename ... _Types>
+struct __any_backup_storage_required_impl<_Index,0,_Types...>{
+ static const bool __value=false;
+};
+
+template<typename _Variant>
+struct __any_backup_storage_required;
+
+template<typename ... _Types>
+struct __any_backup_storage_required<Variant<_Types...> >{
+ static const bool __value=
+ __any_backup_storage_required_impl<0,sizeof...(_Types),_Types...>::__value;
+};
+
+template<typename ... _Types>
+union __variant_data;
+
+template<typename _Type,bool=std::is_literal_type<_Type>::value>
+struct __variant_storage{
+ typedef _Type __type;
+
+ static constexpr _Type& __get(__type& __val){
+ return __val;
+ }
+ static constexpr _Type&& __get_rref(__type& __val){
+ return std::move(__val);
+ }
+ static constexpr const _Type& __get(__type const& __val){
+ return __val;
+ }
+ static constexpr const _Type&& __get_rref(__type const& __val){
+ return std::move(__val);
+ }
+ static void __destroy(__type&){}
+};
+
+template<typename _Type>
+struct __storage_wrapper{
+ typename std::aligned_storage<sizeof(_Type),alignof(_Type)>::type __storage;
+
+ template<typename ... _Args>
+ static constexpr void __construct(void* __p,_Args&& ... __args){
+ new (__p) _Type(std::forward<_Args>(__args)...);
+ }
+
+ template <typename _Dummy = _Type>
+ __storage_wrapper(
+ typename std::enable_if<std::is_default_constructible<_Dummy>::value,
+ void (__storage_wrapper::*)()>::type = nullptr) {
+ __construct(&__storage);
+ }
+
+ template <typename _Dummy = _Type>
+ __storage_wrapper(
+ typename std::enable_if<!std::is_default_constructible<_Dummy>::value,
+ void (__storage_wrapper::*)()>::type = nullptr) {
+ }
+
+ template<typename _First,typename ... _Args>
+ __storage_wrapper(_First&& __first,_Args&& ... __args){
+ __construct(&__storage,std::forward<_First>(__first),std::forward<_Args>(__args)...);
+ }
+
+ _Type& __get(){
+ return *static_cast<_Type*>(static_cast<void*>(&__storage));
+ }
+ constexpr _Type const& __get() const{
+ return *static_cast<_Type const*>(static_cast<void const*>(&__storage));
+ }
+ void __destroy(){
+ __get().~_Type();
+ }
+};
+
+template<typename _Type>
+struct __storage_wrapper<_Type&>{
+ _Type* __storage;
+
+ template<typename _Arg>
+ constexpr __storage_wrapper(_Arg& __arg):
+ __storage(&__arg){}
+
+ _Type& __get(){
+ return *__storage;
+ }
+ constexpr _Type const& __get() const{
+ return *__storage;
+ }
+};
+
+template<typename _Type>
+struct __variant_storage<_Type,false>{
+ typedef __storage_wrapper<_Type> __type;
+
+ static constexpr _Type& __get(__type& __val){
+ return __val.__get();
+ }
+ static constexpr _Type&& __get_rref(__type& __val){
+ return std::move(__val.__get());
+ }
+ static constexpr const _Type& __get(__type const& __val){
+ return __val.__get();
+ }
+ static constexpr const _Type&& __get_rref(__type const& __val){
+ return std::move(__val.__get());
+ }
+ static void __destroy(__type& __val){
+ __val.__destroy();
+ }
+};
+
+template<typename _Type,bool __b>
+struct __variant_storage<_Type&,__b>{
+ typedef _Type* __type;
+
+ static constexpr _Type& __get(__type& __val){
+ return *__val;
+ }
+ static constexpr _Type& __get_rref(__type& __val){
+ return *__val;
+ }
+ static constexpr _Type& __get(__type const& __val){
+ return *__val;
+ }
+ static constexpr _Type& __get_rref(__type const& __val){
+ return *__val;
+ }
+ static void __destroy(__type&){}
+};
+
+template<typename _Type,bool __b>
+struct __variant_storage<_Type&&,__b>{
+ typedef _Type* __type;
+
+ static constexpr _Type&& __get(__type& __val){
+ return static_cast<_Type&&>(*__val);
+ }
+ static constexpr _Type&& __get_rref(__type& __val){
+ return static_cast<_Type&&>(*__val);
+ }
+ static constexpr _Type&& __get(__type const& __val){
+ return static_cast<_Type&&>(*__val);
+ }
+ static constexpr _Type&& __get_rref(__type const& __val){
+ return static_cast<_Type&&>(*__val);
+ }
+ static void __destroy(__type&){}
+};
+
+template<>
+union __variant_data<>{
+ constexpr __variant_data(){}
+};
+
+template<typename _Type>
+union __variant_data<_Type>{
+ typename __variant_storage<_Type>::__type __val;
+ struct __dummy_type{} __dummy;
+
+ constexpr __variant_data():__dummy(){}
+
+ template<typename ... _Args>
+ constexpr __variant_data(in_place_index_t<0>,_Args&& ... __args):
+ __val(std::forward<_Args>(__args)...){}
+
+ _Type& __get(in_place_index_t<0>){
+ return __variant_storage<_Type>::__get(__val);
+ }
+ /*constexpr*/ _Type&& __get_rref(in_place_index_t<0>){
+ return __variant_storage<_Type>::__get_rref(__val);
+ }
+ constexpr const _Type& __get(in_place_index_t<0>) const{
+ return __variant_storage<_Type>::__get(__val);
+ }
+ constexpr const _Type&& __get_rref(in_place_index_t<0>) const{
+ return __variant_storage<_Type>::__get_rref(__val);
+ }
+ void __destroy(in_place_index_t<0>){
+ __variant_storage<_Type>::__destroy(__val);
+ }
+};
+
+template<typename _Type>
+union __variant_data<_Type&>{
+ typename __variant_storage<_Type&>::__type __val;
+ struct __dummy_type{} __dummy;
+
+ constexpr __variant_data():__dummy(){}
+
+ template<typename ... _Args>
+ constexpr __variant_data(in_place_index_t<0>,_Args&& ... __args):
+ __val(&std::forward<_Args>(__args)...){}
+
+ _Type& __get(in_place_index_t<0>){
+ return __variant_storage<_Type&>::__get(__val);
+ }
+ constexpr _Type& __get(in_place_index_t<0>) const{
+ return __variant_storage<_Type&>::__get(__val);
+ }
+
+ _Type& __get_rref(in_place_index_t<0>){
+ return __variant_storage<_Type&>::__get_rref(__val);
+ }
+ constexpr _Type& __get_rref(in_place_index_t<0>) const{
+ return __variant_storage<_Type&>::__get_rref(__val);
+ }
+
+ void __destroy(in_place_index_t<0>){
+ __variant_storage<_Type&>::__destroy(__val);
+ }
+};
+
+template<typename _Type>
+union __variant_data<_Type&&>{
+ typename __variant_storage<_Type&&>::__type __val;
+ struct __dummy_type{} __dummy;
+
+ constexpr __variant_data():__dummy(){}
+
+ template<typename _Arg>
+ __variant_data(in_place_index_t<0>,_Arg&& __arg):
+ __val(&__arg){}
+
+ _Type&& __get(in_place_index_t<0>){
+ return __variant_storage<_Type&&>::__get(__val);
+ }
+ constexpr _Type&& __get(in_place_index_t<0>) const{
+ return __variant_storage<_Type&&>::__get(__val);
+ }
+ _Type&& __get_rref(in_place_index_t<0>){
+ return __variant_storage<_Type&&>::__get_rref(__val);
+ }
+ constexpr _Type&& __get_rref(in_place_index_t<0>) const{
+ return __variant_storage<_Type&&>::__get_rref(__val);
+ }
+ void __destroy(in_place_index_t<0>){
+ __variant_storage<_Type&&>::__destroy(__val);
+ }
+};
+
+template<typename _Head,typename ... _Rest>
+union __variant_data<_Head,_Rest...>{
+ __variant_data<_Head> __head;
+ __variant_data<_Rest...> __rest;
+
+ constexpr __variant_data():
+ __head(){}
+
+ template<typename ... _Args>
+ constexpr __variant_data(in_place_index_t<0>,_Args&& ... __args):
+ __head(in_place<0>,std::forward<_Args>(__args)...){}
+ template<size_t _Index,typename ... _Args>
+ constexpr __variant_data(in_place_index_t<_Index>,_Args&& ... __args):
+ __rest(in_place<_Index-1>,std::forward<_Args>(__args)...){}
+
+ _Head& __get(in_place_index_t<0>){
+ return __head.__get(in_place<0>);
+ }
+
+ /*constexpr*/ _Head&& __get_rref(in_place_index_t<0>){
+ return __head.__get_rref(in_place<0>);
+ }
+
+ constexpr const _Head& __get(in_place_index_t<0>) const{
+ return __head.__get(in_place<0>);
+ }
+
+ constexpr const _Head&& __get_rref(in_place_index_t<0>) const{
+ return __head.__get_rref(in_place<0>);
+ }
+
+ template<size_t _Index>
+ typename __indexed_type<_Index-1,_Rest...>::__type& __get(
+ in_place_index_t<_Index>){
+ return __rest.__get(in_place<_Index-1>);
+ }
+
+ template<size_t _Index>
+ /*constexpr*/ typename __indexed_type<_Index-1,_Rest...>::__type&& __get_rref(
+ in_place_index_t<_Index>){
+ return __rest.__get_rref(in_place<_Index-1>);
+ }
+
+ template<size_t _Index>
+ constexpr const typename __indexed_type<_Index-1,_Rest...>::__type& __get(
+ in_place_index_t<_Index>) const{
+ return __rest.__get(in_place<_Index-1>);
+ }
+
+ template<size_t _Index>
+ constexpr const typename __indexed_type<_Index-1,_Rest...>::__type&& __get_rref(
+ in_place_index_t<_Index>) const{
+ return __rest.__get_rref(in_place<_Index-1>);
+ }
+
+
+ void __destroy(in_place_index_t<0>){
+ __head.__destroy(in_place<0>);
+ }
+ template<size_t _Index>
+ void __destroy(in_place_index_t<_Index>){
+ __rest.__destroy(in_place<_Index-1>);
+ }
+};
+
+
+template<ptrdiff_t... _Indices>
+struct __index_sequence{
+ typedef __index_sequence<_Indices...,sizeof...(_Indices)> __next;
+ static constexpr size_t __length=sizeof...(_Indices);
+};
+
+template<typename ... _Types>
+struct __type_indices;
+
+template<>
+struct __type_indices<>{
+ typedef __index_sequence<> __type;
+};
+
+template<typename _Type>
+struct __type_indices<_Type>{
+ typedef __index_sequence<0> __type;
+};
+
+template<typename _Type,typename ... _Rest>
+struct __type_indices<_Type,_Rest...>{
+ typedef typename __type_indices<_Rest...>::__type::__next __type;
+};
+
+template<typename _Variant>
+struct __variant_indices;
+
+template<typename ... _Types>
+struct __variant_indices<Variant<_Types...>>{
+ typedef typename __type_indices<_Types...>::__type __type;
+};
+
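+// Each whole-variant operation below (move/copy construction and assignment,
+// destruction, swap, comparison) is dispatched through a static table of
+// function pointers indexed by the active alternative's index, one entry per
+// alternative.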
+template<typename _Variant,
+ typename _Indices=typename __variant_indices<_Variant>::__type>
+struct __move_construct_op_table;
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+struct __move_construct_op_table<_Variant,__index_sequence<_Indices...>>{
+ typedef void(* const __func_type)(_Variant*,_Variant&);
+
+ template<ptrdiff_t _Index>
+ static void __move_construct_func(
+ _Variant * __lhs,_Variant& __rhs){
+ __lhs->template __emplace_construct<_Index>(
+ std::move(get<_Index>(__rhs)));
+ }
+
+ static const __func_type __apply[sizeof...(_Indices)];
+};
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+const typename __move_construct_op_table<_Variant,__index_sequence<_Indices...>>::
+__func_type
+__move_construct_op_table<_Variant,__index_sequence<_Indices...>>::__apply[
+ sizeof...(_Indices)]={
+ &__move_construct_func<_Indices>...
+ };
+
+template<typename _Variant,
+ typename _Indices=typename __variant_indices<_Variant>::__type>
+struct __move_assign_op_table;
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+struct __move_assign_op_table<_Variant,__index_sequence<_Indices...>>{
+ typedef void(* const __func_type)(_Variant*,_Variant&);
+
+ template<ptrdiff_t _Index>
+ static void __move_assign_func(
+ _Variant * __lhs,_Variant& __rhs){
+ get<_Index>(*__lhs)=std::move(get<_Index>(__rhs));
+ }
+
+ static const __func_type __apply[sizeof...(_Indices)];
+};
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+const typename __move_assign_op_table<_Variant,__index_sequence<_Indices...>>::
+__func_type
+__move_assign_op_table<_Variant,__index_sequence<_Indices...>>::__apply[
+ sizeof...(_Indices)]={
+ &__move_assign_func<_Indices>...
+ };
+
+template<typename _Variant,
+ typename _Indices=typename __variant_indices<_Variant>::__type>
+struct __copy_construct_op_table;
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+struct __copy_construct_op_table<_Variant,__index_sequence<_Indices...>>{
+ typedef void(* const __func_type)(_Variant*,_Variant const&);
+
+ template<ptrdiff_t _Index>
+ static void __copy_construct_func(
+ _Variant * __lhs,_Variant const& __rhs){
+ __lhs->template __emplace_construct<_Index>(
+ get<_Index>(__rhs));
+ }
+
+ static const __func_type __apply[sizeof...(_Indices)];
+};
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+const typename __copy_construct_op_table<_Variant,__index_sequence<_Indices...>>::
+__func_type
+__copy_construct_op_table<_Variant,__index_sequence<_Indices...>>::__apply[
+ sizeof...(_Indices)]={
+ &__copy_construct_func<_Indices>...
+ };
+
+template<typename _Variant,
+ typename _Indices=typename __variant_indices<_Variant>::__type>
+struct __copy_assign_op_table;
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+struct __copy_assign_op_table<_Variant,__index_sequence<_Indices...>>{
+ typedef void(* const __func_type)(_Variant*,_Variant const&);
+
+ template<ptrdiff_t _Index>
+ static void __copy_assign_func(
+ _Variant * __lhs,_Variant const& __rhs){
+ get<_Index>(*__lhs)=get<_Index>(__rhs);
+ }
+
+ static const __func_type __apply[sizeof...(_Indices)];
+};
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+const typename __copy_assign_op_table<_Variant,__index_sequence<_Indices...>>::
+__func_type
+__copy_assign_op_table<_Variant,__index_sequence<_Indices...>>::__apply[
+ sizeof...(_Indices)]={
+ &__copy_assign_func<_Indices>...
+ };
+
+template<typename _Variant,
+ typename _Indices=typename __variant_indices<_Variant>::__type>
+struct __destroy_op_table;
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+struct __destroy_op_table<_Variant,__index_sequence<_Indices...>>{
+ typedef void(* const __func_type)(_Variant*);
+
+ template<ptrdiff_t _Index>
+ static void __destroy_func(
+ _Variant * __self){
+ if(__self->__index>=0){
+ __self->__storage.__destroy(in_place<_Index>);
+ }
+ }
+
+ static const __func_type __apply[sizeof...(_Indices)];
+};
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+const typename __destroy_op_table<_Variant,__index_sequence<_Indices...>>::
+__func_type
+__destroy_op_table<_Variant,__index_sequence<_Indices...>>::__apply[
+ sizeof...(_Indices)]={
+ &__destroy_func<_Indices>...
+ };
+
+template<typename _Variant,
+ typename _Indices=typename __variant_indices<_Variant>::__type>
+struct __swap_op_table;
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+struct __swap_op_table<_Variant,__index_sequence<_Indices...>>{
+ typedef void(* const __func_type)(_Variant&,_Variant&);
+
+ template<ptrdiff_t _Index>
+ static void __swap_func(
+ _Variant & __lhs,_Variant & __rhs){
+ swap(get<_Index>(__lhs),get<_Index>(__rhs));
+ }
+
+ static const __func_type __apply[sizeof...(_Indices)];
+};
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+const typename __swap_op_table<_Variant,__index_sequence<_Indices...>>::
+__func_type
+__swap_op_table<_Variant,__index_sequence<_Indices...>>::__apply[
+ sizeof...(_Indices)]={
+ &__swap_func<_Indices>...
+ };
+
+template<typename _Variant,
+ typename _Indices=typename __variant_indices<_Variant>::__type>
+struct __equality_op_table;
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+struct __equality_op_table<_Variant,__index_sequence<_Indices...>>{
+ typedef bool(* const __compare_func_type)(_Variant const&,_Variant const&);
+
+ template<ptrdiff_t _Index>
+ static constexpr bool __equality_compare_func(
+ _Variant const& __lhs,_Variant const& __rhs){
+ return get<_Index>(__lhs)==get<_Index>(__rhs);
+ }
+
+ static constexpr __compare_func_type __equality_compare[sizeof...(_Indices)]={
+ &__equality_compare_func<_Indices>...
+ };
+};
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+constexpr typename __equality_op_table<_Variant,__index_sequence<_Indices...>>::
+__compare_func_type
+__equality_op_table<_Variant,__index_sequence<_Indices...>>::__equality_compare[
+ sizeof...(_Indices)];
+
+template<typename _Variant,
+ typename _Indices=typename __variant_indices<_Variant>::__type>
+struct __less_than_op_table;
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+struct __less_than_op_table<_Variant,__index_sequence<_Indices...>>{
+ typedef bool(* const __compare_func_type)(_Variant const&,_Variant const&);
+
+ template<ptrdiff_t _Index>
+ static constexpr bool __less_than_compare_func(
+ _Variant const& __lhs,_Variant const& __rhs){
+ return get<_Index>(__lhs)<get<_Index>(__rhs);
+ }
+
+ static constexpr __compare_func_type __less_than_compare[sizeof...(_Indices)]={
+ &__less_than_compare_func<_Indices>...
+ };
+};
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+constexpr typename __less_than_op_table<_Variant,__index_sequence<_Indices...>>::
+__compare_func_type
+__less_than_op_table<_Variant,__index_sequence<_Indices...>>::__less_than_compare[
+ sizeof...(_Indices)];
+
+template<typename _Variant>
+struct __variant_storage_type;
+
+template<typename _Derived,bool __trivial_destructor>
+struct __variant_base
+{
+ ~__variant_base(){
+ static_cast<_Derived*>(this)->__destroy_self();
+ }
+};
+
+template<typename _Derived>
+struct __variant_base<_Derived,true>{
+};
+
+
+template<ptrdiff_t _Offset,typename _CurrentSequence,
+ typename _Type,typename ... _Types>
+struct __all_indices_helper;
+
+template<ptrdiff_t _Offset,ptrdiff_t ... _Indices,
+ typename _Type,typename ... _Rest>
+struct __all_indices_helper<
+ _Offset,__index_sequence<_Indices...>,
+ _Type,_Type,_Rest...>{
+ typedef typename __all_indices_helper<
+ _Offset+1,__index_sequence<_Indices...,_Offset>,_Type,_Rest...>::__type
+ __type;
+};
+
+template<ptrdiff_t _Offset,typename _CurrentSequence,
+ typename _Type,typename _Head,typename ... _Rest>
+struct __all_indices_helper<_Offset,_CurrentSequence,_Type,_Head,_Rest...>{
+ typedef typename __all_indices_helper<
+ _Offset+1,_CurrentSequence,_Type,_Rest...>::__type __type;
+};
+
+template<ptrdiff_t _Offset,typename _CurrentSequence,typename _Type>
+struct __all_indices_helper<_Offset,_CurrentSequence,_Type>{
+ typedef _CurrentSequence __type;
+};
+
+template<typename _Type,typename ... _Types>
+struct __all_indices{
+ typedef typename __all_indices_helper<
+ 0,__index_sequence<>,_Type,_Types...>::__type __type;
+};
+
+template<typename ... _Sequences>
+struct __combine_sequences;
+
+template<ptrdiff_t ... _Indices1,ptrdiff_t ... _Indices2>
+struct __combine_sequences<
+ __index_sequence<_Indices1...>,__index_sequence<_Indices2...>>{
+ typedef __index_sequence<_Indices1...,_Indices2...> __type;
+};
+
+template<typename _Sequence,typename ... _Rest>
+struct __combine_sequences<_Sequence,_Rest...>{
+ typedef typename __combine_sequences<
+ _Sequence,
+ typename __combine_sequences<_Rest...>::__type>::__type __type;
+};
+
+template<typename _Indices>
+struct __first_index;
+
+template<ptrdiff_t _FirstIndex,ptrdiff_t ... _Rest>
+struct __first_index<__index_sequence<_FirstIndex,_Rest...>>{
+ static constexpr ptrdiff_t __value=_FirstIndex;
+};
+
+template<ptrdiff_t _Offset,typename _CurrentSequence,
+ typename _Type,typename ... _Types>
+struct __constructible_matches_helper;
+
+template<ptrdiff_t _Offset,typename _Sequence,typename _Type>
+struct __constructible_matches_helper<
+ _Offset,_Sequence,_Type>{
+ typedef _Sequence __type;
+};
+
+template<bool _Accept,ptrdiff_t _Entry>
+struct __sequence_or_empty{
+ typedef __index_sequence<> __type;
+};
+
+template<ptrdiff_t _Entry>
+struct __sequence_or_empty<true,_Entry>{
+ typedef __index_sequence<_Entry> __type;
+};
+
+template<ptrdiff_t _Offset,typename _CurrentSequence,
+ typename _Type,typename _Head,typename ... _Rest>
+struct __constructible_matches_helper<
+ _Offset,_CurrentSequence,_Type,_Head,_Rest...>{
+ typedef
+ typename __constructible_matches_helper<
+ _Offset+1,
+ typename __combine_sequences<
+ _CurrentSequence,
+ typename __sequence_or_empty<
+ std::is_constructible<_Head,_Type>::value,
+ _Offset>::__type>::__type,
+ _Type,_Rest...>::__type __type;
+};
+
+template<typename _Type,typename ... _Types>
+struct __constructible_matches{
+ typedef typename __constructible_matches_helper<
+ 0,__index_sequence<>,_Type,_Types...>::__type __type;
+};
+
+template<typename _Type,typename ... _Types>
+struct __type_index_to_construct{
+ typedef typename __all_indices<_Type,_Types...>::__type __direct_matches;
+ typedef typename __all_indices<
+ typename std::remove_const<
+ typename std::remove_reference<_Type>::type
+ >::type,_Types...>::__type __value_matches;
+ typedef typename __all_indices<
+ _Type,
+ typename std::remove_const<
+ typename std::remove_reference<_Types>::type
+ >::type...>::__type __rref_matches;
+
+ typedef typename __constructible_matches<_Type,_Types...>::__type
+ __constructibles;
+
+ static_assert(
+ (__direct_matches::__length>0) ||
+ (__value_matches::__length>0) ||
+ (__rref_matches::__length>0) ||
+ (__constructibles::__length==1),
+ "For conversion construction of variants, exactly one type must be constructible");
+
+ typedef typename __combine_sequences<
+ __direct_matches,__value_matches,__rref_matches,
+ __constructibles>::__type __all_matches;
+
+ static constexpr ptrdiff_t __value=__first_index<__all_matches>::__value;
+};
+
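+// __replace_construct_helper picks a strategy for replacing the active
+// alternative depending on which constructions can throw: construct the new
+// value directly in place, construct it in local storage first and then move
+// it in, or back up the old value so it can be restored on failure.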
+struct __replace_construct_helper{
+ template<
+ ptrdiff_t _Index,
+ bool __construct_directly,
+ bool __indexed_type_has_nothrow_move,
+ bool __other_types_have_nothrow_move>
+ struct __helper;
+
+ template<typename _Variant,
+ typename _Indices=typename __variant_indices<_Variant>::__type>
+ struct __op_table;
+};
+
+template<
+ ptrdiff_t _Index,
+ bool __other_types_have_nothrow_move>
+struct __replace_construct_helper::__helper<
+ _Index,false,true,__other_types_have_nothrow_move>{
+
+ template<typename _Variant,typename ... _Args>
+ static void __trampoline(_Variant& __v,_Args&& ... __args){
+ __v.template __two_stage_replace<_Index>(__args...);
+ }
+};
+
+template<
+ ptrdiff_t _Index,
+ bool __indexed_type_has_nothrow_move,
+ bool __other_types_have_nothrow_move>
+struct __replace_construct_helper::__helper<
+ _Index,true,__indexed_type_has_nothrow_move,
+ __other_types_have_nothrow_move>{
+
+ template<typename _Variant,typename ... _Args>
+ static void __trampoline(_Variant& __v,_Args&& ... __args){
+ __v.template __direct_replace<_Index>(std::forward<_Args>(__args)...);
+ }
+};
+
+
+template<
+ ptrdiff_t _Index>
+struct __replace_construct_helper::__helper<
+ _Index,false,false,true>{
+
+ template<typename _Variant,typename ... _Args>
+ static void __trampoline(_Variant& __v,_Args&& ... __args){
+ __v.template __local_backup_replace<_Index>(std::forward<_Args>(__args)...);
+ }
+};
+
+template<
+ ptrdiff_t _Index>
+struct __replace_construct_helper::__helper<
+ _Index,false,false,false>{
+
+ template<typename _Variant,typename ... _Args>
+ static void __trampoline(_Variant& __v,_Args&& ... __args){
+ __v.template __direct_replace<_Index>(std::forward<_Args>(__args)...);
+ }
+};
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+struct __replace_construct_helper::__op_table<_Variant,__index_sequence<_Indices...>>{
+ typedef void(* const __move_func_type)(_Variant*,_Variant&);
+ typedef void(* const __copy_func_type)(_Variant*,_Variant const&);
+
+ template<ptrdiff_t _Index>
+ static void __move_assign_func(
+ _Variant * __lhs,_Variant& __rhs){
+ __lhs->template __replace_construct<_Index>(std::move(get<_Index>(__rhs)));
+ __rhs.__destroy_self();
+ }
+
+ template<ptrdiff_t _Index>
+ static void __copy_assign_func(
+ _Variant * __lhs,_Variant const& __rhs){
+ __lhs->template __replace_construct<_Index>(get<_Index>(__rhs));
+ }
+
+ static const __move_func_type __move_assign[sizeof...(_Indices)];
+ static const __copy_func_type __copy_assign[sizeof...(_Indices)];
+};
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+const typename __replace_construct_helper::__op_table<
+ _Variant,__index_sequence<_Indices...>>::__move_func_type
+__replace_construct_helper::__op_table<
+ _Variant,__index_sequence<_Indices...>>::__move_assign[
+ sizeof...(_Indices)]={
+ &__move_assign_func<_Indices>...
+ };
+
+template<typename _Variant,ptrdiff_t ... _Indices>
+const typename __replace_construct_helper::__op_table<
+ _Variant,__index_sequence<_Indices...>>::__copy_func_type
+__replace_construct_helper::__op_table<
+ _Variant,__index_sequence<_Indices...>>::__copy_assign[
+ sizeof...(_Indices)]={
+ &__copy_assign_func<_Indices>...
+ };
+
+template<ptrdiff_t _Index,ptrdiff_t _MaskIndex,typename _Storage>
+struct __backup_storage_ops{
+ static void __move_construct_func(
+ _Storage * __dest,_Storage& __source){
+ new(__dest) _Storage(
+ in_place<_Index>,
+ std::move(__source.__get(in_place<_Index>)));
+ }
+ static void __destroy_func(_Storage * __obj){
+ __obj->__destroy(in_place<_Index>);
+ };
+};
+
+template<ptrdiff_t _Index,typename _Storage>
+struct __backup_storage_ops<_Index,_Index,_Storage>{
+ static void __move_construct_func(_Storage *,_Storage&){
+ __THROW_EXCEPTION(std::bad_alloc());
+ };
+ static void __destroy_func(_Storage *){
+ __THROW_EXCEPTION(std::bad_alloc());
+ };
+};
+
+template<ptrdiff_t _MaskIndex,typename _Storage,typename _Indices>
+struct __backup_storage_op_table;
+
+template<ptrdiff_t _MaskIndex,typename _Storage,ptrdiff_t ... _Indices>
+struct __backup_storage_op_table<
+ _MaskIndex,_Storage,__index_sequence<_Indices...> >
+{
+ typedef void (*__move_func_type)(_Storage * __dest,_Storage& __source);
+ typedef void (*__destroy_func_type)(_Storage * __obj);
+
+ template<size_t _Index>
+ struct __helper{
+ typedef __backup_storage_ops<_Index,_MaskIndex,_Storage> __ops;
+ };
+
+ static const __move_func_type __move_ops[sizeof...(_Indices)];
+ static const __destroy_func_type __destroy_ops[sizeof...(_Indices)];
+};
+
+template<ptrdiff_t _MaskIndex,typename _Storage,ptrdiff_t ... _Indices>
+const typename __backup_storage_op_table<
+ _MaskIndex,_Storage,__index_sequence<_Indices...> >::__move_func_type
+__backup_storage_op_table<
+ _MaskIndex,_Storage,__index_sequence<_Indices...> >::__move_ops[
+ sizeof...(_Indices)]={
+ &__helper<_Indices>::__ops::__move_construct_func...
+ };
+
+template<ptrdiff_t _MaskIndex,typename _Storage,ptrdiff_t ... _Indices>
+const typename __backup_storage_op_table<
+ _MaskIndex,_Storage,__index_sequence<_Indices...> >::__destroy_func_type
+__backup_storage_op_table<
+ _MaskIndex,_Storage,__index_sequence<_Indices...> >::__destroy_ops[
+ sizeof...(_Indices)]={
+ &__helper<_Indices>::__ops::__destroy_func...
+ };
+
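+// __backup_storage moves the currently active alternative into temporary
+// storage before a potentially throwing construction; its destructor moves the
+// value back unless __destroy() has been called to commit the replacement.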
+template<ptrdiff_t _Index,typename ... _Types>
+struct __backup_storage{
+ typedef __variant_data<_Types...> __storage_type;
+
+ typedef __backup_storage_op_table<
+ _Index,__storage_type,typename __type_indices<_Types...>::__type>
+ __op_table_type;
+
+ ptrdiff_t __backup_index;
+ __storage_type& __live_storage;
+ __storage_type __backup;
+
+ __backup_storage(ptrdiff_t __live_index_,__storage_type& __live_storage_):
+ __backup_index(__live_index_),__live_storage(__live_storage_){
+ if(__backup_index>=0){
+ __op_table_type::__move_ops[__backup_index](
+ &__backup,__live_storage);
+ __op_table_type::__destroy_ops[__backup_index](
+ &__live_storage);
+ }
+ }
+ void __destroy(){
+ if(__backup_index>=0)
+ __op_table_type::__destroy_ops[__backup_index](
+ &__backup);
+ __backup_index=-1;
+ }
+
+ ~__backup_storage(){
+ if(__backup_index>=0){
+ __op_table_type::__move_ops[__backup_index](
+ &__live_storage,__backup);
+ __destroy();
+ }
+ }
+};
+
+template<typename ... _Types>
+struct __all_move_constructible;
+
+template<typename _Head,typename ... _Rest>
+struct __all_move_constructible<_Head,_Rest...>
+{
+ static constexpr bool value=std::is_move_constructible<_Head>::value && __all_move_constructible<_Rest...>::value;
+};
+
+template<>
+struct __all_move_constructible<>:
+ std::true_type{};
+
+template<typename ... _Types>
+struct __all_move_assignable;
+
+template<typename _Head,typename ... _Rest>
+struct __all_move_assignable<_Head,_Rest...>
+{
+ static constexpr bool value=std::is_move_assignable<_Head>::value && __all_move_assignable<_Rest...>::value;
+};
+
+template<>
+struct __all_move_assignable<>:
+ std::true_type{};
+
+template<typename ... _Types>
+struct __all_copy_assignable;
+
+template<typename _Head,typename ... _Rest>
+struct __all_copy_assignable<_Head,_Rest...>
+{
+ static constexpr bool value=std::is_copy_assignable<_Head>::value && __all_copy_assignable<_Rest...>::value;
+};
+
+template<>
+struct __all_copy_assignable<>:
+ std::true_type{};
+
+namespace __swap_test_detail{
+using std::swap;
+
+template<typename _Other>
+struct __swap_result{};
+
+template<typename>
+static char __test(...);
+template <typename _Other>
+static std::pair<char, std::pair<char, __swap_result<decltype(
+ swap(std::declval<_Other &>(),std::declval<_Other &>()))>>>
+__test(_Other *);
+}
+
+template <typename _Type> struct __is_swappable {
+ static constexpr bool value =
+ sizeof(__swap_test_detail::__test<_Type>(0)) != 1;
+};
+
+template<typename ... _Types>
+struct __all_swappable;
+
+template<typename _Head,typename ... _Rest>
+struct __all_swappable<_Head,_Rest...>
+{
+ static constexpr bool value=__is_swappable<_Head>::value && __all_swappable<_Rest...>::value;
+};
+
+template<>
+struct __all_swappable<>:
+ std::true_type{};
+
+template<bool _MoveConstructible,typename ... _Types>
+struct __noexcept_variant_move_construct_impl{};
+
+template<typename _Head,typename ... _Rest>
+struct __noexcept_variant_move_construct_impl<true,_Head,_Rest...>{
+ static constexpr bool value=noexcept(_Head(std::declval<_Head&&>())) && __noexcept_variant_move_construct_impl<true,_Rest...>::value;
+};
+
+template<>
+struct __noexcept_variant_move_construct_impl<true>{
+ static constexpr bool value=true;
+};
+
+template<typename ... _Types>
+struct __noexcept_variant_move_construct:
+__noexcept_variant_move_construct_impl<__all_move_constructible<_Types...>::value,_Types...>
+{};
+
+template<bool _MoveAssignable,typename ... _Types>
+struct __noexcept_variant_move_assign_impl{};
+
+template <typename _Head, typename... _Rest>
+struct __noexcept_variant_move_assign_impl<true, _Head, _Rest...> {
+ static constexpr bool value =
+ std::is_nothrow_move_assignable<_Head>::value &&
+ std::is_nothrow_move_constructible<_Head>::value &&
+ __noexcept_variant_move_assign_impl<true, _Rest...>::value;
+};
+
+template<>
+struct __noexcept_variant_move_assign_impl<true>{
+ static constexpr bool value=true;
+};
+
+template <typename... _Types>
+struct __noexcept_variant_move_assign
+ : __noexcept_variant_move_assign_impl<
+ __all_move_assignable<_Types...>::value &&
+ __all_move_constructible<_Types...>::value,
+ _Types...> {};
+
+template<typename ... _Types>
+struct __all_copy_constructible;
+
+template<typename _Head,typename ... _Rest>
+struct __all_copy_constructible<_Head,_Rest...>
+{
+ static constexpr bool value=std::is_copy_constructible<_Head>::value && __all_copy_constructible<_Rest...>::value;
+};
+
+template<>
+struct __all_copy_constructible<>:
+ std::true_type{};
+
+template<bool _CopyConstructible,typename ... _Types>
+struct __noexcept_variant_const_copy_construct_impl{};
+
+template<typename _Head,typename ... _Rest>
+struct __noexcept_variant_const_copy_construct_impl<true,_Head,_Rest...>{
+ static constexpr bool value=noexcept(_Head(std::declval<_Head const&>())) && __noexcept_variant_const_copy_construct_impl<true,_Rest...>::value;
+};
+
+template<>
+struct __noexcept_variant_const_copy_construct_impl<true>{
+ static constexpr bool value=true;
+};
+
+template<typename ... _Types>
+struct __noexcept_variant_const_copy_construct:
+__noexcept_variant_const_copy_construct_impl<__all_copy_constructible<_Types...>::value,_Types...>
+{};
+
+template<bool _CopyNon_Constructible,typename ... _Types>
+struct __noexcept_variant_non_const_copy_construct_impl{};
+
+template<typename _Head,typename ... _Rest>
+struct __noexcept_variant_non_const_copy_construct_impl<true,_Head,_Rest...>{
+ static constexpr bool value=noexcept(_Head(std::declval<_Head&>())) && __noexcept_variant_non_const_copy_construct_impl<true,_Rest...>::value;
+};
+
+template<>
+struct __noexcept_variant_non_const_copy_construct_impl<true>{
+ static constexpr bool value=true;
+};
+
+template<typename ... _Types>
+struct __noexcept_variant_non_const_copy_construct:
+__noexcept_variant_non_const_copy_construct_impl<__all_copy_constructible<_Types...>::value,_Types...>
+{};
+
+template<bool _Swappable,typename ... _Types>
+struct __noexcept_variant_swap_impl{};
+
+template <typename _Head, typename... _Rest>
+struct __noexcept_variant_swap_impl<true, _Head, _Rest...> {
+ static constexpr bool value =
+ noexcept(swap(std::declval<_Head&>(),std::declval<_Head&>())) &&
+ __noexcept_variant_swap_impl<true, _Rest...>::value;
+};
+
+template<>
+struct __noexcept_variant_swap_impl<true>{
+ static constexpr bool value=true;
+};
+
+template<typename ... _Types>
+struct __noexcept_variant_swap:
+__noexcept_variant_swap_impl<__all_swappable<_Types...>::value,_Types...>
+{};
+
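+// Variant stores its alternatives in a recursive union (__variant_data)
+// alongside a signed discriminator; an index of -1 marks the valueless state.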
+template<typename ... _Types>
+class Variant:
+ private __variant_base<
+ Variant<_Types...>,__all_trivially_destructible<_Types...>::__value>
+{
+ typedef __variant_base<Variant<_Types...>,__all_trivially_destructible<_Types...>::__value> __base_type;
+ friend __base_type;
+ friend struct __copy_construct_op_table<Variant>;
+ friend struct __copy_assign_op_table<Variant>;
+ friend struct __move_construct_op_table<Variant>;
+ friend struct __move_assign_op_table<Variant>;
+ friend struct __destroy_op_table<Variant>;
+
+ template<ptrdiff_t _Index,typename ... _Types2>
+ friend struct __variant_accessor;
+
+ friend struct __replace_construct_helper;
+
+ typedef __variant_data<_Types...> __storage_type;
+ __storage_type __storage;
+ typename __discriminator_type<sizeof ... (_Types)>::__type __index;
+
+ template<size_t _Index,typename ... _Args>
+ size_t __emplace_construct(_Args&& ... __args){
+ new(&__storage) __storage_type(
+ in_place<_Index>,std::forward<_Args>(__args)...);
+ return _Index;
+ }
+
+ void __destroy_self(){
+ if(valueless_by_exception())
+ return;
+ __destroy_op_table<Variant>::__apply[index()](this);
+ __index=-1;
+ }
+
+ ptrdiff_t __move_construct(Variant& __other){
+ ptrdiff_t const __other_index=__other.index();
+ if(__other_index==-1)
+ return -1;
+ __move_construct_op_table<Variant>::__apply[__other_index](this,__other);
+ __other.__destroy_self();
+ return __other_index;
+ }
+
+ ptrdiff_t __copy_construct(Variant const& __other){
+ ptrdiff_t const __other_index=__other.index();
+ if(__other_index==-1)
+ return -1;
+ __copy_construct_op_table<Variant>::__apply[__other_index](this,__other);
+ return __other_index;
+ }
+
+ template<size_t _Index,typename ... _Args>
+ void __replace_construct(_Args&& ... __args){
+ typedef typename __indexed_type<_Index,_Types...>::__type __this_type;
+ __replace_construct_helper::__helper<
+ _Index,
+ __storage_nothrow_constructible<__this_type,_Args...>::__value ||
+ (sizeof...(_Types)==1),
+ __storage_nothrow_move_constructible<__this_type>::__value,
+ __other_storage_nothrow_move_constructible<
+ _Index,_Types...>::__value
+ >::__trampoline(*this,std::forward<_Args>(__args)...);
+ }
+
+ template<size_t _Index,typename ... _Args>
+ void __two_stage_replace(_Args&& ... __args){
+ typedef typename __indexed_type<_Index,_Types...>::__type __type;
+ __variant_data<__type> __local(
+ in_place<0>,std::forward<_Args>(__args)...);
+ __destroy_self();
+ __emplace_construct<_Index>(
+ std::move(__local.__get(in_place<0>)));
+ __index=_Index;
+ __local.__destroy(in_place<0>);
+ }
+
+ template<size_t _Index,typename ... _Args>
+ void __local_backup_replace(_Args&& ... __args){
+ __backup_storage<_Index,_Types...> __backup(__index,__storage);
+ __emplace_construct<_Index>(std::forward<_Args>(__args)...);
+ __index=_Index;
+ __backup.__destroy();
+ }
+
+ template<size_t _Index,typename ... _Args>
+ void __direct_replace(_Args&& ... __args) {
+ __destroy_self();
+ __emplace_construct<_Index>(std::forward<_Args>(__args)...);
+ __index=_Index;
+ }
+
+ struct __private_type{};
+
+public:
+ constexpr Variant()
+ __NOEXCEPT_(noexcept(typename __indexed_type<0,_Types...>::__type())):
+ __storage(in_place<0>),
+ __index(0)
+ {}
+
+ constexpr Variant(typename std::conditional<__all_move_constructible<_Types...>::value,Variant,__private_type>::type&& __other)
+ __NOEXCEPT_(__noexcept_variant_move_construct<_Types...>::value):
+ __index(__move_construct(__other))
+ {}
+
+ constexpr Variant(typename std::conditional<!__all_move_constructible<_Types...>::value,Variant,__private_type>::type&& __other)=delete;
+
+ constexpr Variant(typename std::conditional<__all_copy_constructible<_Types...>::value,Variant,__private_type>::type& __other)
+ __NOEXCEPT_(__noexcept_variant_non_const_copy_construct<_Types...>::value):
+ __index(__copy_construct(__other))
+ {}
+
+ constexpr Variant(typename std::conditional<!__all_copy_constructible<_Types...>::value,Variant,__private_type>::type& __other)=delete;
+
+ constexpr Variant(typename std::conditional<__all_copy_constructible<_Types...>::value,Variant,__private_type>::type const& __other)
+ __NOEXCEPT_(__noexcept_variant_const_copy_construct<_Types...>::value):
+ __index(__copy_construct(__other))
+ {}
+
+ constexpr Variant(typename std::conditional<!__all_copy_constructible<_Types...>::value,Variant,__private_type>::type const& __other)=delete;
+
+ template<typename _Type,typename ... _Args>
+ explicit constexpr Variant(in_place_type_t<_Type>,_Args&& ... __args):
+ __storage(
+ in_place<__type_index<_Type,_Types...>::__value>,
+ std::forward<_Args>(__args)...),
+ __index(__type_index<_Type,_Types...>::__value)
+ {
+ static_assert(std::is_constructible<_Type,_Args...>::value,"Type must be constructible from args");
+ }
+
+ template<size_t _Index,typename ... _Args>
+ explicit constexpr Variant(in_place_index_t<_Index>,_Args&& ... __args):
+ __storage(in_place<_Index>,std::forward<_Args>(__args)...),
+ __index(_Index)
+ {
+ static_assert(std::is_constructible<typename __indexed_type<_Index,_Types...>::__type,_Args...>::value,"Type must be constructible from args");
+ }
+
+ template<typename _Type>
+ constexpr Variant(_Type&& __x):
+ __storage(
+ in_place<
+ __type_index_to_construct<_Type,_Types...>::__value>,
+ std::forward<_Type>(__x)),
+ __index(__type_index_to_construct<_Type,_Types...>::__value)
+ {}
+
+ template<typename _Type,
+ typename _Enable=
+ typename std::enable_if<
+ (__constructible_matches<std::initializer_list<_Type>,_Types...>::__type::__length>0)
+ >::type>
+ constexpr Variant(std::initializer_list<_Type> __x):
+ __storage(
+ in_place<
+ __type_index_to_construct<std::initializer_list<_Type>,_Types...>::__value>,
+ __x),
+ __index(__type_index_to_construct<std::initializer_list<_Type>,_Types...>::__value)
+ {}
+
+ template<typename _Type>
+ Variant& operator=(_Type&& __x){
+ constexpr size_t _Index=
+ __type_index_to_construct<_Type,_Types...>::__value;
+ if(_Index==__index){
+ get<_Index>(*this)=std::forward<_Type>(__x);
+ }
+ else{
+ __replace_construct<_Index>(std::forward<_Type>(__x));
+ }
+ return *this;
+ }
+
+ Variant &operator=(
+ typename std::conditional<
+ !(__all_copy_constructible<_Types...>::value &&
+ __all_move_constructible<_Types...>::value &&
+ __all_copy_assignable<_Types...>::value),
+ Variant, __private_type>::type const &__other) = delete;
+
+ Variant &operator=(
+ typename std::conditional<
+ __all_copy_constructible<_Types...>::value &&
+ __all_move_constructible<_Types...>::value &&
+ __all_copy_assignable<_Types...>::value,
+ Variant, __private_type>::type const &__other) {
+ if (__other.valueless_by_exception()) {
+ __destroy_self();
+ }
+ else if(__other.index()==index()){
+ __copy_assign_op_table<Variant>::__apply[index()](this,__other);
+ }
+ else{
+ __replace_construct_helper::__op_table<Variant>::__copy_assign[
+ __other.index()](this,__other);
+ }
+ return *this;
+ }
+ Variant &operator=(
+ typename std::conditional<
+ !(__all_copy_constructible<_Types...>::value &&
+ __all_move_constructible<_Types...>::value &&
+ __all_copy_assignable<_Types...>::value),
+ Variant, __private_type>::type &__other) = delete;
+
+ Variant &operator=(
+ typename std::conditional<
+ __all_copy_constructible<_Types...>::value &&
+ __all_move_constructible<_Types...>::value &&
+ __all_copy_assignable<_Types...>::value,
+ Variant, __private_type>::type &__other) {
+ if(__other.valueless_by_exception()){
+ __destroy_self();
+ }
+ else if(__other.index()==index()){
+ __copy_assign_op_table<Variant>::__apply[index()](this,__other);
+ }
+ else{
+ __replace_construct_helper::__op_table<Variant>::__copy_assign[
+ __other.index()](this,__other);
+ }
+ return *this;
+ }
+ Variant &operator=(
+ typename std::conditional<
+ !(__all_move_constructible<_Types...>::value &&
+ __all_move_assignable<_Types...>::value),
+ Variant, __private_type>::type &&__other) = delete;
+
+ Variant &operator=(
+ typename std::conditional<__all_move_constructible<_Types...>::value &&
+ __all_move_assignable<_Types...>::value,
+ Variant, __private_type>::type &&
+ __other) __NOEXCEPT_(__noexcept_variant_move_assign<_Types...>::value) {
+ if (__other.valueless_by_exception()) {
+ __destroy_self();
+ }
+ else if(__other.index()==index()){
+ __move_assign_op_table<Variant>::__apply[index()](this,__other);
+ __other.__destroy_self();
+ }
+ else{
+ __replace_construct_helper::__op_table<Variant>::__move_assign[
+ __other.index()](this,__other);
+ }
+ return *this;
+ }
+
+ template<typename _Type,typename ... _Args>
+ void emplace(_Args&& ... __args){
+ __direct_replace<__type_index<_Type,_Types...>::__value>(
+ std::forward<_Args>(__args)...);
+ }
+
+ template<size_t _Index,typename ... _Args>
+ void emplace(_Args&& ... __args){
+ __direct_replace<_Index>(std::forward<_Args>(__args)...);
+ }
+
+ constexpr bool valueless_by_exception() const __NOEXCEPT{
+ return __index==-1;
+ }
+ constexpr ptrdiff_t index() const __NOEXCEPT{
+ return __index;
+ }
+
+ void swap(
+ typename std::conditional<
+ __all_swappable<_Types...>::value &&
+ __all_move_constructible<_Types...>::value,
+ Variant, __private_type>::type
+ &__other) __NOEXCEPT_(__noexcept_variant_swap<_Types...>::value) {
+ if (__other.index() == index()) {
+ if(!valueless_by_exception())
+ __swap_op_table<Variant>::__apply[index()](*this,__other);
+ }
+ else{
+ Variant __temp(std::move(__other));
+ __other.__index=__other.__move_construct(*this);
+ __index=__move_construct(__temp);
+ }
+ }
+};
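The private __replace_construct/__two_stage_replace/__local_backup_replace/__direct_replace members just above choose among three strategies when assignment or emplace must switch the Variant to a different alternative, trading speed against the risk of ending up valueless if a constructor throws. A simplified, self-contained sketch of that policy follows (hypothetical names; the real code tag-dispatches at compile time through __replace_construct_helper rather than branching at run time):

#include <new>
#include <type_traits>
#include <utility>

// Sketch of the alternative-replacement policy. The real code selects the
// strategy at compile time, so only the chosen branch is ever instantiated;
// a runtime if-chain is used here purely to keep the illustration short.
template<typename NewType, typename OldType, typename... Args>
void replaceAlternativeSketch(void* storage, OldType& current, Args&&... args)
{
    if (std::is_nothrow_constructible<NewType, Args&&...>::value) {
        // Direct replace: construction cannot throw, so destroying the old
        // value first can never leave the variant valueless.
        current.~OldType();
        ::new (storage) NewType(std::forward<Args>(args)...);
    } else if (std::is_nothrow_move_constructible<NewType>::value) {
        // Two-stage replace: any throwing work happens while the old value is
        // still intact; the final move into place is nothrow.
        NewType temporary(std::forward<Args>(args)...);
        current.~OldType();
        ::new (storage) NewType(std::move(temporary));
    } else {
        // Backup replace: move the old value aside, then construct the new one.
        // The real __local_backup_replace restores the backup if construction
        // throws; that recovery path is omitted from this sketch.
        OldType backup(std::move(current));
        current.~OldType();
        ::new (storage) NewType(std::forward<Args>(args)...);
    }
}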
+
+template<>
+class Variant<>{
+public:
+ Variant()=delete;
+
+ constexpr bool valueless_by_exception() const __NOEXCEPT{
+ return true;
+ }
+ constexpr ptrdiff_t index() const __NOEXCEPT{
+ return -1;
+ }
+
+ void swap(Variant&){}
+};
+
+template <typename... _Types>
+typename std::enable_if<__all_swappable<_Types...>::value &&
+ __all_move_constructible<_Types...>::value,
+ void>::type
+swap(Variant<_Types...> &__lhs, Variant<_Types...> &__rhs) __NOEXCEPT_(
+ __noexcept_variant_swap<_Types...>::value) {
+ __lhs.swap(__rhs);
+}
+
+template<ptrdiff_t _Index,typename ... _Types>
+struct __variant_accessor{
+ typedef typename __indexed_type<_Index,_Types...>::__type __type;
+ static constexpr __type& get(Variant<_Types...>& __v){
+ return __v.__storage.__get(in_place<_Index>);
+ }
+ static constexpr __type const& get(Variant<_Types...> const& __v){
+ return __v.__storage.__get(in_place<_Index>);
+ }
+ static constexpr __type&& get(Variant<_Types...>&& __v){
+ return __v.__storage.__get_rref(in_place<_Index>);
+ }
+ static constexpr const __type&& get(Variant<_Types...> const&& __v){
+ return __v.__storage.__get_rref(in_place<_Index>);
+ }
+};
+
+template<typename _Type,typename ... _Types>
+constexpr _Type& get(Variant<_Types...>& __v){
+ return get<__type_index<_Type,_Types...>::__value>(__v);
+}
+
+template<typename _Type,typename ... _Types>
+constexpr _Type&& get(Variant<_Types...>&& __v){
+ return get<__type_index<_Type,_Types...>::__value>(std::move(__v));
+}
+
+template<typename _Type,typename ... _Types>
+constexpr _Type const& get(Variant<_Types...> const& __v){
+ return get<__type_index<_Type,_Types...>::__value>(__v);
+}
+
+template<typename _Type,typename ... _Types>
+constexpr const _Type&& get(Variant<_Types...> const&& __v){
+ return get<__type_index<_Type,_Types...>::__value>(std::move(__v));
+}
+
+
+template<ptrdiff_t _Index,typename ... _Types>
+constexpr typename __indexed_type<_Index,_Types...>::__type const& get(Variant<_Types...> const& __v){
+ return *(
+ (_Index!=__v.index())
+ ? &__throw_bad_variant_access<typename __indexed_type<_Index,_Types...>::__type const&>("Bad Variant index in get")
+ : &__variant_accessor<_Index,_Types...>::get(__v)
+ );
+}
+
+template<ptrdiff_t _Index,typename ... _Types>
+constexpr typename __indexed_type<_Index,_Types...>::__type& get(Variant<_Types...>& __v){
+ return *(
+ (_Index!=__v.index())
+ ? &__throw_bad_variant_access<typename __indexed_type<_Index,_Types...>::__type&>("Bad Variant index in get")
+ : &__variant_accessor<_Index,_Types...>::get(__v)
+ );
+}
+
+template<ptrdiff_t _Index,typename ... _Types>
+constexpr typename __indexed_type<_Index,_Types...>::__type&& get(Variant<_Types...>&& __v){
+ return __variant_accessor<_Index,_Types...>::get(
+ (((_Index!=__v.index()) ? __throw_bad_variant_access<int>("Bad Variant index in get") : 0), std::move(__v))
+ );
+}
+
+template<ptrdiff_t _Index,typename ... _Types>
+constexpr const typename __indexed_type<_Index,_Types...>::__type&& get(Variant<_Types...> const&& __v){
+ return __variant_accessor<_Index,_Types...>::get(
+ (((_Index!=__v.index()) ? __throw_bad_variant_access<int>("Bad Variant index in get") : 0), std::move(__v))
+ );
+}
+
+template<typename _Type,typename ... _Types>
+constexpr std::add_pointer_t<_Type> get_if(Variant<_Types...>& __v){
+ return (__type_index<_Type,_Types...>::__value!=__v.index())?nullptr:&get<_Type>(__v);
+}
+
+template<typename _Type,typename ... _Types>
+constexpr std::add_pointer_t<_Type const> get_if(Variant<_Types...> const& __v){
+ return (__type_index<_Type,_Types...>::__value!=__v.index())?nullptr:&get<_Type>(__v);
+}
+
+template<ptrdiff_t _Index,typename ... _Types>
+constexpr std::add_pointer_t<typename __indexed_type<_Index,_Types...>::__type> get_if(Variant<_Types...>& __v){
+ return ((_Index!=__v.index())?nullptr:
+ &__variant_accessor<_Index,_Types...>::get(__v));
+}
+
+template<ptrdiff_t _Index,typename ... _Types>
+constexpr std::add_pointer_t<typename __indexed_type<_Index,_Types...>::__type const> get_if(
+ Variant<_Types...> const& __v){
+ return ((_Index!=__v.index())?nullptr:
+ &__variant_accessor<_Index,_Types...>::get(__v));
+}
+
+template<typename _Type,typename ... _Types>
+constexpr bool holds_alternative(Variant<_Types...> const& __v) __NOEXCEPT{
+ return __v.index()==__type_index<_Type,_Types...>::__value;
+}
+
+template<typename _Visitor,typename ... _Types>
+struct __visitor_return_type;
+
+template<typename _Visitor>
+struct __visitor_return_type<_Visitor>{
+ typedef decltype(std::declval<_Visitor&>()()) __type;
+};
+
+template<typename _Visitor,typename _Head,typename ... _Rest>
+struct __visitor_return_type<_Visitor,_Head,_Rest...>{
+ typedef decltype(std::declval<_Visitor&>()(std::declval<_Head&>())) __type;
+};
+
+template<typename _Visitor,typename ... _Types>
+struct __visitor_table{
+ typedef Variant<_Types...> __variant_type;
+ typedef typename __visitor_return_type<_Visitor,_Types...>::__type __return_type;
+ typedef __return_type (*__func_type)(_Visitor&,__variant_type&);
+
+ template<typename _Type>
+ static __return_type __trampoline_func(_Visitor& __visitor,__variant_type& __v){
+ return __visitor(get<_Type>(__v));
+ }
+
+ static const __func_type __trampoline[sizeof...(_Types)];
+};
+
+template<typename _Visitor,typename ... _Types>
+const typename __visitor_table<_Visitor,_Types...>::__func_type __visitor_table<_Visitor,_Types...>::__trampoline[sizeof...(_Types)]={
+ &__trampoline_func<_Types>...
+ };
+
+template<typename _Visitor,typename ... _Types>
+constexpr typename __visitor_return_type<_Visitor,_Types...>::__type
+visit(_Visitor&& __visitor,Variant<_Types...>& __v){
+ return (__v.valueless_by_exception())
+ ? __throw_bad_variant_access<typename __visitor_return_type<_Visitor,_Types...>::__type>("Visiting of empty Variant")
+ : __visitor_table<_Visitor,_Types...>::__trampoline[__v.index()](__visitor,__v);
+}
+
+template<typename _Visitor,typename ... _Variants>
+struct __multi_visitor_return_type{
+ typedef decltype(std::declval<_Visitor&>()(get<0>(std::declval<_Variants>())...))
+ __type;
+};
+
+template<size_t _VariantIndex,typename _Indices>
+struct __visit_helper;
+
+template<ptrdiff_t ... _Indices>
+struct __visit_helper<0,__index_sequence<_Indices...>>{
+ template<typename _Visitor,typename ... _Variants>
+ static constexpr typename __multi_visitor_return_type<_Visitor,_Variants...>::__type
+ __visit(_Visitor& __visitor,_Variants& ... __v){
+ return __visitor(get<_Indices>(__v)...);
+ }
+};
+
+template<size_t _Index,typename ... _Args>
+struct __arg_selector_t;
+
+template<typename _Head,typename ... _Rest>
+struct __arg_selector_t<0,_Head,_Rest...>{
+ typedef _Head __type;
+
+ static constexpr __type& __select(_Head& __head,_Rest& ...){
+ return __head;
+ }
+};
+
+template<size_t _Index,typename _Head,typename ... _Rest>
+struct __arg_selector_t<_Index,_Head,_Rest...>{
+ typedef typename __arg_selector_t<_Index-1,_Rest...>::__type __type;
+ static constexpr __type& __select(_Head&,_Rest& ... __rest){
+ return __arg_selector_t<_Index-1,_Rest...>::__select(__rest...);
+ }
+};
+
+template<size_t _Index,typename ... _Args>
+constexpr typename __arg_selector_t<_Index,_Args...>::__type&& __arg_selector(_Args&& ... __args){
+ return std::forward<typename __arg_selector_t<_Index,_Args...>::__type>(
+ __arg_selector_t<_Index,_Args...>::__select(__args...));
+}
+
+template<ptrdiff_t _Index,size_t _VariantIndex,ptrdiff_t ... _Indices>
+struct __visit_helper2{
+ template<typename _Visitor,typename ... _Variants>
+ static constexpr typename __multi_visitor_return_type<_Visitor,_Variants...>::__type
+ __visit(_Visitor& __visitor,_Variants&& ... __v){
+ return (__arg_selector<_VariantIndex-1>(__v...).index()==_Index)
+ ? __visit_helper<_VariantIndex-1,__index_sequence<_Index,_Indices...>>::__visit(__visitor,std::forward<_Variants>(__v)...)
+ : __visit_helper2<_Index-1,_VariantIndex,_Indices...>::__visit(__visitor,std::forward<_Variants>(__v)...);
+ }
+};
+
+template<size_t _VariantIndex,ptrdiff_t ... _Indices>
+struct __visit_helper2<-1,_VariantIndex,_Indices...>{
+ template<typename _Visitor,typename ... _Variants>
+ static constexpr typename __multi_visitor_return_type<_Visitor,_Variants...>::__type
+ __visit(_Visitor&,_Variants&& ...){
+ return __throw_bad_variant_access<typename __multi_visitor_return_type<_Visitor,_Variants...>::__type>("Visiting of empty Variant");
+ }
+};
+
+template<typename _Variant>
+struct __variant_type_count;
+
+template<typename ... _Types>
+struct __variant_type_count<Variant<_Types...>>{
+ static constexpr size_t __value=sizeof...(_Types);
+};
+
+template<typename _Variant>
+struct __variant_type_count<_Variant&>{
+ static constexpr size_t __value=__variant_type_count<_Variant>::__value;
+};
+
+template<typename _Variant>
+struct __variant_type_count<_Variant const&>{
+ static constexpr size_t __value=__variant_type_count<_Variant>::__value;
+};
+
+template<size_t _VariantIndex,ptrdiff_t ... _Indices>
+struct __visit_helper<_VariantIndex,__index_sequence<_Indices...>>{
+
+ template<typename _Visitor,typename ... _Variants>
+ static constexpr typename __multi_visitor_return_type<_Visitor,_Variants...>::__type
+ __visit(_Visitor& __visitor,_Variants&& ... __v){
+ return __visit_helper2<
+ __variant_type_count<
+ typename __arg_selector_t<
+ _VariantIndex-1,_Variants...>::__type>::__value-1,
+ _VariantIndex,_Indices...>::__visit(
+ __visitor,std::forward<_Variants&&>(__v)...);
+ }
+};
+
+template<typename _Visitor,typename ... _Variants>
+constexpr typename __multi_visitor_return_type<_Visitor,_Variants...>::__type
+visit(_Visitor&& __visitor,_Variants&& ... __v){
+ return __visit_helper<sizeof...(_Variants),__index_sequence<>>::__visit(
+ __visitor,std::forward<_Variants>(__v)...);
+}
+
+template<typename ... _Types>
+constexpr bool operator==(Variant<_Types...> const& __lhs,Variant<_Types...> const& __rhs){
+ return (__lhs.index()==__rhs.index()) &&
+ ((__lhs.index()==-1) ||
+ __equality_op_table<Variant<_Types...>>::__equality_compare[__lhs.index()](
+ __lhs,__rhs));
+}
+
+template<typename ... _Types>
+constexpr bool operator!=(Variant<_Types...> const& __lhs,Variant<_Types...> const& __rhs){
+ return !(__lhs==__rhs);
+}
+
+template<typename ... _Types>
+constexpr bool operator<(Variant<_Types...> const& __lhs,Variant<_Types...> const& __rhs){
+ return (__lhs.index()<__rhs.index()) ||
+ ((__lhs.index()==__rhs.index()) &&
+ ((__lhs.index()!=-1) &&
+ __less_than_op_table<Variant<_Types...>>::
+ __less_than_compare[__lhs.index()](__lhs,__rhs)));
+}
+
+template<typename ... _Types>
+constexpr bool operator>(Variant<_Types...> const& __lhs,Variant<_Types...> const& __rhs){
+ return __rhs<__lhs;
+}
+
+template<typename ... _Types>
+constexpr bool operator>=(Variant<_Types...> const& __lhs,Variant<_Types...> const& __rhs){
+ return !(__lhs<__rhs);
+}
+
+template<typename ... _Types>
+constexpr bool operator<=(Variant<_Types...> const& __lhs,Variant<_Types...> const& __rhs){
+ return !(__lhs>__rhs);
+}
+
+struct Monostate{};
+
+constexpr inline bool operator==(Monostate const&,Monostate const&){ return true;}
+constexpr inline bool operator!=(Monostate const&,Monostate const&){ return false;}
+constexpr inline bool operator>=(Monostate const&,Monostate const&){ return true;}
+constexpr inline bool operator<=(Monostate const&,Monostate const&){ return true;}
+constexpr inline bool operator>(Monostate const&,Monostate const&){ return false;}
+constexpr inline bool operator<(Monostate const&,Monostate const&){ return false;}
+
+struct __hash_visitor{
+ template<typename _Type>
+ size_t operator()(_Type const& __x){
+ return std::hash<_Type>()(__x);
+ }
+};
+
+// -- WebKit Additions --
+
+template<class V, class... F>
+auto switchOn(V&& v, F&&... f) -> decltype(visit(makeVisitor(std::forward<F>(f)...), std::forward<V>(v)))
+{
+ return visit(makeVisitor(std::forward<F>(f)...), std::forward<V>(v));
+}
+
+} // namespace WTF
+
+namespace std {
+
+template<>
+struct hash<WTF::Monostate>{
+ size_t operator()(WTF::Monostate) __NOEXCEPT{
+ return 42;
+ }
+};
+
+template<typename ... _Types>
+struct hash<WTF::Variant<_Types...>>{
+ size_t operator()(WTF::Variant<_Types...> const &v) __NOEXCEPT {
+ return std::hash<ptrdiff_t>()(v.index()) ^ WTF::visit(WTF::__hash_visitor(), v);
+ }
+};
+
+} // namespace std
+
+using WTF::Monostate;
+using WTF::Variant;
+
+#endif // !COMPILER(CLANG) || WTF_CPP_STD_VER >= 14
+
+#if COMPILER(MSVC)
+#pragma warning(pop)
+#endif
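A minimal usage sketch of the Variant interface added above, assuming the header is included as <wtf/Variant.h> and that makeVisitor, referenced by switchOn, is provided elsewhere in WTF; the names and output below are illustrative only:

#include <wtf/Variant.h>
#include <cstdio>
#include <string>

// Illustrative visitor for WTF::visit; both overloads return void, so the
// visitor return-type deduction above is consistent across alternatives.
struct PrintAlternative {
    void operator()(int i) const { std::printf("int: %d\n", i); }
    void operator()(const std::string& s) const { std::printf("string: %s\n", s.c_str()); }
};

static void variantUsageSketch()
{
    WTF::Variant<int, std::string> value(42);          // converting constructor picks the int alternative

    if (WTF::holds_alternative<int>(value))
        std::printf("index %td holds %d\n", value.index(), WTF::get<int>(value));

    value = std::string("hello");                      // converting assignment switches alternatives
    if (auto* s = WTF::get_if<std::string>(value))     // note: this get_if takes a reference, not a pointer
        std::printf("now a string: %s\n", s->c_str());

    WTF::visit(PrintAlternative(), value);             // dispatches through the trampoline table

    // WebKit addition: switchOn combines the lambdas via makeVisitor and visits.
    WTF::switchOn(value,
        [](int i) { std::printf("switchOn int: %d\n", i); },
        [](const std::string& s) { std::printf("switchOn string: %s\n", s.c_str()); });
}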
diff --git a/Source/WTF/wtf/Vector.h b/Source/WTF/wtf/Vector.h
index 964fe5577..6fdd19ad4 100644
--- a/Source/WTF/wtf/Vector.h
+++ b/Source/WTF/wtf/Vector.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2005, 2006, 2007, 2008, 2014 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -30,11 +30,14 @@
#include <wtf/FastMalloc.h>
#include <wtf/MallocPtr.h>
#include <wtf/Noncopyable.h>
-#include <wtf/OwnPtr.h>
#include <wtf/StdLibExtras.h>
#include <wtf/ValueCheck.h>
#include <wtf/VectorTraits.h>
+#if ASAN_ENABLED
+extern "C" void __sanitizer_annotate_contiguous_container(const void* begin, const void* end, const void* old_mid, const void* new_mid);
+#endif
+
namespace WTF {
const size_t notFound = static_cast<size_t>(-1);
@@ -95,7 +98,7 @@ struct VectorMover<false, T>
static void move(T* src, T* srcEnd, T* dst)
{
while (src != srcEnd) {
- new (NotNull, dst) T(std::move(*src));
+ new (NotNull, dst) T(WTFMove(*src));
src->~T();
++dst;
++src;
@@ -110,7 +113,7 @@ struct VectorMover<false, T>
while (src != srcEnd) {
--srcEnd;
--dstEnd;
- new (NotNull, dstEnd) T(std::move(*srcEnd));
+ new (NotNull, dstEnd) T(WTFMove(*srcEnd));
srcEnd->~T();
}
}
@@ -136,10 +139,11 @@ struct VectorCopier;
template<typename T>
struct VectorCopier<false, T>
{
- static void uninitializedCopy(const T* src, const T* srcEnd, T* dst)
+ template<typename U>
+ static void uninitializedCopy(const T* src, const T* srcEnd, U* dst)
{
while (src != srcEnd) {
- new (NotNull, dst) T(*src);
+ new (NotNull, dst) U(*src);
++dst;
++src;
}
@@ -149,10 +153,15 @@ struct VectorCopier<false, T>
template<typename T>
struct VectorCopier<true, T>
{
- static void uninitializedCopy(const T* src, const T* srcEnd, T* dst)
+ static void uninitializedCopy(const T* src, const T* srcEnd, T* dst)
{
memcpy(dst, src, reinterpret_cast<const char*>(srcEnd) - reinterpret_cast<const char*>(src));
}
+ template<typename U>
+ static void uninitializedCopy(const T* src, const T* srcEnd, U* dst)
+ {
+ VectorCopier<false, T>::uninitializedCopy(src, srcEnd, dst);
+ }
};
template <bool canFillWithMemset, typename T>
@@ -176,7 +185,7 @@ struct VectorFiller<true, T>
static void uninitializedFill(T* dst, T* dstEnd, const T& val)
{
static_assert(sizeof(T) == 1, "Size of type T should be equal to one!");
-#if COMPILER(GCC) && defined(_FORTIFY_SOURCE)
+#if COMPILER(GCC_OR_CLANG) && defined(_FORTIFY_SOURCE)
if (!__builtin_constant_p(dstEnd - dst) || (!(dstEnd - dst)))
#endif
memset(dst, val, dstEnd - dst);
@@ -255,7 +264,7 @@ public:
ASSERT(newCapacity);
if (newCapacity > std::numeric_limits<unsigned>::max() / sizeof(T))
CRASH();
- size_t sizeToAllocate = fastMallocGoodSize(newCapacity * sizeof(T));
+ size_t sizeToAllocate = newCapacity * sizeof(T);
m_capacity = sizeToAllocate / sizeof(T);
m_buffer = static_cast<T*>(fastMalloc(sizeToAllocate));
}
@@ -266,7 +275,7 @@ public:
if (newCapacity > std::numeric_limits<unsigned>::max() / sizeof(T))
return false;
- size_t sizeToAllocate = fastMallocGoodSize(newCapacity * sizeof(T));
+ size_t sizeToAllocate = newCapacity * sizeof(T);
T* newBuffer;
if (tryFastMalloc(sizeToAllocate).getValue(newBuffer)) {
m_capacity = sizeToAllocate / sizeof(T);
@@ -286,7 +295,7 @@ public:
ASSERT(shouldReallocateBuffer(newCapacity));
if (newCapacity > std::numeric_limits<size_t>::max() / sizeof(T))
CRASH();
- size_t sizeToAllocate = fastMallocGoodSize(newCapacity * sizeof(T));
+ size_t sizeToAllocate = newCapacity * sizeof(T);
m_capacity = sizeToAllocate / sizeof(T);
m_buffer = static_cast<T*>(fastRealloc(m_buffer, sizeToAllocate));
}
@@ -306,6 +315,7 @@ public:
T* buffer() { return m_buffer; }
const T* buffer() const { return m_buffer; }
+ static ptrdiff_t bufferMemoryOffset() { return OBJECT_OFFSETOF(VectorBufferBase, m_buffer); }
size_t capacity() const { return m_capacity; }
MallocPtr<T> releaseBuffer()
@@ -367,7 +377,7 @@ public:
deallocateBuffer(buffer());
}
- void swap(VectorBuffer<T, 0>& other)
+ void swap(VectorBuffer<T, 0>& other, size_t, size_t)
{
std::swap(m_buffer, other.m_buffer);
std::swap(m_capacity, other.m_capacity);
@@ -375,6 +385,13 @@ public:
void restoreInlineBufferIfNeeded() { }
+#if ASAN_ENABLED
+ void* endOfBuffer()
+ {
+ return buffer() + capacity();
+ }
+#endif
+
using Base::allocateBuffer;
using Base::tryAllocateBuffer;
using Base::shouldReallocateBuffer;
@@ -383,6 +400,7 @@ public:
using Base::buffer;
using Base::capacity;
+ using Base::bufferMemoryOffset;
using Base::releaseBuffer;
@@ -456,20 +474,20 @@ public:
Base::reallocateBuffer(newCapacity);
}
- void swap(VectorBuffer& other)
+ void swap(VectorBuffer& other, size_t mySize, size_t otherSize)
{
if (buffer() == inlineBuffer() && other.buffer() == other.inlineBuffer()) {
- std::swap(m_inlineBuffer, other.m_inlineBuffer);
+ swapInlineBuffer(other, mySize, otherSize);
std::swap(m_capacity, other.m_capacity);
} else if (buffer() == inlineBuffer()) {
m_buffer = other.m_buffer;
other.m_buffer = other.inlineBuffer();
- std::swap(m_inlineBuffer, other.m_inlineBuffer);
+ swapInlineBuffer(other, mySize, 0);
std::swap(m_capacity, other.m_capacity);
} else if (other.buffer() == other.inlineBuffer()) {
other.m_buffer = m_buffer;
m_buffer = inlineBuffer();
- std::swap(m_inlineBuffer, other.m_inlineBuffer);
+ swapInlineBuffer(other, 0, otherSize);
std::swap(m_capacity, other.m_capacity);
} else {
std::swap(m_buffer, other.m_buffer);
@@ -485,8 +503,22 @@ public:
m_capacity = inlineCapacity;
}
+#if ASAN_ENABLED
+ void* endOfBuffer()
+ {
+ ASSERT(buffer());
+ static_assert((offsetof(VectorBuffer, m_inlineBuffer) + sizeof(m_inlineBuffer)) % 8 == 0, "Inline buffer end needs to be on 8 byte boundary for ASan annotations to work.");
+
+ if (buffer() == inlineBuffer())
+ return reinterpret_cast<char*>(m_inlineBuffer) + sizeof(m_inlineBuffer);
+
+ return buffer() + capacity();
+ }
+#endif
+
using Base::buffer;
using Base::capacity;
+ using Base::bufferMemoryOffset;
MallocPtr<T> releaseBuffer()
{
@@ -501,11 +533,41 @@ protected:
private:
using Base::m_buffer;
using Base::m_capacity;
+
+ void swapInlineBuffer(VectorBuffer& other, size_t mySize, size_t otherSize)
+ {
+ // FIXME: We could make swap part of VectorTypeOperations
+ // https://bugs.webkit.org/show_bug.cgi?id=128863
+ swapInlineBuffers(inlineBuffer(), other.inlineBuffer(), mySize, otherSize);
+ }
+
+ static void swapInlineBuffers(T* left, T* right, size_t leftSize, size_t rightSize)
+ {
+ if (left == right)
+ return;
+
+ ASSERT(leftSize <= inlineCapacity);
+ ASSERT(rightSize <= inlineCapacity);
+
+ size_t swapBound = std::min(leftSize, rightSize);
+ for (unsigned i = 0; i < swapBound; ++i)
+ std::swap(left[i], right[i]);
+ VectorTypeOperations<T>::move(left + swapBound, left + leftSize, right + swapBound);
+ VectorTypeOperations<T>::move(right + swapBound, right + rightSize, left + swapBound);
+ }
T* inlineBuffer() { return reinterpret_cast_ptr<T*>(m_inlineBuffer); }
const T* inlineBuffer() const { return reinterpret_cast_ptr<const T*>(m_inlineBuffer); }
+#if ASAN_ENABLED
+ // ASan needs the buffer to begin and end on 8-byte boundaries for annotations to work.
+ // FIXME: Add a redzone before the buffer to catch off by one accesses. We don't need a guard after, because the buffer is the last member variable.
+ static const size_t asanInlineBufferAlignment = std::alignment_of<T>::value >= 8 ? std::alignment_of<T>::value : 8;
+ static const size_t asanAdjustedInlineCapacity = ((sizeof(T) * inlineCapacity + 7) & ~7) / sizeof(T);
+ typename std::aligned_storage<sizeof(T), asanInlineBufferAlignment>::type m_inlineBuffer[asanAdjustedInlineCapacity];
+#else
typename std::aligned_storage<sizeof(T), std::alignment_of<T>::value>::type m_inlineBuffer[inlineCapacity];
+#endif
};
struct UnsafeVectorOverflow {
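The swapInlineBuffers helper added in the hunk above swaps two inline buffers that may contain different numbers of constructed elements: the common prefix is swapped element by element, and each remaining tail is then relocated into the other buffer's uninitialized slots. A standalone sketch of the same strategy, using placement new and explicit destruction where the real code calls VectorTypeOperations<T>::move:

#include <algorithm>
#include <cstddef>
#include <new>
#include <utility>

// Sketch only: swap the overlapping prefix, then move each tail into the
// other buffer's raw slots and destroy the moved-from elements.
template<typename T>
void swapPrefixesThenMoveTails(T* left, size_t leftSize, T* right, size_t rightSize)
{
    size_t swapBound = std::min(leftSize, rightSize);
    for (size_t i = 0; i < swapBound; ++i)
        std::swap(left[i], right[i]);

    // At most one of these loops runs: only the longer side has a tail.
    for (size_t i = swapBound; i < leftSize; ++i) {
        ::new (static_cast<void*>(right + i)) T(std::move(left[i]));
        left[i].~T();
    }
    for (size_t i = swapBound; i < rightSize; ++i) {
        ::new (static_cast<void*>(left + i)) T(std::move(right[i]));
        right[i].~T();
    }
}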
@@ -515,7 +577,7 @@ struct UnsafeVectorOverflow {
}
};
-template<typename T, size_t inlineCapacity = 0, typename OverflowHandler = CrashOnOverflow>
+template<typename T, size_t inlineCapacity = 0, typename OverflowHandler = CrashOnOverflow, size_t minCapacity = 16>
class Vector : private VectorBuffer<T, inlineCapacity> {
WTF_MAKE_FAST_ALLOCATED;
private:
@@ -538,6 +600,8 @@ public:
explicit Vector(size_t size)
: Base(size, size)
{
+ asanSetInitialBufferSizeTo(size);
+
if (begin())
TypeOperations::initialize(begin(), end());
}
@@ -545,6 +609,8 @@ public:
Vector(size_t size, const T& val)
: Base(size, size)
{
+ asanSetInitialBufferSizeTo(size);
+
if (begin())
TypeOperations::uninitializedFill(begin(), end(), val);
}
@@ -552,6 +618,9 @@ public:
Vector(std::initializer_list<T> initializerList)
{
reserveInitialCapacity(initializerList.size());
+
+ asanSetInitialBufferSizeTo(initializerList.size());
+
for (const auto& element : initializerList)
uncheckedAppend(element);
}
@@ -559,21 +628,24 @@ public:
~Vector()
{
if (m_size)
- shrink(0);
+ TypeOperations::destruct(begin(), end());
+
+ asanSetBufferSizeToFullCapacity(0);
}
Vector(const Vector&);
- template<size_t otherCapacity, typename otherOverflowBehaviour>
- Vector(const Vector<T, otherCapacity, otherOverflowBehaviour>&);
+ template<size_t otherCapacity, typename otherOverflowBehaviour, size_t otherMinimumCapacity>
+ explicit Vector(const Vector<T, otherCapacity, otherOverflowBehaviour, otherMinimumCapacity>&);
Vector& operator=(const Vector&);
- template<size_t otherCapacity, typename otherOverflowBehaviour>
- Vector& operator=(const Vector<T, otherCapacity, otherOverflowBehaviour>&);
+ template<size_t otherCapacity, typename otherOverflowBehaviour, size_t otherMinimumCapacity>
+ Vector& operator=(const Vector<T, otherCapacity, otherOverflowBehaviour, otherMinimumCapacity>&);
Vector(Vector&&);
Vector& operator=(Vector&&);
size_t size() const { return m_size; }
+ static ptrdiff_t sizeMemoryOffset() { return OBJECT_OFFSETOF(Vector, m_size); }
size_t capacity() const { return Base::capacity(); }
bool isEmpty() const { return !size(); }
@@ -607,6 +679,7 @@ public:
T* data() { return Base::buffer(); }
const T* data() const { return Base::buffer(); }
+ static ptrdiff_t dataMemoryOffset() { return Base::bufferMemoryOffset(); }
iterator begin() { return data(); }
iterator end() { return begin() + m_size; }
@@ -625,7 +698,7 @@ public:
T takeLast()
{
- T result = last();
+ T result = WTFMove(last());
removeLast();
return result;
}
@@ -646,9 +719,15 @@ public:
void clear() { shrinkCapacity(0); }
- template<typename U> void append(const U*, size_t);
+ void append(ValueType&& value) { append<ValueType>(std::forward<ValueType>(value)); }
template<typename U> void append(U&&);
- template<typename U> void uncheckedAppend(U&& val);
+ template<typename... Args> void constructAndAppend(Args&&...);
+ template<typename... Args> bool tryConstructAndAppend(Args&&...);
+
+ void uncheckedAppend(ValueType&& value) { uncheckedAppend<ValueType>(std::forward<ValueType>(value)); }
+ template<typename U> void uncheckedAppend(U&&);
+
+ template<typename U> void append(const U*, size_t);
template<typename U, size_t otherCapacity> void appendVector(const Vector<U, otherCapacity>&);
template<typename U> bool tryAppend(const U*, size_t);
@@ -658,6 +737,10 @@ public:
void remove(size_t position);
void remove(size_t position, size_t length);
+ template<typename U> bool removeFirst(const U&);
+ template<typename MatchFunction> bool removeFirstMatching(const MatchFunction&);
+ template<typename U> unsigned removeAll(const U&);
+ template<typename MatchFunction> unsigned removeAllMatching(const MatchFunction&);
void removeLast()
{
@@ -673,10 +756,22 @@ public:
MallocPtr<T> releaseBuffer();
- void swap(Vector<T, inlineCapacity, OverflowHandler>& other)
+ void swap(Vector<T, inlineCapacity, OverflowHandler, minCapacity>& other)
{
+#if ASAN_ENABLED
+ if (this == std::addressof(other)) // ASan will crash if we try to restrict access to the same buffer twice.
+ return;
+#endif
+
+ // Make it possible to copy inline buffers.
+ asanSetBufferSizeToFullCapacity();
+ other.asanSetBufferSizeToFullCapacity();
+
+ Base::swap(other, m_size, other.m_size);
std::swap(m_size, other.m_size);
- Base::swap(other);
+
+ asanSetInitialBufferSizeTo(m_size);
+ other.asanSetInitialBufferSizeTo(other.m_size);
}
void reverse();
@@ -690,6 +785,14 @@ private:
const T* tryExpandCapacity(size_t newMinCapacity, const T*);
template<typename U> U* expandCapacity(size_t newMinCapacity, U*);
template<typename U> void appendSlowCase(U&&);
+ template<typename... Args> void constructAndAppendSlowCase(Args&&...);
+ template<typename... Args> bool tryConstructAndAppendSlowCase(Args&&...);
+
+ void asanSetInitialBufferSizeTo(size_t);
+ void asanSetBufferSizeToFullCapacity(size_t);
+ void asanSetBufferSizeToFullCapacity() { asanSetBufferSizeToFullCapacity(size()); }
+
+ void asanBufferSizeWillChangeTo(size_t);
using Base::m_size;
using Base::buffer;
@@ -702,27 +805,34 @@ private:
using Base::reallocateBuffer;
using Base::restoreInlineBufferIfNeeded;
using Base::releaseBuffer;
+#if ASAN_ENABLED
+ using Base::endOfBuffer;
+#endif
};
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-Vector<T, inlineCapacity, OverflowHandler>::Vector(const Vector& other)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+Vector<T, inlineCapacity, OverflowHandler, minCapacity>::Vector(const Vector& other)
: Base(other.capacity(), other.size())
{
+ asanSetInitialBufferSizeTo(other.size());
+
if (begin())
TypeOperations::uninitializedCopy(other.begin(), other.end(), begin());
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-template<size_t otherCapacity, typename otherOverflowBehaviour>
-Vector<T, inlineCapacity, OverflowHandler>::Vector(const Vector<T, otherCapacity, otherOverflowBehaviour>& other)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+template<size_t otherCapacity, typename otherOverflowBehaviour, size_t otherMinimumCapacity>
+Vector<T, inlineCapacity, OverflowHandler, minCapacity>::Vector(const Vector<T, otherCapacity, otherOverflowBehaviour, otherMinimumCapacity>& other)
: Base(other.capacity(), other.size())
{
+ asanSetInitialBufferSizeTo(other.size());
+
if (begin())
TypeOperations::uninitializedCopy(other.begin(), other.end(), begin());
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-Vector<T, inlineCapacity, OverflowHandler>& Vector<T, inlineCapacity, OverflowHandler>::operator=(const Vector<T, inlineCapacity, OverflowHandler>& other)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+Vector<T, inlineCapacity, OverflowHandler, minCapacity>& Vector<T, inlineCapacity, OverflowHandler, minCapacity>::operator=(const Vector<T, inlineCapacity, OverflowHandler, minCapacity>& other)
{
if (&other == this)
return *this;
@@ -734,12 +844,8 @@ Vector<T, inlineCapacity, OverflowHandler>& Vector<T, inlineCapacity, OverflowHa
reserveCapacity(other.size());
ASSERT(begin());
}
-
-// Works around an assert in VS2010. See https://connect.microsoft.com/VisualStudio/feedback/details/558044/std-copy-should-not-check-dest-when-first-last
-#if COMPILER(MSVC) && defined(_ITERATOR_DEBUG_LEVEL) && _ITERATOR_DEBUG_LEVEL
- if (!begin())
- return *this;
-#endif
+
+ asanBufferSizeWillChangeTo(other.size());
std::copy(other.begin(), other.begin() + size(), begin());
TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end());
@@ -750,9 +856,9 @@ Vector<T, inlineCapacity, OverflowHandler>& Vector<T, inlineCapacity, OverflowHa
inline bool typelessPointersAreEqual(const void* a, const void* b) { return a == b; }
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-template<size_t otherCapacity, typename otherOverflowBehaviour>
-Vector<T, inlineCapacity, OverflowHandler>& Vector<T, inlineCapacity, OverflowHandler>::operator=(const Vector<T, otherCapacity, otherOverflowBehaviour>& other)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+template<size_t otherCapacity, typename otherOverflowBehaviour, size_t otherMinimumCapacity>
+Vector<T, inlineCapacity, OverflowHandler, minCapacity>& Vector<T, inlineCapacity, OverflowHandler, minCapacity>::operator=(const Vector<T, otherCapacity, otherOverflowBehaviour, otherMinimumCapacity>& other)
{
// If the inline capacities match, we should call the more specific
// template. If the inline capacities don't match, the two objects
@@ -767,11 +873,7 @@ Vector<T, inlineCapacity, OverflowHandler>& Vector<T, inlineCapacity, OverflowHa
ASSERT(begin());
}
-// Works around an assert in VS2010. See https://connect.microsoft.com/VisualStudio/feedback/details/558044/std-copy-should-not-check-dest-when-first-last
-#if COMPILER(MSVC) && defined(_ITERATOR_DEBUG_LEVEL) && _ITERATOR_DEBUG_LEVEL
- if (!begin())
- return *this;
-#endif
+ asanBufferSizeWillChangeTo(other.size());
std::copy(other.begin(), other.begin() + size(), begin());
TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end());
@@ -780,29 +882,29 @@ Vector<T, inlineCapacity, OverflowHandler>& Vector<T, inlineCapacity, OverflowHa
return *this;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-inline Vector<T, inlineCapacity, OverflowHandler>::Vector(Vector<T, inlineCapacity, OverflowHandler>&& other)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline Vector<T, inlineCapacity, OverflowHandler, minCapacity>::Vector(Vector<T, inlineCapacity, OverflowHandler, minCapacity>&& other)
{
swap(other);
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-inline Vector<T, inlineCapacity, OverflowHandler>& Vector<T, inlineCapacity, OverflowHandler>::operator=(Vector<T, inlineCapacity, OverflowHandler>&& other)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline Vector<T, inlineCapacity, OverflowHandler, minCapacity>& Vector<T, inlineCapacity, OverflowHandler, minCapacity>::operator=(Vector<T, inlineCapacity, OverflowHandler, minCapacity>&& other)
{
swap(other);
return *this;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
-bool Vector<T, inlineCapacity, OverflowHandler>::contains(const U& value) const
+bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::contains(const U& value) const
{
return find(value) != notFound;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
-size_t Vector<T, inlineCapacity, OverflowHandler>::find(const U& value) const
+size_t Vector<T, inlineCapacity, OverflowHandler, minCapacity>::find(const U& value) const
{
for (size_t i = 0; i < size(); ++i) {
if (at(i) == value)
@@ -811,9 +913,9 @@ size_t Vector<T, inlineCapacity, OverflowHandler>::find(const U& value) const
return notFound;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
-size_t Vector<T, inlineCapacity, OverflowHandler>::reverseFind(const U& value) const
+size_t Vector<T, inlineCapacity, OverflowHandler, minCapacity>::reverseFind(const U& value) const
{
for (size_t i = 1; i <= size(); ++i) {
const size_t index = size() - i;
@@ -823,8 +925,8 @@ size_t Vector<T, inlineCapacity, OverflowHandler>::reverseFind(const U& value) c
return notFound;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-void Vector<T, inlineCapacity, OverflowHandler>::fill(const T& val, size_t newSize)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::fill(const T& val, size_t newSize)
{
if (size() > newSize)
shrink(newSize);
@@ -833,28 +935,30 @@ void Vector<T, inlineCapacity, OverflowHandler>::fill(const T& val, size_t newSi
reserveCapacity(newSize);
ASSERT(begin());
}
-
+
+ asanBufferSizeWillChangeTo(newSize);
+
std::fill(begin(), end(), val);
TypeOperations::uninitializedFill(end(), begin() + newSize, val);
m_size = newSize;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename Iterator>
-void Vector<T, inlineCapacity, OverflowHandler>::appendRange(Iterator start, Iterator end)
+void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::appendRange(Iterator start, Iterator end)
{
for (Iterator it = start; it != end; ++it)
append(*it);
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-void Vector<T, inlineCapacity, OverflowHandler>::expandCapacity(size_t newMinCapacity)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::expandCapacity(size_t newMinCapacity)
{
- reserveCapacity(std::max(newMinCapacity, std::max(static_cast<size_t>(16), capacity() + capacity() / 4 + 1)));
+ reserveCapacity(std::max(newMinCapacity, std::max(static_cast<size_t>(minCapacity), capacity() + capacity() / 4 + 1)));
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-T* Vector<T, inlineCapacity, OverflowHandler>::expandCapacity(size_t newMinCapacity, T* ptr)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+T* Vector<T, inlineCapacity, OverflowHandler, minCapacity>::expandCapacity(size_t newMinCapacity, T* ptr)
{
if (ptr < begin() || ptr >= end()) {
expandCapacity(newMinCapacity);
@@ -865,14 +969,14 @@ T* Vector<T, inlineCapacity, OverflowHandler>::expandCapacity(size_t newMinCapac
return begin() + index;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-bool Vector<T, inlineCapacity, OverflowHandler>::tryExpandCapacity(size_t newMinCapacity)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::tryExpandCapacity(size_t newMinCapacity)
{
- return tryReserveCapacity(std::max(newMinCapacity, std::max(static_cast<size_t>(16), capacity() + capacity() / 4 + 1)));
+ return tryReserveCapacity(std::max(newMinCapacity, std::max(static_cast<size_t>(minCapacity), capacity() + capacity() / 4 + 1)));
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-const T* Vector<T, inlineCapacity, OverflowHandler>::tryExpandCapacity(size_t newMinCapacity, const T* ptr)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+const T* Vector<T, inlineCapacity, OverflowHandler, minCapacity>::tryExpandCapacity(size_t newMinCapacity, const T* ptr)
{
if (ptr < begin() || ptr >= end()) {
if (!tryExpandCapacity(newMinCapacity))
@@ -885,21 +989,23 @@ const T* Vector<T, inlineCapacity, OverflowHandler>::tryExpandCapacity(size_t ne
return begin() + index;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler> template<typename U>
-inline U* Vector<T, inlineCapacity, OverflowHandler>::expandCapacity(size_t newMinCapacity, U* ptr)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename U>
+inline U* Vector<T, inlineCapacity, OverflowHandler, minCapacity>::expandCapacity(size_t newMinCapacity, U* ptr)
{
expandCapacity(newMinCapacity);
return ptr;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-inline void Vector<T, inlineCapacity, OverflowHandler>::resize(size_t size)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::resize(size_t size)
{
- if (size <= m_size)
+ if (size <= m_size) {
TypeOperations::destruct(begin() + size, end());
- else {
+ asanBufferSizeWillChangeTo(size);
+ } else {
if (size > capacity())
expandCapacity(size);
+ asanBufferSizeWillChangeTo(size);
if (begin())
TypeOperations::initialize(end(), begin() + size);
}
@@ -907,62 +1013,122 @@ inline void Vector<T, inlineCapacity, OverflowHandler>::resize(size_t size)
m_size = size;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-void Vector<T, inlineCapacity, OverflowHandler>::resizeToFit(size_t size)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::resizeToFit(size_t size)
{
reserveCapacity(size);
resize(size);
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-void Vector<T, inlineCapacity, OverflowHandler>::shrink(size_t size)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::shrink(size_t size)
{
ASSERT(size <= m_size);
TypeOperations::destruct(begin() + size, end());
+ asanBufferSizeWillChangeTo(size);
m_size = size;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-void Vector<T, inlineCapacity, OverflowHandler>::grow(size_t size)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::grow(size_t size)
{
ASSERT(size >= m_size);
if (size > capacity())
expandCapacity(size);
+ asanBufferSizeWillChangeTo(size);
if (begin())
TypeOperations::initialize(end(), begin() + size);
m_size = size;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-void Vector<T, inlineCapacity, OverflowHandler>::reserveCapacity(size_t newCapacity)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::asanSetInitialBufferSizeTo(size_t size)
+{
+#if ASAN_ENABLED
+ if (!buffer())
+ return;
+
+ // This function restricts buffer access to only the elements in the [begin(), end()) range, so that ASan detects an error
+ // when elements in the [end(), endOfBuffer()) range are accessed.
+ // A newly allocated buffer can be accessed without restrictions, so the "old_mid" argument equals the "end" argument.
+ __sanitizer_annotate_contiguous_container(buffer(), endOfBuffer(), endOfBuffer(), buffer() + size);
+#else
+ UNUSED_PARAM(size);
+#endif
+}
+
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::asanSetBufferSizeToFullCapacity(size_t size)
+{
+#if ASAN_ENABLED
+ if (!buffer())
+ return;
+
+ // ASan requires that the annotation be returned to its initial state before deallocation.
+ __sanitizer_annotate_contiguous_container(buffer(), endOfBuffer(), buffer() + size, endOfBuffer());
+#else
+ UNUSED_PARAM(size);
+#endif
+}
+
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::asanBufferSizeWillChangeTo(size_t newSize)
+{
+#if ASAN_ENABLED
+ if (!buffer())
+ return;
+
+ // Change allowed range.
+ __sanitizer_annotate_contiguous_container(buffer(), endOfBuffer(), buffer() + size(), buffer() + newSize);
+#else
+ UNUSED_PARAM(newSize);
+#endif
+}
+
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::reserveCapacity(size_t newCapacity)
{
if (newCapacity <= capacity())
return;
T* oldBuffer = begin();
T* oldEnd = end();
+
+ asanSetBufferSizeToFullCapacity();
+
Base::allocateBuffer(newCapacity);
ASSERT(begin());
+
+ asanSetInitialBufferSizeTo(size());
+
TypeOperations::move(oldBuffer, oldEnd, begin());
Base::deallocateBuffer(oldBuffer);
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-bool Vector<T, inlineCapacity, OverflowHandler>::tryReserveCapacity(size_t newCapacity)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::tryReserveCapacity(size_t newCapacity)
{
if (newCapacity <= capacity())
return true;
T* oldBuffer = begin();
T* oldEnd = end();
- if (!Base::tryAllocateBuffer(newCapacity))
+
+ asanSetBufferSizeToFullCapacity();
+
+ if (!Base::tryAllocateBuffer(newCapacity)) {
+ asanSetInitialBufferSizeTo(size());
return false;
+ }
ASSERT(begin());
+
+ asanSetInitialBufferSizeTo(size());
+
TypeOperations::move(oldBuffer, oldEnd, begin());
Base::deallocateBuffer(oldBuffer);
return true;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-inline void Vector<T, inlineCapacity, OverflowHandler>::reserveInitialCapacity(size_t initialCapacity)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::reserveInitialCapacity(size_t initialCapacity)
{
ASSERT(!m_size);
ASSERT(capacity() == inlineCapacity);
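The three asan* helpers defined in the hunk above all drive the same sanitizer hook with different "mid" arguments: [buffer(), mid) stays addressable and [mid, endOfBuffer()) is poisoned. A condensed sketch of that protocol, assuming ASAN_ENABLED and using placeholder fields in place of the Vector's own accessors:

#include <cstddef>

// Declared by the sanitizer runtime (and forward-declared near the top of this
// file when ASAN_ENABLED): within [begin, end), the addressable prefix changes
// from [begin, old_mid) to [begin, new_mid); the remainder is poisoned.
extern "C" void __sanitizer_annotate_contiguous_container(
    const void* begin, const void* end, const void* old_mid, const void* new_mid);

struct AnnotatedBufferSketch {
    char* buffer { nullptr };   // start of the allocation
    size_t capacity { 0 };      // total slots in the allocation

    char* endOfBuffer() { return buffer + capacity; }

    // Right after (re)allocation the whole buffer is addressable, so
    // old_mid == endOfBuffer(); poison everything past the live elements.
    void setInitialBufferSizeTo(size_t size)
    {
        __sanitizer_annotate_contiguous_container(buffer, endOfBuffer(), endOfBuffer(), buffer + size);
    }

    // The live region is about to grow or shrink: move the boundary.
    void bufferSizeWillChangeTo(size_t oldSize, size_t newSize)
    {
        __sanitizer_annotate_contiguous_container(buffer, endOfBuffer(), buffer + oldSize, buffer + newSize);
    }

    // Before freeing or byte-wise swapping the buffer, unpoison it entirely,
    // since ASan requires the annotation to be reset before deallocation.
    void setBufferSizeToFullCapacity(size_t oldSize)
    {
        __sanitizer_annotate_contiguous_container(buffer, endOfBuffer(), buffer + oldSize, endOfBuffer());
    }
};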
@@ -970,8 +1136,8 @@ inline void Vector<T, inlineCapacity, OverflowHandler>::reserveInitialCapacity(s
Base::allocateBuffer(initialCapacity);
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-void Vector<T, inlineCapacity, OverflowHandler>::shrinkCapacity(size_t newCapacity)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::shrinkCapacity(size_t newCapacity)
{
if (newCapacity >= capacity())
return;
@@ -979,10 +1145,13 @@ void Vector<T, inlineCapacity, OverflowHandler>::shrinkCapacity(size_t newCapaci
if (newCapacity < size())
shrink(newCapacity);
+ asanSetBufferSizeToFullCapacity();
+
T* oldBuffer = begin();
if (newCapacity > 0) {
if (Base::shouldReallocateBuffer(newCapacity)) {
Base::reallocateBuffer(newCapacity);
+ asanSetInitialBufferSizeTo(size());
return;
}
@@ -994,14 +1163,15 @@ void Vector<T, inlineCapacity, OverflowHandler>::shrinkCapacity(size_t newCapaci
Base::deallocateBuffer(oldBuffer);
Base::restoreInlineBufferIfNeeded();
+
+ asanSetInitialBufferSizeTo(size());
}
// Templatizing these is better than just letting the conversion happen implicitly,
// because for instance it allows a PassRefPtr to be appended to a RefPtr vector
// without refcount thrash.
-
-template<typename T, size_t inlineCapacity, typename OverflowHandler> template<typename U>
-void Vector<T, inlineCapacity, OverflowHandler>::append(const U* data, size_t dataSize)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename U>
+void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::append(const U* data, size_t dataSize)
{
size_t newSize = m_size + dataSize;
if (newSize > capacity()) {
@@ -1010,14 +1180,14 @@ void Vector<T, inlineCapacity, OverflowHandler>::append(const U* data, size_t da
}
if (newSize < m_size)
CRASH();
+ asanBufferSizeWillChangeTo(newSize);
T* dest = end();
- for (size_t i = 0; i < dataSize; ++i)
- new (NotNull, &dest[i]) T(data[i]);
+ VectorCopier<std::is_trivial<T>::value, U>::uninitializedCopy(data, std::addressof(data[dataSize]), dest);
m_size = newSize;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler> template<typename U>
-bool Vector<T, inlineCapacity, OverflowHandler>::tryAppend(const U* data, size_t dataSize)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename U>
+bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::tryAppend(const U* data, size_t dataSize)
{
size_t newSize = m_size + dataSize;
if (newSize > capacity()) {
@@ -1028,17 +1198,18 @@ bool Vector<T, inlineCapacity, OverflowHandler>::tryAppend(const U* data, size_t
}
if (newSize < m_size)
return false;
+ asanBufferSizeWillChangeTo(newSize);
T* dest = end();
- for (size_t i = 0; i < dataSize; ++i)
- new (NotNull, &dest[i]) T(data[i]);
+ VectorCopier<std::is_trivial<T>::value, U>::uninitializedCopy(data, std::addressof(data[dataSize]), dest);
m_size = newSize;
return true;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler> template<typename U>
-ALWAYS_INLINE void Vector<T, inlineCapacity, OverflowHandler>::append(U&& value)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename U>
+ALWAYS_INLINE void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::append(U&& value)
{
if (size() != capacity()) {
+ asanBufferSizeWillChangeTo(m_size + 1);
new (NotNull, end()) T(std::forward<U>(value));
++m_size;
return;
@@ -1047,8 +1218,34 @@ ALWAYS_INLINE void Vector<T, inlineCapacity, OverflowHandler>::append(U&& value)
appendSlowCase(std::forward<U>(value));
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler> template<typename U>
-void Vector<T, inlineCapacity, OverflowHandler>::appendSlowCase(U&& value)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename... Args>
+ALWAYS_INLINE void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::constructAndAppend(Args&&... args)
+{
+ if (size() != capacity()) {
+ asanBufferSizeWillChangeTo(m_size + 1);
+ new (NotNull, end()) T(std::forward<Args>(args)...);
+ ++m_size;
+ return;
+ }
+
+ constructAndAppendSlowCase(std::forward<Args>(args)...);
+}
+
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename... Args>
+ALWAYS_INLINE bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::tryConstructAndAppend(Args&&... args)
+{
+ if (size() != capacity()) {
+ asanBufferSizeWillChangeTo(m_size + 1);
+ new (NotNull, end()) T(std::forward<Args>(args)...);
+ ++m_size;
+ return true;
+ }
+
+ return tryConstructAndAppendSlowCase(std::forward<Args>(args)...);
+}
+
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename U>
+void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::appendSlowCase(U&& value)
{
ASSERT(size() == capacity());
@@ -1056,31 +1253,62 @@ void Vector<T, inlineCapacity, OverflowHandler>::appendSlowCase(U&& value)
ptr = expandCapacity(size() + 1, ptr);
ASSERT(begin());
+ asanBufferSizeWillChangeTo(m_size + 1);
new (NotNull, end()) T(std::forward<U>(*ptr));
++m_size;
}
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename... Args>
+void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::constructAndAppendSlowCase(Args&&... args)
+{
+ ASSERT(size() == capacity());
+
+ expandCapacity(size() + 1);
+ ASSERT(begin());
+
+ asanBufferSizeWillChangeTo(m_size + 1);
+ new (NotNull, end()) T(std::forward<Args>(args)...);
+ ++m_size;
+}
+
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename... Args>
+bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::tryConstructAndAppendSlowCase(Args&&... args)
+{
+ ASSERT(size() == capacity());
+
+ if (UNLIKELY(!tryExpandCapacity(size() + 1)))
+ return false;
+ ASSERT(begin());
+
+ asanBufferSizeWillChangeTo(m_size + 1);
+ new (NotNull, end()) T(std::forward<Args>(args)...);
+ ++m_size;
+ return true;
+}
+
// This version of append saves a branch in the case where you know that the
// vector's capacity is large enough for the append to succeed.
-template<typename T, size_t inlineCapacity, typename OverflowHandler> template<typename U>
-inline void Vector<T, inlineCapacity, OverflowHandler>::uncheckedAppend(U&& value)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename U>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::uncheckedAppend(U&& value)
{
ASSERT(size() < capacity());
+ asanBufferSizeWillChangeTo(m_size + 1);
+
auto ptr = std::addressof(value);
new (NotNull, end()) T(std::forward<U>(*ptr));
++m_size;
}
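[Editor's note] The comment above uncheckedAppend() states the whole contract: the caller must already have reserved enough capacity, so the per-append capacity branch is skipped. A minimal usage sketch, assuming a plain Vector<int>; count is a placeholder for a size known up front:

    Vector<int> values;
    values.reserveInitialCapacity(count);   // guarantee capacity once, up front
    for (size_t i = 0; i < count; ++i)
        values.uncheckedAppend(int(i));     // asserts size() < capacity(); no per-element branch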
-template<typename T, size_t inlineCapacity, typename OverflowHandler> template<typename U, size_t otherCapacity>
-inline void Vector<T, inlineCapacity, OverflowHandler>::appendVector(const Vector<U, otherCapacity>& val)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename U, size_t otherCapacity>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::appendVector(const Vector<U, otherCapacity>& val)
{
append(val.begin(), val.size());
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler> template<typename U>
-void Vector<T, inlineCapacity, OverflowHandler>::insert(size_t position, const U* data, size_t dataSize)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename U>
+void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insert(size_t position, const U* data, size_t dataSize)
{
ASSERT_WITH_SECURITY_IMPLICATION(position <= size());
size_t newSize = m_size + dataSize;
@@ -1090,15 +1318,15 @@ void Vector<T, inlineCapacity, OverflowHandler>::insert(size_t position, const U
}
if (newSize < m_size)
CRASH();
+ asanBufferSizeWillChangeTo(newSize);
T* spot = begin() + position;
TypeOperations::moveOverlapping(spot, end(), spot + dataSize);
- for (size_t i = 0; i < dataSize; ++i)
- new (NotNull, &spot[i]) T(data[i]);
+ VectorCopier<std::is_trivial<T>::value, U>::uninitializedCopy(data, std::addressof(data[dataSize]), spot);
m_size = newSize;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler> template<typename U>
-inline void Vector<T, inlineCapacity, OverflowHandler>::insert(size_t position, U&& value)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename U>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insert(size_t position, U&& value)
{
ASSERT_WITH_SECURITY_IMPLICATION(position <= size());
@@ -1108,30 +1336,33 @@ inline void Vector<T, inlineCapacity, OverflowHandler>::insert(size_t position,
ASSERT(begin());
}
+ asanBufferSizeWillChangeTo(m_size + 1);
+
T* spot = begin() + position;
TypeOperations::moveOverlapping(spot, end(), spot + 1);
new (NotNull, spot) T(std::forward<U>(*ptr));
++m_size;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler> template<typename U, size_t c>
-inline void Vector<T, inlineCapacity, OverflowHandler>::insertVector(size_t position, const Vector<U, c>& val)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> template<typename U, size_t c>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insertVector(size_t position, const Vector<U, c>& val)
{
insert(position, val.begin(), val.size());
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-inline void Vector<T, inlineCapacity, OverflowHandler>::remove(size_t position)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::remove(size_t position)
{
ASSERT_WITH_SECURITY_IMPLICATION(position < size());
T* spot = begin() + position;
spot->~T();
TypeOperations::moveOverlapping(spot + 1, end(), spot);
+ asanBufferSizeWillChangeTo(m_size - 1);
--m_size;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-inline void Vector<T, inlineCapacity, OverflowHandler>::remove(size_t position, size_t length)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::remove(size_t position, size_t length)
{
ASSERT_WITH_SECURITY_IMPLICATION(position <= size());
ASSERT_WITH_SECURITY_IMPLICATION(position + length <= size());
@@ -1139,19 +1370,83 @@ inline void Vector<T, inlineCapacity, OverflowHandler>::remove(size_t position,
T* endSpot = beginSpot + length;
TypeOperations::destruct(beginSpot, endSpot);
TypeOperations::moveOverlapping(endSpot, end(), beginSpot);
+ asanBufferSizeWillChangeTo(m_size - length);
m_size -= length;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-inline void Vector<T, inlineCapacity, OverflowHandler>::reverse()
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+template<typename U>
+inline bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::removeFirst(const U& value)
+{
+ return removeFirstMatching([&value] (const T& current) {
+ return current == value;
+ });
+}
+
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+template<typename MatchFunction>
+inline bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::removeFirstMatching(const MatchFunction& matches)
+{
+ for (size_t i = 0; i < size(); ++i) {
+ if (matches(at(i))) {
+ remove(i);
+ return true;
+ }
+ }
+ return false;
+}
+
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+template<typename U>
+inline unsigned Vector<T, inlineCapacity, OverflowHandler, minCapacity>::removeAll(const U& value)
+{
+ return removeAllMatching([&value] (const T& current) {
+ return current == value;
+ });
+}
+
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+template<typename MatchFunction>
+inline unsigned Vector<T, inlineCapacity, OverflowHandler, minCapacity>::removeAllMatching(const MatchFunction& matches)
+{
+ iterator holeBegin = end();
+ iterator holeEnd = end();
+ unsigned matchCount = 0;
+ for (auto it = begin(), itEnd = end(); it != itEnd; ++it) {
+ if (matches(*it)) {
+ if (holeBegin == end())
+ holeBegin = it;
+ else if (holeEnd != it) {
+ TypeOperations::moveOverlapping(holeEnd, it, holeBegin);
+ holeBegin += it - holeEnd;
+ }
+ holeEnd = it + 1;
+ it->~T();
+ ++matchCount;
+ }
+ }
+ if (holeEnd != end())
+ TypeOperations::moveOverlapping(holeEnd, end(), holeBegin);
+ asanBufferSizeWillChangeTo(m_size - matchCount);
+ m_size -= matchCount;
+ return matchCount;
+}
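[Editor's note] removeAllMatching() shifts each run of survivors over the hole left by the removed elements, so the whole operation is a single pass and m_size is adjusted once at the end. A hedged usage sketch with a Vector<int> built via append():

    Vector<int> numbers;
    for (int n = 1; n <= 6; ++n)
        numbers.append(n);
    unsigned removed = numbers.removeAllMatching([](int n) { return !(n % 2); });
    // removed == 3; the odd values 1, 3, 5 were compacted toward the front in one pass.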
+
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::reverse()
{
for (size_t i = 0; i < m_size / 2; ++i)
std::swap(at(i), at(m_size - 1 - i));
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-inline MallocPtr<T> Vector<T, inlineCapacity, OverflowHandler>::releaseBuffer()
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline MallocPtr<T> Vector<T, inlineCapacity, OverflowHandler, minCapacity>::releaseBuffer()
{
+ // FIXME: Find a way to preserve annotations on the returned buffer.
+ // ASan requires that all annotations are removed before deallocation,
+ // and MallocPtr doesn't implement that.
+ asanSetBufferSizeToFullCapacity();
+
auto buffer = Base::releaseBuffer();
if (inlineCapacity && !buffer && m_size) {
// If the vector had some data, but no buffer to release,
@@ -1162,11 +1457,12 @@ inline MallocPtr<T> Vector<T, inlineCapacity, OverflowHandler>::releaseBuffer()
memcpy(buffer.get(), data(), bytes);
}
m_size = 0;
+ // FIXME: Should we call Base::restoreInlineBufferIfNeeded() here?
return buffer;
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-inline void Vector<T, inlineCapacity, OverflowHandler>::checkConsistency()
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::checkConsistency()
{
#if !ASSERT_DISABLED
for (size_t i = 0; i < size(); ++i)
@@ -1174,23 +1470,14 @@ inline void Vector<T, inlineCapacity, OverflowHandler>::checkConsistency()
#endif
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-void deprecatedDeleteAllValues(const Vector<T, inlineCapacity, OverflowHandler>& collection)
-{
- typedef typename Vector<T, inlineCapacity, OverflowHandler>::const_iterator iterator;
- iterator end = collection.end();
- for (iterator it = collection.begin(); it != end; ++it)
- delete *it;
-}
-
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-inline void swap(Vector<T, inlineCapacity, OverflowHandler>& a, Vector<T, inlineCapacity, OverflowHandler>& b)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline void swap(Vector<T, inlineCapacity, OverflowHandler, minCapacity>& a, Vector<T, inlineCapacity, OverflowHandler, minCapacity>& b)
{
a.swap(b);
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-bool operator==(const Vector<T, inlineCapacity, OverflowHandler>& a, const Vector<T, inlineCapacity, OverflowHandler>& b)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+bool operator==(const Vector<T, inlineCapacity, OverflowHandler, minCapacity>& a, const Vector<T, inlineCapacity, OverflowHandler, minCapacity>& b)
{
if (a.size() != b.size())
return false;
@@ -1198,8 +1485,8 @@ bool operator==(const Vector<T, inlineCapacity, OverflowHandler>& a, const Vecto
return VectorTypeOperations<T>::compare(a.data(), b.data(), a.size());
}
-template<typename T, size_t inlineCapacity, typename OverflowHandler>
-inline bool operator!=(const Vector<T, inlineCapacity, OverflowHandler>& a, const Vector<T, inlineCapacity, OverflowHandler>& b)
+template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
+inline bool operator!=(const Vector<T, inlineCapacity, OverflowHandler, minCapacity>& a, const Vector<T, inlineCapacity, OverflowHandler, minCapacity>& b)
{
return !(a == b);
}
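[Editor's note] The try* entry points added in this patch (tryAppend(), tryConstructAndAppend()) report overflow or allocation failure to the caller instead of CRASH()ing, which matters when the element count comes from untrusted input. A sketch of the intended call pattern; data and length are placeholders:

    bool copyPayload(Vector<uint8_t>& out, const uint8_t* data, size_t length)
    {
        if (!out.tryAppend(data, length))       // false on size_t overflow or failed reallocation
            return false;
        return out.tryConstructAndAppend(0x00); // in-place construction, also failable
    }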
diff --git a/Source/WTF/wtf/VectorTraits.h b/Source/WTF/wtf/VectorTraits.h
index e3595cde1..b3fb077bb 100644
--- a/Source/WTF/wtf/VectorTraits.h
+++ b/Source/WTF/wtf/VectorTraits.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2006, 2007, 2008, 2016 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -21,11 +21,11 @@
#ifndef WTF_VectorTraits_h
#define WTF_VectorTraits_h
-#include <wtf/OwnPtr.h>
#include <wtf/Ref.h>
#include <wtf/RefPtr.h>
#include <utility>
#include <memory>
+#include <type_traits>
namespace WTF {
@@ -52,7 +52,7 @@ namespace WTF {
static const bool canInitializeWithMemset = false;
static const bool canMoveWithMemcpy = true;
static const bool canCopyWithMemcpy = true;
- static const bool canFillWithMemset = sizeof(T) == sizeof(char);
+ static const bool canFillWithMemset = sizeof(T) == sizeof(char) && std::is_integral<T>::value;
static const bool canCompareWithMemcmp = true;
};
@@ -66,19 +66,13 @@ namespace WTF {
static const bool canCompareWithMemcmp = true;
};
- // We know OwnPtr and RefPtr are simple enough that initializing to 0 and moving with memcpy
- // (and then not destructing the original) will totally work
- template<typename P>
- struct VectorTraits<RefPtr<P>> : SimpleClassVectorTraits { };
+ // We know smart pointers are simple enough that initializing to 0 and moving with memcpy
+ // (and then not destructing the original) will work.
- template<typename P>
- struct VectorTraits<OwnPtr<P>> : SimpleClassVectorTraits { };
-
- template<typename P>
- struct VectorTraits<Ref<P>> : SimpleClassVectorTraits { };
-
- template<>
- struct VectorTraits<AtomicString> : SimpleClassVectorTraits { };
+ template<typename P> struct VectorTraits<RefPtr<P>> : SimpleClassVectorTraits { };
+ template<typename P> struct VectorTraits<std::unique_ptr<P>> : SimpleClassVectorTraits { };
+ template<typename P> struct VectorTraits<Ref<P>> : SimpleClassVectorTraits { };
+ template<> struct VectorTraits<AtomicString> : SimpleClassVectorTraits { };
template<typename First, typename Second>
struct VectorTraits<std::pair<First, Second>>
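[Editor's note] VectorTraits is how a type opts in to the memset/memcpy fast paths used by Vector. A type that is trivially movable in the same sense as the smart pointers above can inherit SimpleClassVectorTraits; this is a sketch only, and MyHandle is a hypothetical type:

    namespace WTF {
    // MyHandle wraps a raw pointer, can be zero-filled, and has no self-referencing state,
    // so moving it with memcpy (and not destructing the moved-from slot) is safe.
    template<> struct VectorTraits<MyHandle> : SimpleClassVectorTraits { };
    }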
diff --git a/Source/WTF/wtf/WTFThreadData.cpp b/Source/WTF/wtf/WTFThreadData.cpp
index 030fcf03e..7afbccdff 100644
--- a/Source/WTF/wtf/WTFThreadData.cpp
+++ b/Source/WTF/wtf/WTFThreadData.cpp
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -35,56 +35,42 @@
namespace WTF {
+#if !USE(PTHREAD_GETSPECIFIC_DIRECT)
ThreadSpecific<WTFThreadData>* WTFThreadData::staticData;
+#endif
WTFThreadData::WTFThreadData()
: m_apiData(0)
- , m_atomicStringTable(0)
+ , m_currentAtomicStringTable(0)
+ , m_defaultAtomicStringTable(0)
, m_atomicStringTableDestructor(0)
-#if !USE(WEB_THREAD)
- , m_defaultIdentifierTable(new JSC::IdentifierTable())
- , m_currentIdentifierTable(m_defaultIdentifierTable)
-#endif
, m_stackBounds(StackBounds::currentThreadStackBounds())
#if ENABLE(STACK_STATS)
, m_stackStats()
#endif
+ , m_savedStackPointerAtVMEntry(0)
+ , m_savedLastStackTop(stack().origin())
{
-#if USE(WEB_THREAD)
- static JSC::IdentifierTable* sharedIdentifierTable = new JSC::IdentifierTable();
- if (pthread_main_np() || isWebThread())
- m_defaultIdentifierTable = sharedIdentifierTable;
- else
- m_defaultIdentifierTable = new JSC::IdentifierTable();
-
- m_currentIdentifierTable = m_defaultIdentifierTable;
-#endif
AtomicStringTable::create(*this);
+ m_currentAtomicStringTable = m_defaultAtomicStringTable;
}
WTFThreadData::~WTFThreadData()
{
if (m_atomicStringTableDestructor)
- m_atomicStringTableDestructor(m_atomicStringTable);
- delete m_defaultIdentifierTable;
+ m_atomicStringTableDestructor(m_defaultAtomicStringTable);
}
-} // namespace WTF
-
-namespace JSC {
-
-IdentifierTable::~IdentifierTable()
-{
- HashSet<StringImpl*>::iterator end = m_table.end();
- for (HashSet<StringImpl*>::iterator iter = m_table.begin(); iter != end; ++iter)
- (*iter)->setIsIdentifier(false);
-}
-
-HashSet<StringImpl*>::AddResult IdentifierTable::add(StringImpl* value)
+#if USE(PTHREAD_GETSPECIFIC_DIRECT)
+WTFThreadData& WTFThreadData::createAndRegisterForGetspecificDirect()
{
- HashSet<StringImpl*>::AddResult result = m_table.add(value);
- (*result.iterator)->setIsIdentifier(true);
- return result;
+ WTFThreadData* data = new WTFThreadData;
+ _pthread_setspecific_direct(directKey, data);
+ pthread_key_init_np(directKey, [](void* data){
+ delete static_cast<WTFThreadData*>(data);
+ });
+ return *data;
}
+#endif
-} // namespace JSC
+} // namespace WTF
diff --git a/Source/WTF/wtf/WTFThreadData.h b/Source/WTF/wtf/WTFThreadData.h
index 0c6a54d43..be215c7d4 100644
--- a/Source/WTF/wtf/WTFThreadData.h
+++ b/Source/WTF/wtf/WTFThreadData.h
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -33,27 +33,19 @@
#include <wtf/StackBounds.h>
#include <wtf/StackStats.h>
#include <wtf/text/StringHash.h>
-#include <wtf/ThreadSpecific.h>
-#include <wtf/Threading.h>
-
-// FIXME: This is a temporary layering violation until we move more of the string code from JavaScriptCore to WTF.
-namespace JSC {
-
-class IdentifierTable {
- WTF_MAKE_FAST_ALLOCATED;
-public:
- WTF_EXPORT_PRIVATE ~IdentifierTable();
- WTF_EXPORT_PRIVATE HashSet<StringImpl*>::AddResult add(StringImpl*);
- template<typename U, typename V> HashSet<StringImpl*>::AddResult add(U);
-
- bool remove(StringImpl* identifier) { return m_table.remove(identifier); }
+#if USE(APPLE_INTERNAL_SDK)
+#include <System/pthread_machdep.h>
+#endif
-private:
- HashSet<StringImpl*> m_table;
-};
+#if defined(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY1)
+#define USE_PTHREAD_GETSPECIFIC_DIRECT 1
+#endif
-}
+#if !USE(PTHREAD_GETSPECIFIC_DIRECT)
+#include <wtf/ThreadSpecific.h>
+#include <wtf/Threading.h>
+#endif
namespace WTF {
@@ -69,24 +61,14 @@ public:
AtomicStringTable* atomicStringTable()
{
- return m_atomicStringTable;
+ return m_currentAtomicStringTable;
}
- JSC::IdentifierTable* currentIdentifierTable()
+ AtomicStringTable* setCurrentAtomicStringTable(AtomicStringTable* atomicStringTable)
{
- return m_currentIdentifierTable;
- }
-
- JSC::IdentifierTable* setCurrentIdentifierTable(JSC::IdentifierTable* identifierTable)
- {
- JSC::IdentifierTable* oldIdentifierTable = m_currentIdentifierTable;
- m_currentIdentifierTable = identifierTable;
- return oldIdentifierTable;
- }
-
- void resetCurrentIdentifierTable()
- {
- m_currentIdentifierTable = m_defaultIdentifierTable;
+ AtomicStringTable* oldAtomicStringTable = m_currentAtomicStringTable;
+ m_currentAtomicStringTable = atomicStringTable;
+ return oldAtomicStringTable;
}
const StackBounds& stack()
@@ -106,20 +88,47 @@ public:
}
#endif
+ void* savedStackPointerAtVMEntry()
+ {
+ return m_savedStackPointerAtVMEntry;
+ }
+
+ void setSavedStackPointerAtVMEntry(void* stackPointerAtVMEntry)
+ {
+ m_savedStackPointerAtVMEntry = stackPointerAtVMEntry;
+ }
+
+ void* savedLastStackTop()
+ {
+ return m_savedLastStackTop;
+ }
+
+ void setSavedLastStackTop(void* lastStackTop)
+ {
+ m_savedLastStackTop = lastStackTop;
+ }
+
void* m_apiData;
private:
- AtomicStringTable* m_atomicStringTable;
+ AtomicStringTable* m_currentAtomicStringTable;
+ AtomicStringTable* m_defaultAtomicStringTable;
AtomicStringTableDestructor m_atomicStringTableDestructor;
- JSC::IdentifierTable* m_defaultIdentifierTable;
- JSC::IdentifierTable* m_currentIdentifierTable;
StackBounds m_stackBounds;
#if ENABLE(STACK_STATS)
StackStats::PerThreadStats m_stackStats;
#endif
+ void* m_savedStackPointerAtVMEntry;
+ void* m_savedLastStackTop;
+#if USE(PTHREAD_GETSPECIFIC_DIRECT)
+ static const pthread_key_t directKey = __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY1;
+ WTF_EXPORT_PRIVATE static WTFThreadData& createAndRegisterForGetspecificDirect();
+#else
static WTF_EXPORTDATA ThreadSpecific<WTFThreadData>* staticData;
+#endif
+
friend WTFThreadData& wtfThreadData();
friend class AtomicStringTable;
};
@@ -132,14 +141,21 @@ inline WTFThreadData& wtfThreadData()
// WRT JavaScriptCore:
// wtfThreadData() is initially called from initializeThreading(), ensuring
// this is initially called in a pthread_once locked context.
+#if !USE(PTHREAD_GETSPECIFIC_DIRECT)
if (!WTFThreadData::staticData)
WTFThreadData::staticData = new ThreadSpecific<WTFThreadData>;
return **WTFThreadData::staticData;
+#else
+ if (WTFThreadData* data = static_cast<WTFThreadData*>(_pthread_getspecific_direct(WTFThreadData::directKey)))
+ return *data;
+ return WTFThreadData::createAndRegisterForGetspecificDirect();
+#endif
}
} // namespace WTF
using WTF::WTFThreadData;
using WTF::wtfThreadData;
+using WTF::AtomicStringTable;
#endif // WTFThreadData_h
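[Editor's note] This patch replaces the per-thread JSC::IdentifierTable plumbing with an AtomicStringTable owned by WTFThreadData, and setCurrentAtomicStringTable() returns the previous table so callers can restore it. A hedged sketch of that save/restore pattern; temporaryTable is a placeholder for a table the caller owns:

    AtomicStringTable* previous = wtfThreadData().setCurrentAtomicStringTable(temporaryTable);
    // ... work whose atomic strings should be interned into temporaryTable ...
    wtfThreadData().setCurrentAtomicStringTable(previous);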
diff --git a/Source/WTF/wtf/WallTime.cpp b/Source/WTF/wtf/WallTime.cpp
new file mode 100644
index 000000000..1622ab3c6
--- /dev/null
+++ b/Source/WTF/wtf/WallTime.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "WallTime.h"
+
+#include "CurrentTime.h"
+#include "MonotonicTime.h"
+#include "PrintStream.h"
+#include "TimeWithDynamicClockType.h"
+
+namespace WTF {
+
+WallTime WallTime::now()
+{
+ return fromRawSeconds(currentTime());
+}
+
+MonotonicTime WallTime::approximateMonotonicTime() const
+{
+ return *this - now() + MonotonicTime::now();
+}
+
+void WallTime::dump(PrintStream& out) const
+{
+ out.print("Wall(", m_value, " sec)");
+}
+
+} // namespace WTF
+
+
diff --git a/Source/WTF/wtf/WallTime.h b/Source/WTF/wtf/WallTime.h
new file mode 100644
index 000000000..0a85e4838
--- /dev/null
+++ b/Source/WTF/wtf/WallTime.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_WallTime_h
+#define WTF_WallTime_h
+
+#include <wtf/ClockType.h>
+#include <wtf/Seconds.h>
+
+namespace WTF {
+
+class MonotonicTime;
+class PrintStream;
+
+// The current time according to a wall clock (aka real time clock). This uses floating point
+// internally so that you can reason about infinity and other things that arise in math. It's
+// acceptable to use this to wrap NaN times, negative times, and infinite times, so long as they
+// are relative to the same clock. Specifically, WallTime should be used in agreement with the
+// principle that WallTime::now().secondsSinceEpoch().value() is the same as WTF::currentTime().
+class WallTime {
+public:
+ static const ClockType clockType = ClockType::Wall;
+
+ // This is the epoch. So, x.secondsSinceEpoch() should be the same as x - WallTime().
+ WallTime() { }
+
+ // Call this if you know for sure that the double represents time according to
+ // WTF::currentTime(). It must be in seconds and it must be from the same time source.
+ static WallTime fromRawSeconds(double value)
+ {
+ WallTime result;
+ result.m_value = value;
+ return result;
+ }
+
+ WTF_EXPORT_PRIVATE static WallTime now();
+
+ static WallTime infinity() { return fromRawSeconds(std::numeric_limits<double>::infinity()); }
+
+ Seconds secondsSinceEpoch() const { return Seconds(m_value); }
+
+ WallTime approximateWallTime() const { return *this; }
+ WTF_EXPORT_PRIVATE MonotonicTime approximateMonotonicTime() const;
+
+ explicit operator bool() const { return !!m_value; }
+
+ WallTime operator+(Seconds other) const
+ {
+ return fromRawSeconds(m_value + other.value());
+ }
+
+ WallTime operator-(Seconds other) const
+ {
+ return fromRawSeconds(m_value - other.value());
+ }
+
+ // Time is a scalar and scalars can be negated as this could arise from algebraic
+ // transformations. So, we allow it.
+ WallTime operator-() const
+ {
+ return fromRawSeconds(-m_value);
+ }
+
+ WallTime& operator+=(Seconds other)
+ {
+ return *this = *this + other;
+ }
+
+ WallTime& operator-=(Seconds other)
+ {
+ return *this = *this - other;
+ }
+
+ Seconds operator-(WallTime other) const
+ {
+ return Seconds(m_value - other.m_value);
+ }
+
+ bool operator==(WallTime other) const
+ {
+ return m_value == other.m_value;
+ }
+
+ bool operator!=(WallTime other) const
+ {
+ return m_value != other.m_value;
+ }
+
+ bool operator<(WallTime other) const
+ {
+ return m_value < other.m_value;
+ }
+
+ bool operator>(WallTime other) const
+ {
+ return m_value > other.m_value;
+ }
+
+ bool operator<=(WallTime other) const
+ {
+ return m_value <= other.m_value;
+ }
+
+ bool operator>=(WallTime other) const
+ {
+ return m_value >= other.m_value;
+ }
+
+ WTF_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+private:
+ double m_value { 0 };
+};
+
+WTF_EXPORT_PRIVATE void sleep(WallTime);
+
+} // namespace WTF
+
+using WTF::WallTime;
+
+#endif // WTF_WallTime_h
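[Editor's note] WallTime is just a double number of seconds wrapped in a type, so the operators above are plain floating-point arithmetic. A short sketch using only members declared in this header (plus the Seconds and MonotonicTime types it refers to):

    WallTime start = WallTime::now();
    WallTime deadline = start + Seconds(1.5);          // WallTime + Seconds -> WallTime
    Seconds elapsed = WallTime::now() - start;         // WallTime - WallTime -> Seconds
    bool missed = WallTime::now() >= deadline;
    MonotonicTime estimate = deadline.approximateMonotonicTime();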
diff --git a/Source/WTF/wtf/WeakPtr.h b/Source/WTF/wtf/WeakPtr.h
index 97da507aa..f15160bec 100644
--- a/Source/WTF/wtf/WeakPtr.h
+++ b/Source/WTF/wtf/WeakPtr.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2013 Google, Inc. All Rights Reserved.
+ * Copyright (C) 2015 Apple Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,8 +28,7 @@
#define WTF_WeakPtr_h
#include <wtf/Noncopyable.h>
-#include <wtf/PassRefPtr.h>
-#include <wtf/RefPtr.h>
+#include <wtf/Ref.h>
#include <wtf/ThreadSafeRefCounted.h>
#include <wtf/Threading.h>
@@ -38,14 +38,15 @@
namespace WTF {
+template<typename T> class WeakPtr;
+template<typename T> class WeakPtrFactory;
+
+// Note: WeakReference is an implementation detail, and should not be used directly.
template<typename T>
class WeakReference : public ThreadSafeRefCounted<WeakReference<T>> {
WTF_MAKE_NONCOPYABLE(WeakReference<T>);
WTF_MAKE_FAST_ALLOCATED;
public:
- static PassRefPtr<WeakReference<T>> create(T* ptr) { return adoptRef(new WeakReference(ptr)); }
- static PassRefPtr<WeakReference<T>> createUnbound() { return adoptRef(new WeakReference()); }
-
T* get() const
{
#if USE(WEB_THREAD)
@@ -63,20 +64,14 @@ public:
#else
ASSERT(m_boundThread == currentThread());
#endif
- m_ptr = 0;
- }
-
- void bindTo(T* ptr)
- {
- ASSERT(!m_ptr);
-#ifndef NDEBUG
- m_boundThread = currentThread();
-#endif
- m_ptr = ptr;
+ m_ptr = nullptr;
}
private:
- WeakReference() : m_ptr(0) { }
+ friend class WeakPtr<T>;
+ friend class WeakPtrFactory<T>;
+
+ static Ref<WeakReference<T>> create(T* ptr) { return adoptRef(*new WeakReference(ptr)); }
explicit WeakReference(T* ptr)
: m_ptr(ptr)
@@ -96,18 +91,25 @@ template<typename T>
class WeakPtr {
WTF_MAKE_FAST_ALLOCATED;
public:
- WeakPtr() { }
- WeakPtr(PassRefPtr<WeakReference<T>> ref) : m_ref(ref) { }
+ WeakPtr() : m_ref(WeakReference<T>::create(nullptr)) { }
+ WeakPtr(const WeakPtr& o) : m_ref(o.m_ref.copyRef()) { }
+ template<typename U> WeakPtr(const WeakPtr<U>& o) : m_ref(o.m_ref.copyRef()) { }
T* get() const { return m_ref->get(); }
- explicit operator bool() const { return m_ref->get(); }
+ operator bool() const { return m_ref->get(); }
+
+ WeakPtr& operator=(const WeakPtr& o) { m_ref = o.m_ref.copyRef(); return *this; }
+ WeakPtr& operator=(std::nullptr_t) { m_ref = WeakReference<T>::create(nullptr); return *this; }
T* operator->() const { return m_ref->get(); }
- WeakPtr& operator=(std::nullptr_t) { m_ref = WeakReference<T>::create(nullptr); return *this; }
+ void clear() { m_ref = WeakReference<T>::create(nullptr); }
private:
- RefPtr<WeakReference<T>> m_ref;
+ friend class WeakPtrFactory<T>;
+ WeakPtr(Ref<WeakReference<T>>&& ref) : m_ref(std::forward<Ref<WeakReference<T>>>(ref)) { }
+
+ Ref<WeakReference<T>> m_ref;
};
template<typename T>
@@ -117,16 +119,10 @@ class WeakPtrFactory {
public:
explicit WeakPtrFactory(T* ptr) : m_ref(WeakReference<T>::create(ptr)) { }
- WeakPtrFactory(PassRefPtr<WeakReference<T>> ref, T* ptr)
- : m_ref(ref)
- {
- m_ref->bindTo(ptr);
- }
-
~WeakPtrFactory() { m_ref->clear(); }
// We should consider having createWeakPtr populate m_ref the first time createWeakPtr is called.
- WeakPtr<T> createWeakPtr() { return WeakPtr<T>(m_ref); }
+ WeakPtr<T> createWeakPtr() const { return WeakPtr<T>(m_ref.copyRef()); }
void revokeAll()
{
@@ -137,9 +133,39 @@ public:
}
private:
- RefPtr<WeakReference<T>> m_ref;
+ Ref<WeakReference<T>> m_ref;
};
+template<typename T, typename U> inline bool operator==(const WeakPtr<T>& a, const WeakPtr<U>& b)
+{
+ return a.get() == b.get();
+}
+
+template<typename T, typename U> inline bool operator==(const WeakPtr<T>& a, U* b)
+{
+ return a.get() == b;
+}
+
+template<typename T, typename U> inline bool operator==(T* a, const WeakPtr<U>& b)
+{
+ return a == b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const WeakPtr<T>& a, const WeakPtr<U>& b)
+{
+ return a.get() != b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const WeakPtr<T>& a, U* b)
+{
+ return a.get() != b;
+}
+
+template<typename T, typename U> inline bool operator!=(T* a, const WeakPtr<U>& b)
+{
+ return a != b.get();
+}
+
} // namespace WTF
using WTF::WeakPtr;
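[Editor's note] After this change every WeakPtr handed out by a WeakPtrFactory shares one WeakReference, so revokeAll() (or the factory's destructor) nulls them all at once. A minimal sketch; Widget, widget, and doSomething() are hypothetical:

    class Widget {
    public:
        Widget() : m_weakFactory(this) { }
        WeakPtr<Widget> createWeakPtr() const { return m_weakFactory.createWeakPtr(); }
        void doSomething();
    private:
        WeakPtrFactory<Widget> m_weakFactory;  // destroyed with Widget, revoking outstanding WeakPtrs
    };

    WeakPtr<Widget> weak = widget->createWeakPtr();  // widget is a Widget* that may die later
    if (weak)               // implicit bool conversion added by this patch
        weak->doSomething();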
diff --git a/Source/WTF/wtf/WeakRandom.h b/Source/WTF/wtf/WeakRandom.h
new file mode 100644
index 000000000..4d59e44c0
--- /dev/null
+++ b/Source/WTF/wtf/WeakRandom.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2009, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Vigna, Sebastiano (2014). "Further scramblings of Marsaglia's xorshift
+ * generators". arXiv:1404.0390 (http://arxiv.org/abs/1404.0390)
+ *
+ * See also https://en.wikipedia.org/wiki/Xorshift.
+ */
+
+#ifndef WeakRandom_h
+#define WeakRandom_h
+
+#include <limits.h>
+#include <wtf/CryptographicallyRandomNumber.h>
+#include <wtf/StdLibExtras.h>
+
+namespace WTF {
+
+// The code used to generate random numbers is inlined manually in JIT code.
+// So it needs to stay in sync with the JIT implementation.
+class WeakRandom {
+public:
+ WeakRandom(unsigned seed = cryptographicallyRandomNumber())
+ {
+ setSeed(seed);
+ }
+
+ void setSeed(unsigned seed)
+ {
+ m_seed = seed;
+
+ // A zero seed would cause an infinite series of zeroes.
+ if (!seed)
+ seed = 1;
+
+ m_low = seed;
+ m_high = seed;
+ advance();
+ }
+
+ unsigned seed() const { return m_seed; }
+
+ double get()
+ {
+ uint64_t value = advance() & ((1ULL << 53) - 1);
+ return value * (1.0 / (1ULL << 53));
+ }
+
+ unsigned getUint32()
+ {
+ return static_cast<unsigned>(advance());
+ }
+
+ unsigned getUint32(unsigned limit)
+ {
+ if (limit <= 1)
+ return 0;
+ uint64_t cutoff = (static_cast<uint64_t>(std::numeric_limits<unsigned>::max()) + 1) / limit * limit;
+ for (;;) {
+ uint64_t value = getUint32();
+ if (value >= cutoff)
+ continue;
+ return value % limit;
+ }
+ }
+
+ static unsigned lowOffset() { return OBJECT_OFFSETOF(WeakRandom, m_low); }
+ static unsigned highOffset() { return OBJECT_OFFSETOF(WeakRandom, m_high); }
+
+private:
+ uint64_t advance()
+ {
+ uint64_t x = m_low;
+ uint64_t y = m_high;
+ m_low = y;
+ x ^= x << 23;
+ x ^= x >> 17;
+ x ^= y ^ (y >> 26);
+ m_high = x;
+ return x + y;
+ }
+
+ unsigned m_seed;
+ uint64_t m_low;
+ uint64_t m_high;
+};
+
+} // namespace WTF
+
+using WTF::WeakRandom;
+
+#endif // WeakRandom_h
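[Editor's note] WeakRandom is a fast xorshift-style generator (per the Vigna reference above) seeded from cryptographicallyRandomNumber() by default; it is not suitable for anything security-sensitive. A usage sketch using only the members defined here:

    WeakRandom random;                    // default-seeded
    double unit = random.get();           // uniform in [0, 1), 53 bits of precision
    unsigned roll = random.getUint32(6);  // unbiased value in [0, 6) via the rejection loop
    WeakRandom replay(random.seed());     // reusing a seed reproduces the same sequence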
diff --git a/Source/WTF/wtf/WindowsExtras.h b/Source/WTF/wtf/WindowsExtras.h
index 16bc86596..061db5379 100644
--- a/Source/WTF/wtf/WindowsExtras.h
+++ b/Source/WTF/wtf/WindowsExtras.h
@@ -39,34 +39,17 @@ namespace WTF {
inline HRESULT getRegistryValue(HKEY hkey, LPCWSTR pszSubKey, LPCWSTR pszValue, LPDWORD pdwType, LPVOID pvData, LPDWORD pcbData)
{
-#if OS(WINCE)
- HKEY key;
- if (::RegOpenKeyExW(hkey, pszSubKey, 0, 0, &key) != ERROR_SUCCESS)
- return ERROR_INVALID_NAME;
- HRESULT result = ::RegQueryValueExW(key, pszValue, 0, pdwType, static_cast<LPBYTE>(pvData), pcbData);
- ::RegCloseKey(key);
- return result;
-#else
return ::SHGetValueW(hkey, pszSubKey, pszValue, pdwType, pvData, pcbData);
-#endif
}
inline void* getWindowPointer(HWND hWnd, int index)
{
-#if OS(WINCE)
- return reinterpret_cast<void*>(::GetWindowLong(hWnd, index));
-#else
return reinterpret_cast<void*>(::GetWindowLongPtr(hWnd, index));
-#endif
}
inline void* setWindowPointer(HWND hWnd, int index, void* value)
{
-#if OS(WINCE)
- return reinterpret_cast<void*>(::SetWindowLong(hWnd, index, reinterpret_cast<LONG>(value)));
-#else
return reinterpret_cast<void*>(::SetWindowLongPtr(hWnd, index, reinterpret_cast<LONG_PTR>(value)));
-#endif
}
} // namespace WTF
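[Editor's note] With the WinCE branches gone, these helpers are thin wrappers over the Win32 desktop APIs. A hedged sketch of the window-pointer pair; hwnd, myWindow, and MyWindow are placeholders, and GWLP_USERDATA is the usual per-window slot:

    // Stash a C++ object on an HWND and fetch it back later, e.g. in a window procedure.
    setWindowPointer(hwnd, GWLP_USERDATA, myWindow);
    auto* window = static_cast<MyWindow*>(getWindowPointer(hwnd, GWLP_USERDATA));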
diff --git a/Source/WTF/wtf/WordLock.cpp b/Source/WTF/wtf/WordLock.cpp
new file mode 100644
index 000000000..46170ad2c
--- /dev/null
+++ b/Source/WTF/wtf/WordLock.cpp
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "WordLock.h"
+
+#include "ThreadSpecific.h"
+#include "ThreadingPrimitives.h"
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+namespace WTF {
+
+namespace {
+
+// This data structure serves three purposes:
+//
+// 1) A parking mechanism for threads that go to sleep. That involves just a system mutex and
+// condition variable.
+//
+// 2) A queue node for when a thread is on some WordLock's queue.
+//
+// 3) The queue head. This is kind of funky. When a thread is the head of a queue, it also serves as
+// the basic queue bookkeeping data structure. When a thread is dequeued, the next thread in the
+// queue takes on the queue head duties.
+struct ThreadData {
+ // The parking mechanism.
+ bool shouldPark { false };
+ std::mutex parkingLock;
+ std::condition_variable parkingCondition;
+
+ // The queue node.
+ ThreadData* nextInQueue { nullptr };
+
+ // The queue itself.
+ ThreadData* queueTail { nullptr };
+};
+
+ThreadSpecific<ThreadData, CanBeGCThread::True>* threadData;
+
+ThreadData* myThreadData()
+{
+ static std::once_flag initializeOnce;
+ std::call_once(
+ initializeOnce,
+ [] {
+ threadData = new ThreadSpecific<ThreadData, CanBeGCThread::True>();
+ });
+
+ return *threadData;
+}
+
+} // anonymous namespace
+
+NEVER_INLINE void WordLockBase::lockSlow()
+{
+ unsigned spinCount = 0;
+
+ // This magic number turns out to be optimal based on past JikesRVM experiments.
+ const unsigned spinLimit = 40;
+
+ for (;;) {
+ uintptr_t currentWordValue = m_word.load();
+
+ if (!(currentWordValue & isLockedBit)) {
+ // It's not possible for someone to hold the queue lock while the lock itself is no longer
+ // held, since we will only attempt to acquire the queue lock when the lock is held and
+ // the queue lock prevents unlock.
+ ASSERT(!(currentWordValue & isQueueLockedBit));
+ if (m_word.compareExchangeWeak(currentWordValue, currentWordValue | isLockedBit)) {
+ // Success! We acquired the lock.
+ return;
+ }
+ }
+
+ // If there is no queue and we haven't spun too much, we can just try to spin around again.
+ if (!(currentWordValue & ~queueHeadMask) && spinCount < spinLimit) {
+ spinCount++;
+ std::this_thread::yield();
+ continue;
+ }
+
+ // Need to put ourselves on the queue. Create the queue if one does not exist. This requires
+ // owning the queue for a little bit. The lock that controls the queue is itself a spinlock.
+ // But before we acquire the queue spinlock, we make sure that we have a ThreadData for this
+ // thread.
+ ThreadData* me = myThreadData();
+ ASSERT(!me->shouldPark);
+ ASSERT(!me->nextInQueue);
+ ASSERT(!me->queueTail);
+
+ // Reload the current word value, since some time may have passed.
+ currentWordValue = m_word.load();
+
+ // We proceed only if the queue lock is not held, the WordLock is held, and we succeed in
+ // acquiring the queue lock.
+ if ((currentWordValue & isQueueLockedBit)
+ || !(currentWordValue & isLockedBit)
+ || !m_word.compareExchangeWeak(currentWordValue, currentWordValue | isQueueLockedBit)) {
+ std::this_thread::yield();
+ continue;
+ }
+
+ me->shouldPark = true;
+
+ // We own the queue. Nobody can enqueue or dequeue until we're done. Also, it's not possible
+ // to release the WordLock while we hold the queue lock.
+ ThreadData* queueHead = bitwise_cast<ThreadData*>(currentWordValue & ~queueHeadMask);
+ if (queueHead) {
+ // Put this thread at the end of the queue.
+ queueHead->queueTail->nextInQueue = me;
+ queueHead->queueTail = me;
+
+ // Release the queue lock.
+ currentWordValue = m_word.load();
+ ASSERT(currentWordValue & ~queueHeadMask);
+ ASSERT(currentWordValue & isQueueLockedBit);
+ ASSERT(currentWordValue & isLockedBit);
+ m_word.store(currentWordValue & ~isQueueLockedBit);
+ } else {
+ // Make this thread be the queue-head.
+ queueHead = me;
+ me->queueTail = me;
+
+ // Release the queue lock and install ourselves as the head. No need for a CAS loop, since
+ // we own the queue lock.
+ currentWordValue = m_word.load();
+ ASSERT(!(currentWordValue & ~queueHeadMask)); // The queue-head bits must still be empty; we hold the queue lock.
+ ASSERT(currentWordValue & isQueueLockedBit);
+ ASSERT(currentWordValue & isLockedBit);
+ uintptr_t newWordValue = currentWordValue;
+ newWordValue |= bitwise_cast<uintptr_t>(queueHead);
+ newWordValue &= ~isQueueLockedBit;
+ m_word.store(newWordValue);
+ }
+
+ // At this point everyone who acquires the queue lock will see me on the queue, and anyone who
+ // acquires me's lock will see that me wants to park. Note that shouldPark may have been
+ // cleared as soon as the queue lock was released above, but it will happen while the
+ // releasing thread holds me's parkingLock.
+
+ {
+ std::unique_lock<std::mutex> locker(me->parkingLock);
+ while (me->shouldPark)
+ me->parkingCondition.wait(locker);
+ }
+
+ ASSERT(!me->shouldPark);
+ ASSERT(!me->nextInQueue);
+ ASSERT(!me->queueTail);
+
+ // Now we can loop around and try to acquire the lock again.
+ }
+}
+
+NEVER_INLINE void WordLockBase::unlockSlow()
+{
+ // The fast path can fail either because of spurious weak CAS failure, or because someone put a
+ // thread on the queue, or the queue lock is held. If the queue lock is held, it can only be
+ // because someone *will* enqueue a thread onto the queue.
+
+ // Acquire the queue lock, or release the lock. This loop handles lock release, in case the
+ // fast path's weak CAS spuriously failed, and queue lock acquisition, if there is actually
+ // something interesting on the queue.
+ for (;;) {
+ uintptr_t currentWordValue = m_word.load();
+
+ ASSERT(currentWordValue & isLockedBit);
+
+ if (currentWordValue == isLockedBit) {
+ if (m_word.compareExchangeWeak(isLockedBit, 0)) {
+ // The fast path's weak CAS had spuriously failed, and now we succeeded. The lock is
+ // unlocked and we're done!
+ return;
+ }
+ // Loop around and try again.
+ std::this_thread::yield();
+ continue;
+ }
+
+ if (currentWordValue & isQueueLockedBit) {
+ std::this_thread::yield();
+ continue;
+ }
+
+ // If it wasn't just a spurious weak CAS failure and if the queue lock is not held, then there
+ // must be an entry on the queue.
+ ASSERT(currentWordValue & ~queueHeadMask);
+
+ if (m_word.compareExchangeWeak(currentWordValue, currentWordValue | isQueueLockedBit))
+ break;
+ }
+
+ uintptr_t currentWordValue = m_word.load();
+
+ // After we acquire the queue lock, the WordLock must still be held and the queue must be
+ // non-empty. The queue must be non-empty since only the lockSlow() method could have held the
+ // queue lock and if it did then it only releases it after putting something on the queue.
+ ASSERT(currentWordValue & isLockedBit);
+ ASSERT(currentWordValue & isQueueLockedBit);
+ ThreadData* queueHead = bitwise_cast<ThreadData*>(currentWordValue & ~queueHeadMask);
+ ASSERT(queueHead);
+
+ ThreadData* newQueueHead = queueHead->nextInQueue;
+ // Either this was the only thread on the queue, in which case we delete the queue, or there
+ // are still more threads on the queue, in which case we create a new queue head.
+ if (newQueueHead)
+ newQueueHead->queueTail = queueHead->queueTail;
+
+ // Change the queue head, possibly removing it if newQueueHead is null. No need for a CAS loop,
+ // since we hold both the queue lock and the lock itself, so nothing about the lock can change
+ // right now.
+ currentWordValue = m_word.load();
+ ASSERT(currentWordValue & isLockedBit);
+ ASSERT(currentWordValue & isQueueLockedBit);
+ ASSERT((currentWordValue & ~queueHeadMask) == bitwise_cast<uintptr_t>(queueHead));
+ uintptr_t newWordValue = currentWordValue;
+ newWordValue &= ~isLockedBit; // Release the WordLock.
+ newWordValue &= ~isQueueLockedBit; // Release the queue lock.
+ newWordValue &= queueHeadMask; // Clear out the old queue head.
+ newWordValue |= bitwise_cast<uintptr_t>(newQueueHead); // Install new queue head.
+ m_word.store(newWordValue);
+
+ // Now the lock is available for acquisition. But we just have to wake up the old queue head.
+ // After that, we're done!
+
+ queueHead->nextInQueue = nullptr;
+ queueHead->queueTail = nullptr;
+
+ // We do this carefully because this may run either before or during the parkingLock critical
+ // section in lockSlow().
+ {
+ std::unique_lock<std::mutex> locker(queueHead->parkingLock);
+ queueHead->shouldPark = false;
+ }
+ // Doesn't matter if we notify_all() or notify_one() here since the only thread that could be
+ // waiting is queueHead.
+ queueHead->parkingCondition.notify_one();
+
+ // The old queue head can now contend for the lock again. We're done!
+}
+
+} // namespace WTF
+
diff --git a/Source/WTF/wtf/WordLock.h b/Source/WTF/wtf/WordLock.h
new file mode 100644
index 000000000..52191ee01
--- /dev/null
+++ b/Source/WTF/wtf/WordLock.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_WordLock_h
+#define WTF_WordLock_h
+
+#include <wtf/Atomics.h>
+#include <wtf/Compiler.h>
+#include <wtf/Locker.h>
+#include <wtf/Noncopyable.h>
+
+namespace TestWebKitAPI {
+struct LockInspector;
+};
+
+namespace WTF {
+
+// A WordLock is a fully adaptive mutex that uses sizeof(void*) storage. It has a fast path that is
+// similar to a spinlock, and a slow path that is similar to std::mutex. In most cases, you should use
+// Lock instead. WordLock sits lower in the stack and is used to implement Lock, so Lock is the main
+// client of WordLock.
+
+// NOTE: This is also a great lock to use if you are very low in the stack. For example,
+// PrintStream uses this so that ParkingLot and Lock can use PrintStream. This means that if you
+// try to use dataLog to debug this code, you will have a bad time.
+
+struct WordLockBase {
+ void lock()
+ {
+ if (LIKELY(m_word.compareExchangeWeak(0, isLockedBit, std::memory_order_acquire))) {
+ // WordLock acquired!
+ return;
+ }
+
+ lockSlow();
+ }
+
+ void unlock()
+ {
+ if (LIKELY(m_word.compareExchangeWeak(isLockedBit, 0, std::memory_order_release))) {
+ // WordLock released, and nobody was waiting!
+ return;
+ }
+
+ unlockSlow();
+ }
+
+ bool isHeld() const
+ {
+ return m_word.load(std::memory_order_acquire) & isLockedBit;
+ }
+
+ bool isLocked() const
+ {
+ return isHeld();
+ }
+
+protected:
+ friend struct TestWebKitAPI::LockInspector;
+
+ static const uintptr_t isLockedBit = 1;
+ static const uintptr_t isQueueLockedBit = 2;
+ static const uintptr_t queueHeadMask = 3;
+
+ WTF_EXPORT_PRIVATE void lockSlow();
+ WTF_EXPORT_PRIVATE void unlockSlow();
+
+ // Method used for testing only.
+ bool isFullyReset() const
+ {
+ return !m_word.load();
+ }
+
+ Atomic<uintptr_t> m_word;
+};
+
+class WordLock : public WordLockBase {
+ WTF_MAKE_NONCOPYABLE(WordLock);
+public:
+ WordLock()
+ {
+ m_word.store(0, std::memory_order_relaxed);
+ }
+};
+
+typedef WordLockBase StaticWordLock;
+typedef Locker<WordLockBase> WordLockHolder;
+
+} // namespace WTF
+
+using WTF::WordLock;
+using WTF::WordLockHolder;
+using WTF::StaticWordLock;
+
+#endif // WTF_WordLock_h
+
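[Editor's note] WordLock packs everything into one word: bit 0 is the lock, bit 1 guards the wait queue, and the remaining bits hold the queue-head ThreadData pointer. Typical use is RAII through the WordLockHolder typedef; a short sketch, with the guarded counter purely illustrative:

    static StaticWordLock s_counterLock;  // zero word == unlocked, so a plain static is fine
    static uint64_t s_counter;

    uint64_t nextId()
    {
        WordLockHolder locker(s_counterLock);  // Locker<WordLockBase>: locks here, unlocks at scope exit
        return ++s_counter;
    }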
diff --git a/Source/WTF/wtf/WorkQueue.cpp b/Source/WTF/wtf/WorkQueue.cpp
new file mode 100644
index 000000000..b2a0a79f4
--- /dev/null
+++ b/Source/WTF/wtf/WorkQueue.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "WorkQueue.h"
+
+#include <mutex>
+#include <wtf/Condition.h>
+#include <wtf/Deque.h>
+#include <wtf/Lock.h>
+#include <wtf/NeverDestroyed.h>
+#include <wtf/NumberOfCores.h>
+#include <wtf/Ref.h>
+#include <wtf/Threading.h>
+#include <wtf/text/WTFString.h>
+#include <wtf/threads/BinarySemaphore.h>
+
+namespace WTF {
+
+Ref<WorkQueue> WorkQueue::create(const char* name, Type type, QOS qos)
+{
+ return adoptRef(*new WorkQueue(name, type, qos));
+}
+
+WorkQueue::WorkQueue(const char* name, Type type, QOS qos)
+{
+ platformInitialize(name, type, qos);
+}
+
+WorkQueue::~WorkQueue()
+{
+ platformInvalidate();
+}
+
+#if !PLATFORM(COCOA)
+void WorkQueue::concurrentApply(size_t iterations, const std::function<void (size_t index)>& function)
+{
+ if (!iterations)
+ return;
+
+ if (iterations == 1) {
+ function(0);
+ return;
+ }
+
+ class ThreadPool {
+ public:
+ ThreadPool()
+ {
+ // We don't need a thread for the current core.
+ unsigned threadCount = numberOfProcessorCores() - 1;
+
+ m_workers.reserveInitialCapacity(threadCount);
+ for (unsigned i = 0; i < threadCount; ++i) {
+ m_workers.append(createThread(String::format("ThreadPool Worker %u", i).utf8().data(), [this] {
+ threadBody();
+ }));
+ }
+ }
+
+ size_t workerCount() const { return m_workers.size(); }
+
+ void dispatch(const std::function<void ()>* function)
+ {
+ LockHolder holder(m_lock);
+
+ m_queue.append(function);
+ m_condition.notifyOne();
+ }
+
+ private:
+ NO_RETURN void threadBody()
+ {
+ while (true) {
+ const std::function<void ()>* function;
+
+ {
+ LockHolder holder(m_lock);
+
+ m_condition.wait(m_lock, [this] {
+ return !m_queue.isEmpty();
+ });
+
+ function = m_queue.takeFirst();
+ }
+
+ (*function)();
+ }
+ }
+
+ Lock m_lock;
+ Condition m_condition;
+ Deque<const std::function<void ()>*> m_queue;
+
+ Vector<ThreadIdentifier> m_workers;
+ };
+
+ static LazyNeverDestroyed<ThreadPool> threadPool;
+ static std::once_flag onceFlag;
+ std::call_once(onceFlag, [] {
+ threadPool.construct();
+ });
+
+ // Cap the worker count to the number of iterations (excluding this thread)
+ const size_t workerCount = std::min(iterations - 1, threadPool->workerCount());
+
+ std::atomic<size_t> currentIndex(0);
+ std::atomic<size_t> activeThreads(workerCount + 1);
+
+ Condition condition;
+ Lock lock;
+
+ std::function<void ()> applier = [&] {
+ size_t index;
+
+ // Call the function for as long as there are iterations left.
+ while ((index = currentIndex++) < iterations)
+ function(index);
+
+ // If there are no active threads left, signal the caller.
+ if (!--activeThreads) {
+ LockHolder holder(lock);
+ condition.notifyOne();
+ }
+ };
+
+ for (size_t i = 0; i < workerCount; ++i)
+ threadPool->dispatch(&applier);
+ applier();
+
+ LockHolder holder(lock);
+ condition.wait(lock, [&] { return !activeThreads; });
+}
+#endif
+
+}
diff --git a/Source/WTF/wtf/WorkQueue.h b/Source/WTF/wtf/WorkQueue.h
new file mode 100644
index 000000000..429c38d86
--- /dev/null
+++ b/Source/WTF/wtf/WorkQueue.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2010, 2015 Apple Inc. All rights reserved.
+ * Portions Copyright (c) 2010 Motorola Mobility, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WorkQueue_h
+#define WorkQueue_h
+
+#include <chrono>
+#include <functional>
+#include <wtf/Forward.h>
+#include <wtf/FunctionDispatcher.h>
+#include <wtf/RefCounted.h>
+#include <wtf/Threading.h>
+
+#if USE(COCOA_EVENT_LOOP)
+#include <dispatch/dispatch.h>
+#endif
+
+#if USE(WINDOWS_EVENT_LOOP)
+#include <wtf/Vector.h>
+#endif
+
+#if USE(GLIB_EVENT_LOOP) || USE(GENERIC_EVENT_LOOP)
+#include <wtf/Condition.h>
+#include <wtf/RunLoop.h>
+#endif
+
+namespace WTF {
+
+class WorkQueue final : public FunctionDispatcher {
+public:
+ enum class Type {
+ Serial,
+ Concurrent
+ };
+ enum class QOS {
+ UserInteractive,
+ UserInitiated,
+ Default,
+ Utility,
+ Background
+ };
+
+ WTF_EXPORT_PRIVATE static Ref<WorkQueue> create(const char* name, Type = Type::Serial, QOS = QOS::Default);
+ virtual ~WorkQueue();
+
+ WTF_EXPORT_PRIVATE void dispatch(Function<void ()>&&) override;
+ WTF_EXPORT_PRIVATE void dispatchAfter(std::chrono::nanoseconds, Function<void ()>&&);
+
+ WTF_EXPORT_PRIVATE static void concurrentApply(size_t iterations, const std::function<void (size_t index)>&);
+
+#if USE(COCOA_EVENT_LOOP)
+ dispatch_queue_t dispatchQueue() const { return m_dispatchQueue; }
+#elif USE(GLIB_EVENT_LOOP) || USE(GENERIC_EVENT_LOOP)
+ RunLoop& runLoop() const { return *m_runLoop; }
+#endif
+
+private:
+ explicit WorkQueue(const char* name, Type, QOS);
+
+ void platformInitialize(const char* name, Type, QOS);
+ void platformInvalidate();
+
+#if USE(WINDOWS_EVENT_LOOP)
+ static void CALLBACK timerCallback(void* context, BOOLEAN timerOrWaitFired);
+ static DWORD WINAPI workThreadCallback(void* context);
+
+ bool tryRegisterAsWorkThread();
+ void unregisterAsWorkThread();
+ void performWorkOnRegisteredWorkThread();
+#endif
+
+#if USE(COCOA_EVENT_LOOP)
+ static void executeFunction(void*);
+ dispatch_queue_t m_dispatchQueue;
+#elif USE(WINDOWS_EVENT_LOOP)
+ volatile LONG m_isWorkThreadRegistered;
+
+ Mutex m_functionQueueLock;
+ Vector<Function<void ()>> m_functionQueue;
+
+ HANDLE m_timerQueue;
+#elif USE(GLIB_EVENT_LOOP) || USE(GENERIC_EVENT_LOOP)
+ ThreadIdentifier m_workQueueThread;
+ Lock m_initializeRunLoopConditionMutex;
+ Condition m_initializeRunLoopCondition;
+ RunLoop* m_runLoop;
+#endif
+};
+
+}
+
+using WTF::WorkQueue;
+
+#endif
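
A short usage sketch for the header above: create a named serial queue, then hand it work as lambdas. The queue name and the tasks are assumptions for illustration only.

    auto queue = WorkQueue::create("org.example.IOQueue");
    queue->dispatch([] {
        // Runs on the queue's thread (or on the underlying dispatch queue on Cocoa ports).
    });
    queue->dispatchAfter(std::chrono::milliseconds(500), [] {
        // Runs on the same queue roughly half a second later.
    });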
diff --git a/Source/WTF/wtf/dtoa.cpp b/Source/WTF/wtf/dtoa.cpp
index 5a55320ef..823b161b3 100644
--- a/Source/WTF/wtf/dtoa.cpp
+++ b/Source/WTF/wtf/dtoa.cpp
@@ -3,7 +3,7 @@
* The author of this software is David M. Gay.
*
* Copyright (c) 1991, 2000, 2001 by Lucent Technologies.
- * Copyright (C) 2002, 2005, 2006, 2007, 2008, 2010, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2002, 2005, 2006, 2007, 2008, 2010, 2012, 2015 Apple Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose without fee is hereby granted, provided that this entire notice
@@ -36,6 +36,7 @@
#include "dtoa.h"
#include <stdio.h>
+#include <wtf/Lock.h>
#include <wtf/MathExtras.h>
#include <wtf/Threading.h>
#include <wtf/Vector.h>
@@ -54,7 +55,7 @@
namespace WTF {
-Mutex* s_dtoaP5Mutex;
+static StaticLock s_dtoaP5Mutex;
typedef union {
double d;
@@ -367,7 +368,7 @@ static int p5sCount;
static ALWAYS_INLINE void pow5mult(BigInt& b, int k)
{
- static int p05[3] = { 5, 25, 125 };
+ static const int p05[3] = { 5, 25, 125 };
if (int i = k & 3)
multadd(b, p05[i - 1], 0);
@@ -375,7 +376,7 @@ static ALWAYS_INLINE void pow5mult(BigInt& b, int k)
if (!(k >>= 2))
return;
- s_dtoaP5Mutex->lock();
+ s_dtoaP5Mutex.lock();
P5Node* p5 = p5s;
if (!p5) {
@@ -388,7 +389,7 @@ static ALWAYS_INLINE void pow5mult(BigInt& b, int k)
}
int p5sCountLocal = p5sCount;
- s_dtoaP5Mutex->unlock();
+ s_dtoaP5Mutex.unlock();
int p5sUsed = 0;
for (;;) {
@@ -399,7 +400,7 @@ static ALWAYS_INLINE void pow5mult(BigInt& b, int k)
break;
if (++p5sUsed == p5sCountLocal) {
- s_dtoaP5Mutex->lock();
+ s_dtoaP5Mutex.lock();
if (p5sUsed == p5sCount) {
ASSERT(!p5->next);
p5->next = new P5Node;
@@ -410,7 +411,7 @@ static ALWAYS_INLINE void pow5mult(BigInt& b, int k)
}
p5sCountLocal = p5sCount;
- s_dtoaP5Mutex->unlock();
+ s_dtoaP5Mutex.unlock();
}
p5 = p5->next;
}
diff --git a/Source/WTF/wtf/dtoa.h b/Source/WTF/wtf/dtoa.h
index 39b8f481a..3c5b5b9f4 100644
--- a/Source/WTF/wtf/dtoa.h
+++ b/Source/WTF/wtf/dtoa.h
@@ -21,16 +21,13 @@
#ifndef WTF_dtoa_h
#define WTF_dtoa_h
+#include <unicode/utypes.h>
#include <wtf/ASCIICType.h>
#include <wtf/dtoa/double-conversion.h>
-#include <wtf/unicode/Unicode.h>
+#include <wtf/text/StringView.h>
namespace WTF {
-class Mutex;
-
-extern Mutex* s_dtoaP5Mutex;
-
typedef char DtoaBuffer[80];
WTF_EXPORT_PRIVATE void dtoa(DtoaBuffer result, double dd, bool& sign, int& exponent, unsigned& precision);
@@ -48,6 +45,7 @@ WTF_EXPORT_PRIVATE const char* numberToFixedWidthString(double, unsigned decimal
double parseDouble(const LChar* string, size_t length, size_t& parsedLength);
double parseDouble(const UChar* string, size_t length, size_t& parsedLength);
+double parseDouble(StringView, size_t& parsedLength);
namespace Internal {
WTF_EXPORT_PRIVATE double parseDoubleFromLongString(const UChar* string, size_t length, size_t& parsedLength);
@@ -68,6 +66,13 @@ inline double parseDouble(const UChar* string, size_t length, size_t& parsedLeng
conversionBuffer[i] = isASCII(string[i]) ? string[i] : 0;
return parseDouble(conversionBuffer, length, parsedLength);
}
+
+inline double parseDouble(StringView string, size_t& parsedLength)
+{
+ if (string.is8Bit())
+ return parseDouble(string.characters8(), string.length(), parsedLength);
+ return parseDouble(string.characters16(), string.length(), parsedLength);
+}
} // namespace WTF
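
A small sketch of the StringView overload added above; the input string is illustrative. parsedLength reports how many leading characters were consumed, which callers can use to detect whether the whole string was numeric.

    String input("3.25");
    size_t parsedLength;
    double value = WTF::parseDouble(StringView(input), parsedLength);
    // value == 3.25; parsedLength == input.length() because the whole string parsed.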
diff --git a/Source/WTF/wtf/dtoa/COPYING b/Source/WTF/wtf/dtoa/COPYING
new file mode 100644
index 000000000..933718a9e
--- /dev/null
+++ b/Source/WTF/wtf/dtoa/COPYING
@@ -0,0 +1,26 @@
+Copyright 2006-2011, the V8 project authors. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Source/WTF/wtf/dtoa/LICENSE b/Source/WTF/wtf/dtoa/LICENSE
new file mode 100644
index 000000000..933718a9e
--- /dev/null
+++ b/Source/WTF/wtf/dtoa/LICENSE
@@ -0,0 +1,26 @@
+Copyright 2006-2011, the V8 project authors. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Source/WTF/wtf/dtoa/README b/Source/WTF/wtf/dtoa/README
new file mode 100644
index 000000000..f186b420f
--- /dev/null
+++ b/Source/WTF/wtf/dtoa/README
@@ -0,0 +1,11 @@
+http://code.google.com/p/double-conversion
+
+This project (double-conversion) provides binary-decimal and decimal-binary
+routines for IEEE doubles.
+
+The library consists of efficient conversion routines that have been extracted
+from the V8 JavaScript engine. The code has been refactored and improved so that
+it can be used more easily in other projects.
+
+There is extensive documentation in src/double-conversion.h. Other examples can
+be found in test/cctest/test-conversions.cc.
diff --git a/Source/WTF/wtf/dtoa/bignum.cc b/Source/WTF/wtf/dtoa/bignum.cc
index 9bb9049a2..8f6f73a8e 100644
--- a/Source/WTF/wtf/dtoa/bignum.cc
+++ b/Source/WTF/wtf/dtoa/bignum.cc
@@ -29,6 +29,7 @@
#include "bignum.h"
#include "utils.h"
+#include <wtf/ASCIICType.h>
namespace WTF {
@@ -93,8 +94,8 @@ namespace double_conversion {
int from,
int digits_to_read) {
uint64_t result = 0;
- for (int i = from; i < from + digits_to_read; ++i) {
- int digit = buffer[i] - '0';
+ for (int i = 0; i < digits_to_read; ++i) {
+ int digit = buffer[from + i] - '0';
ASSERT(0 <= digit && digit <= 9);
result = result * 10 + digit;
}
@@ -123,15 +124,6 @@ namespace double_conversion {
}
- static int HexCharValue(char c) {
- if ('0' <= c && c <= '9') return c - '0';
- if ('a' <= c && c <= 'f') return 10 + c - 'a';
- if ('A' <= c && c <= 'F') return 10 + c - 'A';
- UNREACHABLE();
- return 0; // To make compiler happy.
- }
-
-
void Bignum::AssignHexString(BufferReference<const char> value) {
Zero();
int length = value.length();
@@ -143,7 +135,7 @@ namespace double_conversion {
// These bigits are guaranteed to be "full".
Chunk current_bigit = 0;
for (int j = 0; j < kBigitSize / 4; j++) {
- current_bigit += HexCharValue(value[string_index--]) << (j * 4);
+ current_bigit += toASCIIHexValue(value[string_index--]) << (j * 4);
}
bigits_[i] = current_bigit;
}
@@ -152,7 +144,7 @@ namespace double_conversion {
Chunk most_significant_bigit = 0; // Could be = 0;
for (int j = 0; j <= string_index; ++j) {
most_significant_bigit <<= 4;
- most_significant_bigit += HexCharValue(value[j]);
+ most_significant_bigit += toASCIIHexValue(value[j]);
}
if (most_significant_bigit != 0) {
bigits_[used_digits_] = most_significant_bigit;
diff --git a/Source/WTF/wtf/dtoa/double-conversion.cc b/Source/WTF/wtf/dtoa/double-conversion.cc
index abf287a8b..6bf360245 100644
--- a/Source/WTF/wtf/dtoa/double-conversion.cc
+++ b/Source/WTF/wtf/dtoa/double-conversion.cc
@@ -26,10 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "config.h"
-
-#include <limits.h>
-#include <math.h>
-
#include "double-conversion.h"
#include "bignum-dtoa.h"
@@ -38,6 +34,9 @@
#include "fixed-dtoa.h"
#include "strtod.h"
#include "utils.h"
+#include <limits.h>
+#include <math.h>
+#include <wtf/ASCIICType.h>
namespace WTF {
@@ -483,7 +482,7 @@ namespace double_conversion {
}
// Copy significant digits of the integer part (if any) to the buffer.
- while (*current >= '0' && *current <= '9') {
+ while (isASCIIDigit(*current)) {
if (significant_digits < kMaxSignificantDigits) {
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = static_cast<char>(*current);
@@ -520,7 +519,7 @@ namespace double_conversion {
}
// There is a fractional part.
- while (*current >= '0' && *current <= '9') {
+ while (isASCIIDigit(*current)) {
if (significant_digits < kMaxSignificantDigits) {
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = static_cast<char>(*current);
@@ -580,7 +579,7 @@ namespace double_conversion {
num = num * 10 + digit;
}
++current;
- } while (current != end && *current >= '0' && *current <= '9');
+ } while (current != end && isASCIIDigit(*current));
exponent += (sign == '-' ? -num : num);
}
diff --git a/Source/WTF/wtf/dtoa/strtod.cc b/Source/WTF/wtf/dtoa/strtod.cc
index 5f63b22ad..5270c3e59 100644
--- a/Source/WTF/wtf/dtoa/strtod.cc
+++ b/Source/WTF/wtf/dtoa/strtod.cc
@@ -38,11 +38,13 @@
namespace WTF {
namespace double_conversion {
-
+
+#if defined(DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS)
// 2^53 = 9007199254740992.
// Any integer with at most 15 decimal digits will hence fit into a double
// (which has a 53bit significand) without loss of precision.
static const int kMaxExactDoubleIntegerDecimalDigits = 15;
+#endif
// 2^64 = 18446744073709551616 > 10^19
static const int kMaxUint64DecimalDigits = 19;
@@ -58,7 +60,7 @@ namespace double_conversion {
// 2^64 = 18446744073709551616
static const uint64_t kMaxUint64 = UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF);
-
+#if defined(DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS)
static const double exact_powers_of_ten[] = {
1.0, // 10^0
10.0,
@@ -86,6 +88,7 @@ namespace double_conversion {
10000000000000000000000.0
};
static const int kExactPowersOfTenSize = ARRAY_SIZE(exact_powers_of_ten);
+#endif
// Maximum number of significant digits in the decimal representation.
// In fact the value is 772 (see conversions.cc), but to give us some margin
@@ -177,6 +180,9 @@ namespace double_conversion {
int exponent,
double* result) {
#if !defined(DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS)
+ UNUSED_PARAM(trimmed);
+ UNUSED_PARAM(exponent);
+ UNUSED_PARAM(result);
// On x86 the floating-point stack can be 64 or 80 bits wide. If it is
// 80 bits wide (as is the case on Linux) then double-rounding occurs and the
// result is not accurate.
@@ -184,7 +190,7 @@ namespace double_conversion {
// Note that the ARM simulator is compiled for 32bits. It therefore exhibits
// the same problem.
return false;
-#endif
+#else
if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
int read_digits;
// The trimmed input fits into a double.
@@ -222,6 +228,7 @@ namespace double_conversion {
}
}
return false;
+#endif
}
diff --git a/Source/WTF/wtf/dtoa/utils.h b/Source/WTF/wtf/dtoa/utils.h
index 667694d75..889642cee 100644
--- a/Source/WTF/wtf/dtoa/utils.h
+++ b/Source/WTF/wtf/dtoa/utils.h
@@ -49,7 +49,7 @@
defined(__ARMEL__) || \
defined(_MIPS_ARCH_MIPS32R2)
#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
-#elif CPU(MIPS) || CPU(MIPS64) || CPU(PPC) || CPU(PPC64) || CPU(PPC64LE) || OS(WINCE) || CPU(SH4) || CPU(S390) || CPU(S390X) || CPU(IA64) || CPU(ALPHA) || CPU(ARM64) || CPU(HPPA)
+#elif CPU(MIPS) || CPU(MIPS64) || CPU(PPC) || CPU(PPC64) || CPU(PPC64LE) || CPU(SH4) || CPU(S390) || CPU(S390X) || CPU(IA64) || CPU(ALPHA) || CPU(ARM64) || CPU(HPPA)
#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
#elif defined(_M_IX86) || defined(__i386__)
#if defined(_WIN32)
@@ -58,8 +58,6 @@ defined(_MIPS_ARCH_MIPS32R2)
#else
#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
#endif // _WIN32
-#elif defined(WINCE) || defined(_WIN32_WCE)
-#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
#else
#error Target architecture was not detected as supported by Double-Conversion.
#endif
@@ -294,7 +292,7 @@ namespace double_conversion {
inline Dest BitCast(const Source& source) {
// Compile time assertion: sizeof(Dest) == sizeof(Source)
// A compile error here means your Dest and Source have different sizes.
- typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
+ static_assert(sizeof(Dest) == sizeof(Source), "Source and destination sizes must be equal");
Dest dest;
memcpy(&dest, &source, sizeof(dest));
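
The static_assert above replaces the old negative-array-size trick with a readable compile-time diagnostic; BitCast itself is unchanged. An illustrative use, reinterpreting a double's bit pattern:

    double d = 1.5;
    uint64_t bits = BitCast<uint64_t>(d);   // OK: sizes match, bytes are copied via memcpy.
    // BitCast<uint32_t>(d) would now fail to compile with a clear message.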
diff --git a/Source/WTF/wtf/generic/MainThreadGeneric.cpp b/Source/WTF/wtf/generic/MainThreadGeneric.cpp
new file mode 100644
index 000000000..c849c6951
--- /dev/null
+++ b/Source/WTF/wtf/generic/MainThreadGeneric.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Konstantin Tokavev <annulen@yandex.ru>
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MainThread.h"
+
+#include <wtf/RunLoop.h>
+
+namespace WTF {
+
+void initializeMainThreadPlatform()
+{
+}
+
+void scheduleDispatchFunctionsOnMainThread()
+{
+ RunLoop::main().dispatch(std::function<void()>(dispatchFunctionsFromMainThread));
+}
+
+}
diff --git a/Source/WTF/wtf/generic/RunLoopGeneric.cpp b/Source/WTF/wtf/generic/RunLoopGeneric.cpp
new file mode 100644
index 000000000..9a3c09a85
--- /dev/null
+++ b/Source/WTF/wtf/generic/RunLoopGeneric.cpp
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2016 Konstantin Tokavev <annulen@yandex.ru>
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "RunLoop.h"
+
+namespace WTF {
+
+class RunLoop::TimerBase::ScheduledTask : public ThreadSafeRefCounted<ScheduledTask> {
+WTF_MAKE_NONCOPYABLE(ScheduledTask);
+public:
+ static RefPtr<ScheduledTask> create(Function<void()>&& function, Seconds interval, bool repeating)
+ {
+ return adoptRef(new ScheduledTask(WTFMove(function), interval, repeating));
+ }
+
+ ScheduledTask(Function<void()>&& function, Seconds interval, bool repeating)
+ : m_function(WTFMove(function))
+ , m_fireInterval(interval)
+ , m_isRepeating(repeating)
+ {
+ updateReadyTime();
+ }
+
+ bool fired()
+ {
+ if (!isActive())
+ return false;
+
+ m_function();
+
+ if (!m_isRepeating)
+ return false;
+
+ updateReadyTime();
+ return isActive();
+ }
+
+ MonotonicTime scheduledTimePoint() const
+ {
+ return m_scheduledTimePoint;
+ }
+
+ void updateReadyTime()
+ {
+ m_scheduledTimePoint = MonotonicTime::now();
+ if (!m_fireInterval)
+ return;
+ m_scheduledTimePoint += m_fireInterval;
+ }
+
+ struct EarliestSchedule {
+ bool operator()(const RefPtr<ScheduledTask>& lhs, const RefPtr<ScheduledTask>& rhs)
+ {
+ return lhs->scheduledTimePoint() > rhs->scheduledTimePoint();
+ }
+ };
+
+ bool isActive() const
+ {
+ return m_isActive.load();
+ }
+
+ void deactivate()
+ {
+ m_isActive.store(false);
+ }
+
+private:
+ Function<void ()> m_function;
+ MonotonicTime m_scheduledTimePoint;
+ Seconds m_fireInterval;
+ std::atomic<bool> m_isActive { true };
+ bool m_isRepeating;
+};
+
+RunLoop::RunLoop()
+{
+}
+
+RunLoop::~RunLoop()
+{
+ LockHolder locker(m_loopLock);
+ m_shutdown = true;
+ m_readyToRun.notifyOne();
+
+ // Main loops may still be running here. Wait until all of them have finished and been removed.
+ if (!m_mainLoops.isEmpty())
+ m_stopCondition.wait(m_loopLock);
+}
+
+inline bool RunLoop::populateTasks(RunMode runMode, Status& statusOfThisLoop, Deque<RefPtr<TimerBase::ScheduledTask>>& firedTimers)
+{
+ LockHolder locker(m_loopLock);
+
+ if (runMode == RunMode::Drain) {
+ MonotonicTime sleepUntil = MonotonicTime::infinity();
+ if (!m_schedules.isEmpty())
+ sleepUntil = m_schedules.first()->scheduledTimePoint();
+
+ m_readyToRun.waitUntil(m_loopLock, sleepUntil, [&] {
+ return m_shutdown || m_pendingTasks || statusOfThisLoop == Status::Stopping;
+ });
+ }
+
+ if (statusOfThisLoop == Status::Stopping || m_shutdown) {
+ m_mainLoops.removeLast();
+ if (m_mainLoops.isEmpty())
+ m_stopCondition.notifyOne();
+ return false;
+ }
+ m_pendingTasks = false;
+ if (runMode == RunMode::Iterate)
+ statusOfThisLoop = Status::Stopping;
+
+ // Check expired timers.
+ MonotonicTime now = MonotonicTime::now();
+ while (!m_schedules.isEmpty()) {
+ RefPtr<TimerBase::ScheduledTask> earliest = m_schedules.first();
+ if (earliest->scheduledTimePoint() > now)
+ break;
+ std::pop_heap(m_schedules.begin(), m_schedules.end(), TimerBase::ScheduledTask::EarliestSchedule());
+ m_schedules.removeLast();
+ firedTimers.append(earliest);
+ }
+
+ return true;
+}
+
+void RunLoop::runImpl(RunMode runMode)
+{
+ ASSERT(this == &RunLoop::current());
+
+ Status statusOfThisLoop = Status::Clear;
+ {
+ LockHolder locker(m_loopLock);
+ m_mainLoops.append(&statusOfThisLoop);
+ }
+
+ Deque<RefPtr<TimerBase::ScheduledTask>> firedTimers;
+ while (true) {
+ if (!populateTasks(runMode, statusOfThisLoop, firedTimers))
+ return;
+
+ // Dispatch scheduled timers.
+ while (!firedTimers.isEmpty()) {
+ RefPtr<TimerBase::ScheduledTask> task = firedTimers.takeFirst();
+ if (task->fired()) {
+ // Reschedule because the timer requires repeating.
+ // Since we will query the timers' time points before sleeping,
+ // we do not call wakeUp() here.
+ schedule(WTFMove(task));
+ }
+ }
+ performWork();
+ }
+}
+
+void RunLoop::run()
+{
+ RunLoop::current().runImpl(RunMode::Drain);
+}
+
+void RunLoop::iterate()
+{
+ RunLoop::current().runImpl(RunMode::Iterate);
+}
+
+// RunLoop operations are thread-safe and may be called from outside the RunLoop's thread.
+// For example, WorkQueue::{dispatch, dispatchAfter} invoke the WorkQueue thread's RunLoop
+// from the caller's thread.
+
+void RunLoop::stop()
+{
+ LockHolder locker(m_loopLock);
+ if (m_mainLoops.isEmpty())
+ return;
+
+ Status* status = m_mainLoops.last();
+ if (*status != Status::Stopping) {
+ *status = Status::Stopping;
+ m_readyToRun.notifyOne();
+ }
+}
+
+void RunLoop::wakeUp(const LockHolder&)
+{
+ m_pendingTasks = true;
+ m_readyToRun.notifyOne();
+}
+
+void RunLoop::wakeUp()
+{
+ LockHolder locker(m_loopLock);
+ wakeUp(locker);
+}
+
+void RunLoop::schedule(const LockHolder&, RefPtr<TimerBase::ScheduledTask>&& task)
+{
+ m_schedules.append(WTFMove(task));
+ std::push_heap(m_schedules.begin(), m_schedules.end(), TimerBase::ScheduledTask::EarliestSchedule());
+}
+
+void RunLoop::schedule(RefPtr<TimerBase::ScheduledTask>&& task)
+{
+ LockHolder locker(m_loopLock);
+ schedule(locker, WTFMove(task));
+}
+
+void RunLoop::scheduleAndWakeUp(RefPtr<TimerBase::ScheduledTask> task)
+{
+ LockHolder locker(m_loopLock);
+ schedule(locker, WTFMove(task));
+ wakeUp(locker);
+}
+
+void RunLoop::dispatchAfter(std::chrono::nanoseconds delay, Function<void ()>&& function)
+{
+ LockHolder locker(m_loopLock);
+ bool repeating = false;
+ schedule(locker, TimerBase::ScheduledTask::create(WTFMove(function), Seconds(delay.count() / 1000.0 / 1000.0 / 1000.0), repeating));
+ wakeUp(locker);
+}
+
+// Since RunLoop does not own the registered TimerBase,
+// the TimerBase and its owner are responsible for managing its lifetime.
+//
+// More importantly, TimerBase operations are not thread-safe,
+// so threads that do not belong to the ScheduledTask's RunLoop
+// must not operate on the RunLoop::TimerBase.
+// The same restriction applies to RunLoopWin, the RunLoop implementation for Windows.
+RunLoop::TimerBase::TimerBase(RunLoop& runLoop)
+ : m_runLoop(runLoop)
+ , m_scheduledTask(nullptr)
+{
+}
+
+RunLoop::TimerBase::~TimerBase()
+{
+ stop();
+}
+
+void RunLoop::TimerBase::start(double interval, bool repeating)
+{
+ stop();
+ m_scheduledTask = ScheduledTask::create([this] {
+ fired();
+ }, Seconds(interval), repeating);
+ m_runLoop.scheduleAndWakeUp(m_scheduledTask);
+}
+
+void RunLoop::TimerBase::stop()
+{
+ if (m_scheduledTask) {
+ m_scheduledTask->deactivate();
+ m_scheduledTask = nullptr;
+ }
+}
+
+bool RunLoop::TimerBase::isActive() const
+{
+ return m_scheduledTask;
+}
+
+} // namespace WTF
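
A note on the scheduling heap used in populateTasks() and schedule() above: std::push_heap and std::pop_heap build a max-heap with respect to the comparator, so EarliestSchedule's greater-than comparison leaves the task with the earliest scheduled time point at the front of m_schedules. A reduced sketch of the same idiom with plain doubles (illustrative only, not the RunLoop code):

    #include <algorithm>
    #include <vector>

    std::vector<double> deadlines;
    auto laterFirst = [](double lhs, double rhs) { return lhs > rhs; };

    deadlines.push_back(3.0);
    std::push_heap(deadlines.begin(), deadlines.end(), laterFirst);
    deadlines.push_back(1.0);
    std::push_heap(deadlines.begin(), deadlines.end(), laterFirst);
    // deadlines.front() == 1.0: the earliest deadline sits at the top of the heap.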
diff --git a/Source/WTF/wtf/generic/WorkQueueGeneric.cpp b/Source/WTF/wtf/generic/WorkQueueGeneric.cpp
new file mode 100644
index 000000000..f594e2401
--- /dev/null
+++ b/Source/WTF/wtf/generic/WorkQueueGeneric.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 Konstantin Tokavev <annulen@yandex.ru>
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ * Copyright (C) 2011 Igalia S.L.
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Portions Copyright (c) 2010 Motorola Mobility, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "WorkQueue.h"
+
+#include <wtf/text/WTFString.h>
+
+void WorkQueue::platformInitialize(const char* name, Type, QOS)
+{
+ LockHolder locker(m_initializeRunLoopConditionMutex);
+ m_workQueueThread = createThread(name, [this] {
+ {
+ LockHolder locker(m_initializeRunLoopConditionMutex);
+ m_runLoop = &RunLoop::current();
+ m_initializeRunLoopCondition.notifyOne();
+ }
+ m_runLoop->run();
+ });
+ m_initializeRunLoopCondition.wait(m_initializeRunLoopConditionMutex);
+}
+
+void WorkQueue::platformInvalidate()
+{
+ if (m_runLoop)
+ m_runLoop->stop();
+ if (m_workQueueThread) {
+ detachThread(m_workQueueThread);
+ m_workQueueThread = 0;
+ }
+}
+
+void WorkQueue::dispatch(Function<void ()>&& function)
+{
+ RefPtr<WorkQueue> protect(this);
+ m_runLoop->dispatch([protect, function = WTFMove(function)] {
+ function();
+ });
+}
+
+void WorkQueue::dispatchAfter(std::chrono::nanoseconds delay, Function<void ()>&& function)
+{
+ RefPtr<WorkQueue> protect(this);
+ m_runLoop->dispatchAfter(delay, [protect, function = WTFMove(function)] {
+ function();
+ });
+}
diff --git a/Source/WTF/wtf/gobject/GlibUtilities.cpp b/Source/WTF/wtf/glib/GLibUtilities.cpp
index f08e33345..5629b5715 100644
--- a/Source/WTF/wtf/gobject/GlibUtilities.cpp
+++ b/Source/WTF/wtf/glib/GLibUtilities.cpp
@@ -18,7 +18,7 @@
*/
#include "config.h"
-#include "GlibUtilities.h"
+#include "GLibUtilities.h"
#if OS(WINDOWS)
#include <windows.h>
diff --git a/Source/WTF/wtf/gobject/GlibUtilities.h b/Source/WTF/wtf/glib/GLibUtilities.h
index ce10a05c8..da22596b1 100644
--- a/Source/WTF/wtf/gobject/GlibUtilities.h
+++ b/Source/WTF/wtf/glib/GLibUtilities.h
@@ -17,12 +17,21 @@
* Boston, MA 02110-1301, USA.
*/
-#ifndef GlibUtilities_h
-#define GlibUtilities_h
+#ifndef GLibUtilities_h
+#define GLibUtilities_h
#include <wtf/Assertions.h>
#include <wtf/text/CString.h>
CString getCurrentExecutablePath();
+// These might be added to glib in the future, but in the meantime they're defined here.
+#ifndef GULONG_TO_POINTER
+#define GULONG_TO_POINTER(ul) ((gpointer) (gulong) (ul))
+#endif
+
+#ifndef GPOINTER_TO_ULONG
+#define GPOINTER_TO_ULONG(p) ((gulong) (p))
+#endif
+
#endif
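
A tiny sketch of the round trip these macros provide, mirroring glib's GUINT_TO_POINTER family; the value is illustrative:

    gulong handle = 0xdeadbeefUL;
    gpointer key = GULONG_TO_POINTER(handle);      // store a gulong where glib expects a pointer
    gulong roundTripped = GPOINTER_TO_ULONG(key);  // recover the original value
    // roundTripped == handle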
diff --git a/Source/WTF/wtf/gobject/GMutexLocker.h b/Source/WTF/wtf/glib/GMutexLocker.h
index 87b406319..5efbff000 100644
--- a/Source/WTF/wtf/gobject/GMutexLocker.h
+++ b/Source/WTF/wtf/glib/GMutexLocker.h
@@ -23,49 +23,79 @@
#if USE(GLIB)
#include <glib.h>
-
-#include <wtf/FastMalloc.h>
#include <wtf/Noncopyable.h>
namespace WTF {
-class GMutexLocker {
- WTF_MAKE_NONCOPYABLE(GMutexLocker); WTF_MAKE_FAST_ALLOCATED;
+template<typename T>
+struct MutexWrapper;
+
+template<>
+struct MutexWrapper<GMutex> {
+ static void lock(GMutex* mutex)
+ {
+ g_mutex_lock(mutex);
+ }
+
+ static void unlock(GMutex* mutex)
+ {
+ g_mutex_unlock(mutex);
+ }
+};
+
+template<>
+struct MutexWrapper<GRecMutex> {
+ static void lock(GRecMutex* mutex)
+ {
+ g_rec_mutex_lock(mutex);
+ }
+
+ static void unlock(GRecMutex* mutex)
+ {
+ g_rec_mutex_unlock(mutex);
+ }
+};
+template<typename T>
+class GMutexLocker {
+ WTF_MAKE_NONCOPYABLE(GMutexLocker);
public:
- inline explicit GMutexLocker(GMutex* mutex)
+ explicit GMutexLocker(T& mutex)
: m_mutex(mutex)
- , m_val(0)
+ , m_locked(false)
{
lock();
}
- inline ~GMutexLocker() { unlock(); }
-
- inline void lock()
+ ~GMutexLocker()
{
- if (m_mutex && !m_val) {
- g_mutex_lock(m_mutex);
- m_val = 1;
- }
+ unlock();
}
- inline void unlock()
+ void lock()
{
- if (m_mutex && m_val) {
- m_val = 0;
- g_mutex_unlock(m_mutex);
- }
+ if (m_locked)
+ return;
+
+ MutexWrapper<T>::lock(&m_mutex);
+ m_locked = true;
}
- inline GMutex* mutex() const { return m_mutex; }
+ void unlock()
+ {
+ if (!m_locked)
+ return;
+
+ m_locked = false;
+ MutexWrapper<T>::unlock(&m_mutex);
+ }
private:
- GMutex* m_mutex;
- uint8_t m_val;
+ T& m_mutex;
+ bool m_locked;
};
-}
+} // namespace WTF
#endif // USE(GLIB)
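
With the templated rewrite above, one locker type covers both glib mutexes; a minimal sketch, assuming the mutexes were initialized elsewhere with g_mutex_init()/g_rec_mutex_init():

    GMutex plainMutex;
    GRecMutex recursiveMutex;

    {
        WTF::GMutexLocker<GMutex> locker(plainMutex);        // locks via g_mutex_lock()
        // ... critical section ...
    }                                                        // unlocks in the destructor

    {
        WTF::GMutexLocker<GRecMutex> locker(recursiveMutex); // locks via g_rec_mutex_lock()
        // ... critical section ...
    }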
diff --git a/Source/WTF/wtf/gobject/GRefPtr.cpp b/Source/WTF/wtf/glib/GRefPtr.cpp
index 52a9a3942..c522095af 100644
--- a/Source/WTF/wtf/gobject/GRefPtr.cpp
+++ b/Source/WTF/wtf/glib/GRefPtr.cpp
@@ -35,7 +35,8 @@ template <> GHashTable* refGPtr(GHashTable* ptr)
template <> void derefGPtr(GHashTable* ptr)
{
- g_hash_table_unref(ptr);
+ if (ptr)
+ g_hash_table_unref(ptr);
}
template <> GMainContext* refGPtr(GMainContext* ptr)
@@ -64,7 +65,6 @@ template <> void derefGPtr(GMainLoop* ptr)
g_main_loop_unref(ptr);
}
-#if GLIB_CHECK_VERSION(2, 32, 0)
template <> GBytes* refGPtr(GBytes* ptr)
{
if (ptr)
@@ -78,33 +78,30 @@ template <> void derefGPtr(GBytes* ptr)
g_bytes_unref(ptr);
}
-# else
-
-typedef struct _GBytes {
- bool fake;
-} GBytes;
-
-template <> GBytes* refGPtr(GBytes* ptr)
+template <> GVariant* refGPtr(GVariant* ptr)
{
+ if (ptr)
+ g_variant_ref_sink(ptr);
return ptr;
}
-template <> void derefGPtr(GBytes* ptr)
+template <> void derefGPtr(GVariant* ptr)
{
+ if (ptr)
+ g_variant_unref(ptr);
}
-#endif
-
-template <> GVariant* refGPtr(GVariant* ptr)
+template <> GVariantBuilder* refGPtr(GVariantBuilder* ptr)
{
if (ptr)
- g_variant_ref_sink(ptr);
+ g_variant_builder_ref(ptr);
return ptr;
}
-template <> void derefGPtr(GVariant* ptr)
+template <> void derefGPtr(GVariantBuilder* ptr)
{
- g_variant_unref(ptr);
+ if (ptr)
+ g_variant_builder_unref(ptr);
}
template <> GSource* refGPtr(GSource* ptr)
diff --git a/Source/WTF/wtf/gobject/GRefPtr.h b/Source/WTF/wtf/glib/GRefPtr.h
index b25b7bbc6..d05084b66 100644
--- a/Source/WTF/wtf/gobject/GRefPtr.h
+++ b/Source/WTF/wtf/glib/GRefPtr.h
@@ -25,6 +25,7 @@
#if USE(GLIB)
+#include <wtf/GetPtr.h>
#include <wtf/RefPtr.h>
#include <algorithm>
@@ -41,6 +42,9 @@ template <typename T> GRefPtr<T> adoptGRef(T*);
template <typename T> class GRefPtr {
public:
+ typedef T ValueType;
+ typedef ValueType* PtrType;
+
GRefPtr() : m_ptr(0) { }
GRefPtr(T* ptr)
@@ -64,6 +68,9 @@ public:
refGPtr(ptr);
}
+ GRefPtr(GRefPtr&& o) : m_ptr(o.leakRef()) { }
+ template <typename U> GRefPtr(GRefPtr<U>&& o) : m_ptr(o.leakRef()) { }
+
~GRefPtr()
{
if (T* ptr = m_ptr)
@@ -106,6 +113,7 @@ public:
operator UnspecifiedBoolType() const { return m_ptr ? &GRefPtr::m_ptr : 0; }
GRefPtr& operator=(const GRefPtr&);
+ GRefPtr& operator=(GRefPtr&&);
GRefPtr& operator=(T*);
template <typename U> GRefPtr& operator=(const GRefPtr<U>&);
@@ -132,6 +140,13 @@ template <typename T> inline GRefPtr<T>& GRefPtr<T>::operator=(const GRefPtr<T>&
return *this;
}
+template <typename T> inline GRefPtr<T>& GRefPtr<T>::operator=(GRefPtr<T>&& o)
+{
+ GRefPtr ptr = WTFMove(o);
+ swap(ptr);
+ return *this;
+}
+
template <typename T> inline GRefPtr<T>& GRefPtr<T>::operator=(T* optr)
{
T* ptr = m_ptr;
@@ -193,34 +208,35 @@ template <typename T, typename U> inline GRefPtr<T> const_pointer_cast(const GRe
return GRefPtr<T>(const_cast<T*>(p.get()));
}
-template <typename T> inline T* getPtr(const GRefPtr<T>& p)
-{
- return p.get();
-}
+template <typename T> struct IsSmartPtr<GRefPtr<T>> {
+ static const bool value = true;
+};
template <typename T> GRefPtr<T> adoptGRef(T* p)
{
return GRefPtr<T>(p, GRefPtrAdopt);
}
-template <> GHashTable* refGPtr(GHashTable* ptr);
-template <> void derefGPtr(GHashTable* ptr);
-template <> GMainContext* refGPtr(GMainContext* ptr);
-template <> void derefGPtr(GMainContext* ptr);
-template <> GMainLoop* refGPtr(GMainLoop* ptr);
-template <> void derefGPtr(GMainLoop* ptr);
-template <> GVariant* refGPtr(GVariant* ptr);
-template <> void derefGPtr(GVariant* ptr);
-template <> GSource* refGPtr(GSource* ptr);
-template <> void derefGPtr(GSource* ptr);
-template <> GPtrArray* refGPtr(GPtrArray*);
-template <> void derefGPtr(GPtrArray*);
-template <> GByteArray* refGPtr(GByteArray*);
-template <> void derefGPtr(GByteArray*);
-template <> GBytes* refGPtr(GBytes*);
-template <> void derefGPtr(GBytes*);
-template <> GClosure* refGPtr(GClosure*);
-template <> void derefGPtr(GClosure*);
+template <> WTF_EXPORT_PRIVATE GHashTable* refGPtr(GHashTable* ptr);
+template <> WTF_EXPORT_PRIVATE void derefGPtr(GHashTable* ptr);
+template <> WTF_EXPORT_PRIVATE GMainContext* refGPtr(GMainContext* ptr);
+template <> WTF_EXPORT_PRIVATE void derefGPtr(GMainContext* ptr);
+template <> WTF_EXPORT_PRIVATE GMainLoop* refGPtr(GMainLoop* ptr);
+template <> WTF_EXPORT_PRIVATE void derefGPtr(GMainLoop* ptr);
+template <> WTF_EXPORT_PRIVATE GVariant* refGPtr(GVariant* ptr);
+template <> WTF_EXPORT_PRIVATE void derefGPtr(GVariant* ptr);
+template <> WTF_EXPORT_PRIVATE GVariantBuilder* refGPtr(GVariantBuilder* ptr);
+template <> WTF_EXPORT_PRIVATE void derefGPtr(GVariantBuilder* ptr);
+template <> WTF_EXPORT_PRIVATE GSource* refGPtr(GSource* ptr);
+template <> WTF_EXPORT_PRIVATE void derefGPtr(GSource* ptr);
+template <> WTF_EXPORT_PRIVATE GPtrArray* refGPtr(GPtrArray*);
+template <> WTF_EXPORT_PRIVATE void derefGPtr(GPtrArray*);
+template <> WTF_EXPORT_PRIVATE GByteArray* refGPtr(GByteArray*);
+template <> WTF_EXPORT_PRIVATE void derefGPtr(GByteArray*);
+template <> WTF_EXPORT_PRIVATE GBytes* refGPtr(GBytes*);
+template <> WTF_EXPORT_PRIVATE void derefGPtr(GBytes*);
+template <> WTF_EXPORT_PRIVATE GClosure* refGPtr(GClosure*);
+template <> WTF_EXPORT_PRIVATE void derefGPtr(GClosure*);
template <typename T> inline T* refGPtr(T* ptr)
{
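
A brief sketch of the move support added to GRefPtr above: moving transfers ownership without touching the underlying reference count, since the source leaks its pointer and the destination adopts it. The GMainContext is illustrative:

    GRefPtr<GMainContext> context = adoptGRef(g_main_context_new()); // refcount is 1
    GRefPtr<GMainContext> movedTo = WTFMove(context);                // still 1; no ref/unref happened
    // context is now null; movedTo owns the single reference.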
diff --git a/Source/WTF/wtf/gobject/GTypedefs.h b/Source/WTF/wtf/glib/GTypedefs.h
index 9990792fd..51053c03d 100644
--- a/Source/WTF/wtf/gobject/GTypedefs.h
+++ b/Source/WTF/wtf/glib/GTypedefs.h
@@ -17,8 +17,8 @@
* Boston, MA 02110-1301, USA.
*/
-#ifndef GtkTypedefs_h
-#define GtkTypedefs_h
+#ifndef WTF_GTypedefs_h
+#define WTF_GTypedefs_h
/* Vanilla C code does not seem to be able to handle forward-declaration typedefs. */
#ifdef __cplusplus
@@ -60,6 +60,8 @@ typedef struct _GSocketClient GSocketClient;
typedef struct _GSocketConnection GSocketConnection;
typedef struct _GSource GSource;
typedef struct _GVariant GVariant;
+typedef struct _GVariantBuilder GVariantBuilder;
+typedef struct _GVariantIter GVariantIter;
typedef union _GdkEvent GdkEvent;
typedef struct _GTimer GTimer;
typedef struct _GKeyFile GKeyFile;
@@ -106,4 +108,4 @@ typedef struct _GtkStyleContext GtkStyleContext;
#endif
#endif
-#endif /* GtkTypedefs_h */
+#endif // WTF_GTypedefs_h
diff --git a/Source/WTF/wtf/gobject/GUniquePtr.h b/Source/WTF/wtf/glib/GUniquePtr.h
index bcfaa3ff8..e1cff8dde 100644
--- a/Source/WTF/wtf/gobject/GUniquePtr.h
+++ b/Source/WTF/wtf/glib/GUniquePtr.h
@@ -43,15 +43,16 @@ using GUniquePtr = std::unique_ptr<T, GPtrDeleter<T>>;
macro(GPatternSpec, g_pattern_spec_free) \
macro(GDir, g_dir_close) \
macro(GTimer, g_timer_destroy) \
- macro(GKeyFile, g_key_file_free)
+ macro(GKeyFile, g_key_file_free) \
+ macro(char*, g_strfreev) \
+ macro(GVariantIter, g_variant_iter_free)
#define WTF_DEFINE_GPTR_DELETER(typeName, deleterFunc) \
template<> struct GPtrDeleter<typeName> \
{ \
void operator() (typeName* ptr) const \
{ \
- if (ptr) \
- deleterFunc(ptr); \
+ deleterFunc(ptr); \
} \
};
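
The new char* entry above wires GUniquePtr up to g_strfreev(), so a NULL-terminated string array returned by glib is freed automatically; a short sketch with an illustrative input:

    GUniquePtr<char*> tokens(g_strsplit("a,b,c", ",", -1));
    for (char** token = tokens.get(); *token; ++token)
        g_print("%s\n", *token);
    // g_strfreev() runs when tokens goes out of scope.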
diff --git a/Source/WTF/wtf/glib/MainThreadGLib.cpp b/Source/WTF/wtf/glib/MainThreadGLib.cpp
new file mode 100644
index 000000000..c2fa56fcf
--- /dev/null
+++ b/Source/WTF/wtf/glib/MainThreadGLib.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MainThread.h"
+
+#include <glib.h>
+#include <wtf/RunLoop.h>
+
+static pthread_t mainThreadPthread;
+
+namespace WTF {
+
+class MainThreadDispatcher {
+public:
+ MainThreadDispatcher()
+ : m_timer(RunLoop::main(), this, &MainThreadDispatcher::fired)
+ {
+ m_timer.setPriority(G_PRIORITY_HIGH_IDLE + 20);
+ }
+
+ void schedule()
+ {
+ m_timer.startOneShot(0);
+ }
+
+private:
+ void fired()
+ {
+ dispatchFunctionsFromMainThread();
+ }
+
+ RunLoop::Timer<MainThreadDispatcher> m_timer;
+};
+
+void initializeMainThreadPlatform()
+{
+ mainThreadPthread = pthread_self();
+}
+
+bool isMainThread()
+{
+ ASSERT(mainThreadPthread);
+ return pthread_equal(pthread_self(), mainThreadPthread);
+}
+
+void scheduleDispatchFunctionsOnMainThread()
+{
+ // Use a RunLoop::Timer instead of RunLoop::dispatch() so that a different priority can be used,
+ // and to avoid double queueing, since dispatchOnMainThread already queues the functions.
+ static MainThreadDispatcher dispatcher;
+ dispatcher.schedule();
+}
+
+} // namespace WTF
diff --git a/Source/WTF/wtf/glib/RunLoopGLib.cpp b/Source/WTF/wtf/glib/RunLoopGLib.cpp
new file mode 100644
index 000000000..927ce3254
--- /dev/null
+++ b/Source/WTF/wtf/glib/RunLoopGLib.cpp
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Portions Copyright (c) 2010 Motorola Mobility, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "RunLoop.h"
+
+#include <glib.h>
+#include <wtf/MainThread.h>
+
+namespace WTF {
+
+static GSourceFuncs runLoopSourceFunctions = {
+ nullptr, // prepare
+ nullptr, // check
+ // dispatch
+ [](GSource* source, GSourceFunc callback, gpointer userData) -> gboolean
+ {
+ if (g_source_get_ready_time(source) == -1)
+ return G_SOURCE_CONTINUE;
+ g_source_set_ready_time(source, -1);
+ return callback(userData);
+ },
+ nullptr, // finalize
+ nullptr, // closure_callback
+ nullptr, // closure_marshall
+};
+
+RunLoop::RunLoop()
+{
+ m_mainContext = g_main_context_get_thread_default();
+ if (!m_mainContext)
+ m_mainContext = isMainThread() ? g_main_context_default() : adoptGRef(g_main_context_new());
+ ASSERT(m_mainContext);
+
+ GRefPtr<GMainLoop> innermostLoop = adoptGRef(g_main_loop_new(m_mainContext.get(), FALSE));
+ ASSERT(innermostLoop);
+ m_mainLoops.append(innermostLoop);
+
+ m_source = adoptGRef(g_source_new(&runLoopSourceFunctions, sizeof(GSource)));
+ g_source_set_name(m_source.get(), "[WebKit] RunLoop work");
+ g_source_set_can_recurse(m_source.get(), TRUE);
+ g_source_set_callback(m_source.get(), [](gpointer userData) -> gboolean {
+ static_cast<RunLoop*>(userData)->performWork();
+ return G_SOURCE_CONTINUE;
+ }, this, nullptr);
+ g_source_attach(m_source.get(), m_mainContext.get());
+}
+
+RunLoop::~RunLoop()
+{
+ g_source_destroy(m_source.get());
+
+ for (int i = m_mainLoops.size() - 1; i >= 0; --i) {
+ if (!g_main_loop_is_running(m_mainLoops[i].get()))
+ continue;
+ g_main_loop_quit(m_mainLoops[i].get());
+ }
+}
+
+void RunLoop::run()
+{
+ RunLoop& runLoop = RunLoop::current();
+ GMainContext* mainContext = runLoop.m_mainContext.get();
+
+ // The innermost main loop should always be there.
+ ASSERT(!runLoop.m_mainLoops.isEmpty());
+
+ GMainLoop* innermostLoop = runLoop.m_mainLoops[0].get();
+ if (!g_main_loop_is_running(innermostLoop)) {
+ g_main_context_push_thread_default(mainContext);
+ g_main_loop_run(innermostLoop);
+ g_main_context_pop_thread_default(mainContext);
+ return;
+ }
+
+ // Create and run a nested loop if the innermost one was already running.
+ GMainLoop* nestedMainLoop = g_main_loop_new(mainContext, FALSE);
+ runLoop.m_mainLoops.append(adoptGRef(nestedMainLoop));
+
+ g_main_context_push_thread_default(mainContext);
+ g_main_loop_run(nestedMainLoop);
+ g_main_context_pop_thread_default(mainContext);
+
+ runLoop.m_mainLoops.removeLast();
+}
+
+void RunLoop::stop()
+{
+ // The innermost main loop should always be there.
+ ASSERT(!m_mainLoops.isEmpty());
+ GRefPtr<GMainLoop> lastMainLoop = m_mainLoops.last();
+ if (g_main_loop_is_running(lastMainLoop.get()))
+ g_main_loop_quit(lastMainLoop.get());
+}
+
+void RunLoop::wakeUp()
+{
+ g_source_set_ready_time(m_source.get(), g_get_monotonic_time());
+}
+
+class DispatchAfterContext {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ DispatchAfterContext(Function<void ()>&& function)
+ : m_function(WTFMove(function))
+ {
+ }
+
+ void dispatch()
+ {
+ m_function();
+ }
+
+private:
+ Function<void ()> m_function;
+};
+
+void RunLoop::dispatchAfter(std::chrono::nanoseconds duration, Function<void ()>&& function)
+{
+ GRefPtr<GSource> source = adoptGRef(g_timeout_source_new(std::chrono::duration_cast<std::chrono::milliseconds>(duration).count()));
+ g_source_set_name(source.get(), "[WebKit] RunLoop dispatchAfter");
+
+ std::unique_ptr<DispatchAfterContext> context = std::make_unique<DispatchAfterContext>(WTFMove(function));
+ g_source_set_callback(source.get(), [](gpointer userData) -> gboolean {
+ std::unique_ptr<DispatchAfterContext> context(static_cast<DispatchAfterContext*>(userData));
+ context->dispatch();
+ return G_SOURCE_REMOVE;
+ }, context.release(), nullptr);
+ g_source_attach(source.get(), m_mainContext.get());
+}
+
+RunLoop::TimerBase::TimerBase(RunLoop& runLoop)
+ : m_runLoop(runLoop)
+ , m_source(adoptGRef(g_source_new(&runLoopSourceFunctions, sizeof(GSource))))
+{
+ g_source_set_name(m_source.get(), "[WebKit] RunLoop::Timer work");
+ g_source_set_callback(m_source.get(), [](gpointer userData) -> gboolean {
+ RunLoop::TimerBase* timer = static_cast<RunLoop::TimerBase*>(userData);
+ timer->fired();
+ if (timer->m_isRepeating)
+ timer->updateReadyTime();
+ return G_SOURCE_CONTINUE;
+ }, this, nullptr);
+ g_source_attach(m_source.get(), m_runLoop.m_mainContext.get());
+}
+
+RunLoop::TimerBase::~TimerBase()
+{
+ g_source_destroy(m_source.get());
+}
+
+void RunLoop::TimerBase::setPriority(int priority)
+{
+ g_source_set_priority(m_source.get(), priority);
+}
+
+void RunLoop::TimerBase::updateReadyTime()
+{
+ if (!m_fireInterval.count()) {
+ g_source_set_ready_time(m_source.get(), 0);
+ return;
+ }
+
+ gint64 currentTime = g_get_monotonic_time();
+ gint64 targetTime = currentTime + std::min<gint64>(G_MAXINT64 - currentTime, m_fireInterval.count());
+ ASSERT(targetTime >= currentTime);
+ g_source_set_ready_time(m_source.get(), targetTime);
+}
+
+void RunLoop::TimerBase::start(double fireInterval, bool repeat)
+{
+ auto intervalDuration = std::chrono::duration<double>(fireInterval);
+ auto safeDuration = std::chrono::microseconds::max();
+ if (intervalDuration < safeDuration)
+ safeDuration = std::chrono::duration_cast<std::chrono::microseconds>(intervalDuration);
+
+ m_fireInterval = safeDuration;
+ m_isRepeating = repeat;
+ updateReadyTime();
+}
+
+void RunLoop::TimerBase::stop()
+{
+ g_source_set_ready_time(m_source.get(), -1);
+}
+
+bool RunLoop::TimerBase::isActive() const
+{
+ return g_source_get_ready_time(m_source.get()) != -1;
+}
+
+} // namespace WTF
diff --git a/Source/WTF/wtf/gtk/RunLoopGtk.cpp b/Source/WTF/wtf/gtk/RunLoopGtk.cpp
deleted file mode 100644
index 8a2ea220c..000000000
--- a/Source/WTF/wtf/gtk/RunLoopGtk.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
- * Portions Copyright (c) 2010 Motorola Mobility, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "RunLoop.h"
-
-#include <glib.h>
-#include <wtf/MainThread.h>
-
-namespace WTF {
-
-RunLoop::RunLoop()
-{
- // g_main_context_default() doesn't add an extra reference.
- m_runLoopContext = isMainThread() ? g_main_context_default() : adoptGRef(g_main_context_new());
- ASSERT(m_runLoopContext);
- GRefPtr<GMainLoop> innermostLoop = adoptGRef(g_main_loop_new(m_runLoopContext.get(), FALSE));
- ASSERT(innermostLoop);
- m_runLoopMainLoops.append(innermostLoop);
-}
-
-RunLoop::~RunLoop()
-{
- for (int i = m_runLoopMainLoops.size() - 1; i >= 0; --i) {
- if (!g_main_loop_is_running(m_runLoopMainLoops[i].get()))
- continue;
- g_main_loop_quit(m_runLoopMainLoops[i].get());
- }
-}
-
-void RunLoop::run()
-{
- RunLoop* mainRunLoop = RunLoop::current();
- GMainLoop* innermostLoop = mainRunLoop->innermostLoop();
- if (!g_main_loop_is_running(innermostLoop)) {
- g_main_loop_run(innermostLoop);
- return;
- }
-
- // Create and run a nested loop if the innermost one was already running.
- GMainLoop* nestedMainLoop = g_main_loop_new(0, FALSE);
- mainRunLoop->pushNestedMainLoop(nestedMainLoop);
- g_main_loop_run(nestedMainLoop);
- mainRunLoop->popNestedMainLoop();
-}
-
-GMainLoop* RunLoop::innermostLoop()
-{
- // The innermost main loop should always be there.
- ASSERT(!m_runLoopMainLoops.isEmpty());
- return m_runLoopMainLoops[0].get();
-}
-
-void RunLoop::pushNestedMainLoop(GMainLoop* nestedLoop)
-{
- // The innermost main loop should always be there.
- ASSERT(!m_runLoopMainLoops.isEmpty());
- m_runLoopMainLoops.append(adoptGRef(nestedLoop));
-}
-
-void RunLoop::popNestedMainLoop()
-{
- // The innermost main loop should always be there.
- ASSERT(!m_runLoopMainLoops.isEmpty());
- m_runLoopMainLoops.removeLast();
-}
-
-void RunLoop::stop()
-{
- // The innermost main loop should always be there.
- ASSERT(!m_runLoopMainLoops.isEmpty());
- GRefPtr<GMainLoop> lastMainLoop = m_runLoopMainLoops.last();
- if (g_main_loop_is_running(lastMainLoop.get()))
- g_main_loop_quit(lastMainLoop.get());
-}
-
-gboolean RunLoop::queueWork(RunLoop* runLoop)
-{
- runLoop->performWork();
- return FALSE;
-}
-
-void RunLoop::wakeUp()
-{
- GRefPtr<GSource> source = adoptGRef(g_idle_source_new());
- g_source_set_priority(source.get(), G_PRIORITY_DEFAULT);
- g_source_set_callback(source.get(), reinterpret_cast<GSourceFunc>(&RunLoop::queueWork), this, 0);
- g_source_attach(source.get(), m_runLoopContext.get());
-
- g_main_context_wakeup(m_runLoopContext.get());
-}
-
-RunLoop::TimerBase::TimerBase(RunLoop* runLoop)
- : m_runLoop(runLoop)
- , m_timerSource(0)
-{
-}
-
-RunLoop::TimerBase::~TimerBase()
-{
- stop();
-}
-
-void RunLoop::TimerBase::clearTimerSource()
-{
- m_timerSource = 0;
-}
-
-gboolean RunLoop::TimerBase::timerFiredCallback(RunLoop::TimerBase* timer)
-{
- GSource* currentTimerSource = timer->m_timerSource.get();
- bool isRepeating = timer->isRepeating();
- // This can change the timerSource by starting a new timer within the callback.
- if (!isRepeating && currentTimerSource == timer->m_timerSource.get())
- timer->clearTimerSource();
-
- timer->fired();
- return isRepeating;
-}
-
-void RunLoop::TimerBase::start(double fireInterval, bool repeat)
-{
- if (m_timerSource)
- stop();
-
- m_timerSource = adoptGRef(g_timeout_source_new(static_cast<guint>(fireInterval * 1000)));
- m_isRepeating = repeat;
- g_source_set_callback(m_timerSource.get(), reinterpret_cast<GSourceFunc>(&RunLoop::TimerBase::timerFiredCallback), this, 0);
- g_source_attach(m_timerSource.get(), m_runLoop->m_runLoopContext.get());
-}
-
-void RunLoop::TimerBase::stop()
-{
- if (!m_timerSource)
- return;
-
- g_source_destroy(m_timerSource.get());
- clearTimerSource();
-}
-
-bool RunLoop::TimerBase::isActive() const
-{
- return m_timerSource;
-}
-
-} // namespace WTF
diff --git a/Source/WTF/wtf/mbmalloc.cpp b/Source/WTF/wtf/mbmalloc.cpp
new file mode 100644
index 000000000..4569db4f3
--- /dev/null
+++ b/Source/WTF/wtf/mbmalloc.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FastMalloc.h"
+
+#define EXPORT __attribute__((visibility("default")))
+
+extern "C" {
+
+EXPORT void* mbmalloc(size_t);
+EXPORT void mbfree(void*, size_t);
+EXPORT void* mbrealloc(void*, size_t, size_t);
+EXPORT void mbscavenge();
+
+void* mbmalloc(size_t size)
+{
+ return WTF::fastMalloc(size);
+}
+
+void mbfree(void* p, size_t)
+{
+ return WTF::fastFree(p);
+}
+
+void* mbrealloc(void* p, size_t, size_t size)
+{
+ return WTF::fastRealloc(p, size);
+}
+
+void mbscavenge()
+{
+ WTF::releaseFastMallocFreeMemory();
+}
+
+} // extern "C"
diff --git a/Source/WTF/wtf/persistence/Coder.h b/Source/WTF/wtf/persistence/Coder.h
new file mode 100644
index 000000000..f7d98c4ed
--- /dev/null
+++ b/Source/WTF/wtf/persistence/Coder.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2010, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace WTF {
+namespace Persistence {
+
+class Decoder;
+class Encoder;
+
+template<typename T> struct Coder {
+ static void encode(Encoder& encoder, const T& t)
+ {
+ t.encode(encoder);
+ }
+
+ static bool decode(Decoder& decoder, T& t)
+ {
+ return T::decode(decoder, t);
+ }
+};
+
+}
+}
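The primary Coder template above forwards to a member encode() and a static decode() on T, so a persistable type only needs to provide those two functions. A minimal sketch with a hypothetical type; CacheKey is illustrative, not a WTF type:

    #include <wtf/persistence/Coders.h>
    #include <wtf/text/WTFString.h>

    struct CacheKey {
        WTF::String partition;
        uint32_t version { 0 };

        // Picked up by Coder<CacheKey>::encode().
        void encode(WTF::Persistence::Encoder& encoder) const
        {
            encoder << partition << version;
        }

        // Picked up by Coder<CacheKey>::decode().
        static bool decode(WTF::Persistence::Decoder& decoder, CacheKey& result)
        {
            return decoder.decode(result.partition) && decoder.decode(result.version);
        }
    };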
diff --git a/Source/WTF/wtf/persistence/Coders.cpp b/Source/WTF/wtf/persistence/Coders.cpp
new file mode 100644
index 000000000..7cd42f488
--- /dev/null
+++ b/Source/WTF/wtf/persistence/Coders.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2011, 2014-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Coders.h"
+
+#include <wtf/text/CString.h>
+#include <wtf/text/WTFString.h>
+
+namespace WTF {
+namespace Persistence {
+
+void Coder<AtomicString>::encode(Encoder& encoder, const AtomicString& atomicString)
+{
+ encoder << atomicString.string();
+}
+
+bool Coder<AtomicString>::decode(Decoder& decoder, AtomicString& atomicString)
+{
+ String string;
+ if (!decoder.decode(string))
+ return false;
+
+ atomicString = string;
+ return true;
+}
+
+void Coder<CString>::encode(Encoder& encoder, const CString& string)
+{
+ // Special case the null string.
+ if (string.isNull()) {
+ encoder << std::numeric_limits<uint32_t>::max();
+ return;
+ }
+
+ uint32_t length = string.length();
+ encoder << length;
+ encoder.encodeFixedLengthData(reinterpret_cast<const uint8_t*>(string.data()), length);
+}
+
+bool Coder<CString>::decode(Decoder& decoder, CString& result)
+{
+ uint32_t length;
+ if (!decoder.decode(length))
+ return false;
+
+ if (length == std::numeric_limits<uint32_t>::max()) {
+ // This is the null string.
+ result = CString();
+ return true;
+ }
+
+ // Before allocating the string, make sure that the decoder buffer is big enough.
+ if (!decoder.bufferIsLargeEnoughToContain<char>(length))
+ return false;
+
+ char* buffer;
+ CString string = CString::newUninitialized(length, buffer);
+ if (!decoder.decodeFixedLengthData(reinterpret_cast<uint8_t*>(buffer), length))
+ return false;
+
+ result = string;
+ return true;
+}
+
+
+void Coder<String>::encode(Encoder& encoder, const String& string)
+{
+ // Special case the null string.
+ if (string.isNull()) {
+ encoder << std::numeric_limits<uint32_t>::max();
+ return;
+ }
+
+ uint32_t length = string.length();
+ bool is8Bit = string.is8Bit();
+
+ encoder << length << is8Bit;
+
+ if (is8Bit)
+ encoder.encodeFixedLengthData(reinterpret_cast<const uint8_t*>(string.characters8()), length * sizeof(LChar));
+ else
+ encoder.encodeFixedLengthData(reinterpret_cast<const uint8_t*>(string.characters16()), length * sizeof(UChar));
+}
+
+template <typename CharacterType>
+static inline bool decodeStringText(Decoder& decoder, uint32_t length, String& result)
+{
+ // Before allocating the string, make sure that the decoder buffer is big enough.
+ if (!decoder.bufferIsLargeEnoughToContain<CharacterType>(length))
+ return false;
+
+ CharacterType* buffer;
+ String string = String::createUninitialized(length, buffer);
+ if (!decoder.decodeFixedLengthData(reinterpret_cast<uint8_t*>(buffer), length * sizeof(CharacterType)))
+ return false;
+
+ result = string;
+ return true;
+}
+
+bool Coder<String>::decode(Decoder& decoder, String& result)
+{
+ uint32_t length;
+ if (!decoder.decode(length))
+ return false;
+
+ if (length == std::numeric_limits<uint32_t>::max()) {
+ // This is the null string.
+ result = String();
+ return true;
+ }
+
+ bool is8Bit;
+ if (!decoder.decode(is8Bit))
+ return false;
+
+ if (is8Bit)
+ return decodeStringText<LChar>(decoder, length, result);
+ return decodeStringText<UChar>(decoder, length, result);
+}
+
+void Coder<SHA1::Digest>::encode(Encoder& encoder, const SHA1::Digest& digest)
+{
+ encoder.encodeFixedLengthData(digest.data(), sizeof(digest));
+}
+
+bool Coder<SHA1::Digest>::decode(Decoder& decoder, SHA1::Digest& digest)
+{
+ return decoder.decodeFixedLengthData(digest.data(), sizeof(digest));
+}
+
+}
+}
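The length value 0xFFFFFFFF is reserved above as a sentinel for the null string, so null and empty strings survive a round trip as distinct values. A small sketch of that behaviour, as harness code only:

    #include <wtf/persistence/Coders.h>
    #include <wtf/text/WTFString.h>

    static bool roundTripsNullAndEmpty()
    {
        WTF::Persistence::Encoder encoder;
        encoder << WTF::String() << WTF::emptyString();

        WTF::Persistence::Decoder decoder(encoder.buffer(), encoder.bufferSize());
        WTF::String null, empty;
        if (!decoder.decode(null) || !decoder.decode(empty))
            return false;

        // The sentinel keeps the two cases distinct after decoding.
        return null.isNull() && empty.isEmpty() && !empty.isNull();
    }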
diff --git a/Source/WTF/wtf/persistence/Coders.h b/Source/WTF/wtf/persistence/Coders.h
new file mode 100644
index 000000000..3cde70a7f
--- /dev/null
+++ b/Source/WTF/wtf/persistence/Coders.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2010, 2014-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <utility>
+#include <wtf/Forward.h>
+#include <wtf/HashMap.h>
+#include <wtf/HashSet.h>
+#include <wtf/SHA1.h>
+#include <wtf/Vector.h>
+#include <wtf/persistence/Decoder.h>
+#include <wtf/persistence/Encoder.h>
+
+namespace WTF {
+namespace Persistence {
+
+template<typename T, typename U> struct Coder<std::pair<T, U>> {
+ static void encode(Encoder& encoder, const std::pair<T, U>& pair)
+ {
+ encoder << pair.first << pair.second;
+ }
+
+ static bool decode(Decoder& decoder, std::pair<T, U>& pair)
+ {
+ T first;
+ if (!decoder.decode(first))
+ return false;
+
+ U second;
+ if (!decoder.decode(second))
+ return false;
+
+ pair.first = first;
+ pair.second = second;
+ return true;
+ }
+};
+
+template<typename Rep, typename Period> struct Coder<std::chrono::duration<Rep, Period>> {
+ static void encode(Encoder& encoder, const std::chrono::duration<Rep, Period>& duration)
+ {
+ static_assert(std::is_integral<Rep>::value && std::is_signed<Rep>::value && sizeof(Rep) <= sizeof(int64_t), "Serialization of this Rep type is not supported yet. Only signed integer types that fit in an int64_t are currently supported.");
+ encoder << static_cast<int64_t>(duration.count());
+ }
+
+ static bool decode(Decoder& decoder, std::chrono::duration<Rep, Period>& result)
+ {
+ int64_t count;
+ if (!decoder.decode(count))
+ return false;
+ result = std::chrono::duration<Rep, Period>(static_cast<Rep>(count));
+ return true;
+ }
+};
+
+template<typename T> struct Coder<std::optional<T>> {
+ static void encode(Encoder& encoder, const std::optional<T>& optional)
+ {
+ if (!optional) {
+ encoder << false;
+ return;
+ }
+
+ encoder << true;
+ encoder << optional.value();
+ }
+
+ static bool decode(Decoder& decoder, std::optional<T>& optional)
+ {
+ bool isEngaged;
+ if (!decoder.decode(isEngaged))
+ return false;
+
+ if (!isEngaged) {
+ optional = std::nullopt;
+ return true;
+ }
+
+ T value;
+ if (!decoder.decode(value))
+ return false;
+
+ optional = WTFMove(value);
+ return true;
+ }
+};
+
+template<typename KeyType, typename ValueType> struct Coder<WTF::KeyValuePair<KeyType, ValueType>> {
+ static void encode(Encoder& encoder, const WTF::KeyValuePair<KeyType, ValueType>& pair)
+ {
+ encoder << pair.key << pair.value;
+ }
+
+ static bool decode(Decoder& decoder, WTF::KeyValuePair<KeyType, ValueType>& pair)
+ {
+ KeyType key;
+ if (!decoder.decode(key))
+ return false;
+
+ ValueType value;
+ if (!decoder.decode(value))
+ return false;
+
+ pair.key = key;
+ pair.value = value;
+ return true;
+ }
+};
+
+template<bool fixedSizeElements, typename T, size_t inlineCapacity> struct VectorCoder;
+
+template<typename T, size_t inlineCapacity> struct VectorCoder<false, T, inlineCapacity> {
+ static void encode(Encoder& encoder, const Vector<T, inlineCapacity>& vector)
+ {
+ encoder << static_cast<uint64_t>(vector.size());
+ for (size_t i = 0; i < vector.size(); ++i)
+ encoder << vector[i];
+ }
+
+ static bool decode(Decoder& decoder, Vector<T, inlineCapacity>& vector)
+ {
+ uint64_t size;
+ if (!decoder.decode(size))
+ return false;
+
+ Vector<T, inlineCapacity> tmp;
+ for (size_t i = 0; i < size; ++i) {
+ T element;
+ if (!decoder.decode(element))
+ return false;
+
+ tmp.append(WTFMove(element));
+ }
+
+ tmp.shrinkToFit();
+ vector.swap(tmp);
+ return true;
+ }
+};
+
+template<typename T, size_t inlineCapacity> struct VectorCoder<true, T, inlineCapacity> {
+ static void encode(Encoder& encoder, const Vector<T, inlineCapacity>& vector)
+ {
+ encoder << static_cast<uint64_t>(vector.size());
+ encoder.encodeFixedLengthData(reinterpret_cast<const uint8_t*>(vector.data()), vector.size() * sizeof(T));
+ }
+
+ static bool decode(Decoder& decoder, Vector<T, inlineCapacity>& vector)
+ {
+ uint64_t size;
+ if (!decoder.decode(size))
+ return false;
+
+ // Since we know the total size of the elements, we can allocate the vector in
+ // one fell swoop. Before allocating, however, we must make sure that the decoder
+ // buffer is big enough.
+ if (!decoder.bufferIsLargeEnoughToContain<T>(size))
+ return false;
+
+ Vector<T, inlineCapacity> temp;
+ temp.resize(size);
+
+ decoder.decodeFixedLengthData(reinterpret_cast<uint8_t*>(temp.data()), size * sizeof(T));
+
+ vector.swap(temp);
+ return true;
+ }
+};
+
+template<typename T, size_t inlineCapacity> struct Coder<Vector<T, inlineCapacity>> : VectorCoder<std::is_arithmetic<T>::value, T, inlineCapacity> { };
+
+template<typename KeyArg, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg> struct Coder<HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>> {
+ typedef HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg> HashMapType;
+
+ static void encode(Encoder& encoder, const HashMapType& hashMap)
+ {
+ encoder << static_cast<uint64_t>(hashMap.size());
+ for (typename HashMapType::const_iterator it = hashMap.begin(), end = hashMap.end(); it != end; ++it)
+ encoder << *it;
+ }
+
+ static bool decode(Decoder& decoder, HashMapType& hashMap)
+ {
+ uint64_t hashMapSize;
+ if (!decoder.decode(hashMapSize))
+ return false;
+
+ HashMapType tempHashMap;
+ for (uint64_t i = 0; i < hashMapSize; ++i) {
+ KeyArg key;
+ MappedArg value;
+ if (!decoder.decode(key))
+ return false;
+ if (!decoder.decode(value))
+ return false;
+
+ if (!tempHashMap.add(key, value).isNewEntry) {
+ // The hash map already has the specified key, bail.
+ return false;
+ }
+ }
+
+ hashMap.swap(tempHashMap);
+ return true;
+ }
+};
+
+template<typename KeyArg, typename HashArg, typename KeyTraitsArg> struct Coder<HashSet<KeyArg, HashArg, KeyTraitsArg>> {
+ typedef HashSet<KeyArg, HashArg, KeyTraitsArg> HashSetType;
+
+ static void encode(Encoder& encoder, const HashSetType& hashSet)
+ {
+ encoder << static_cast<uint64_t>(hashSet.size());
+ for (typename HashSetType::const_iterator it = hashSet.begin(), end = hashSet.end(); it != end; ++it)
+ encoder << *it;
+ }
+
+ static bool decode(Decoder& decoder, HashSetType& hashSet)
+ {
+ uint64_t hashSetSize;
+ if (!decoder.decode(hashSetSize))
+ return false;
+
+ HashSetType tempHashSet;
+ for (uint64_t i = 0; i < hashSetSize; ++i) {
+ KeyArg key;
+ if (!decoder.decode(key))
+ return false;
+
+ if (!tempHashSet.add(key).isNewEntry) {
+ // The hash set already has the specified key, bail.
+ return false;
+ }
+ }
+
+ hashSet.swap(tempHashSet);
+ return true;
+ }
+};
+
+template<> struct Coder<std::chrono::system_clock::time_point> {
+ static void encode(Encoder& encoder, const std::chrono::system_clock::time_point& timePoint)
+ {
+ encoder << static_cast<int64_t>(timePoint.time_since_epoch().count());
+ }
+
+ static bool decode(Decoder& decoder, std::chrono::system_clock::time_point& result)
+ {
+ int64_t time;
+ if (!decoder.decode(time))
+ return false;
+
+ result = std::chrono::system_clock::time_point(std::chrono::system_clock::duration(static_cast<std::chrono::system_clock::rep>(time)));
+ return true;
+ }
+};
+
+template<> struct Coder<AtomicString> {
+ WTF_EXPORT_PRIVATE static void encode(Encoder&, const AtomicString&);
+ WTF_EXPORT_PRIVATE static bool decode(Decoder&, AtomicString&);
+};
+
+template<> struct Coder<CString> {
+ WTF_EXPORT_PRIVATE static void encode(Encoder&, const CString&);
+ WTF_EXPORT_PRIVATE static bool decode(Decoder&, CString&);
+};
+
+template<> struct Coder<String> {
+ WTF_EXPORT_PRIVATE static void encode(Encoder&, const String&);
+ WTF_EXPORT_PRIVATE static bool decode(Decoder&, String&);
+};
+
+template<> struct Coder<SHA1::Digest> {
+ WTF_EXPORT_PRIVATE static void encode(Encoder&, const SHA1::Digest&);
+ WTF_EXPORT_PRIVATE static bool decode(Decoder&, SHA1::Digest&);
+};
+
+}
+}
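Coder<Vector<T>> above switches on std::is_arithmetic<T>: arithmetic element types are written as one fixed-length block, everything else is encoded element by element, and the HashMap/HashSet decoders reject streams containing duplicate keys. A short sketch of encoding and decoding a map, using illustrative harness code:

    #include <wtf/HashMap.h>
    #include <wtf/persistence/Coders.h>
    #include <wtf/text/WTFString.h>

    static bool roundTripMap()
    {
        WTF::HashMap<WTF::String, uint64_t> sizes;
        sizes.add(WTF::String("index.html"), 1024);
        sizes.add(WTF::String("logo.png"), 4096);

        WTF::Persistence::Encoder encoder;
        encoder << sizes; // Writes the size, then each KeyValuePair.

        WTF::Persistence::Decoder decoder(encoder.buffer(), encoder.bufferSize());
        WTF::HashMap<WTF::String, uint64_t> decoded;
        return decoder.decode(decoded) && decoded.size() == 2;
    }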
diff --git a/Source/WTF/wtf/persistence/Decoder.cpp b/Source/WTF/wtf/persistence/Decoder.cpp
new file mode 100644
index 000000000..354ff0606
--- /dev/null
+++ b/Source/WTF/wtf/persistence/Decoder.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2010, 2011, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Decoder.h"
+
+#include <wtf/persistence/Encoder.h>
+
+namespace WTF {
+namespace Persistence {
+
+Decoder::Decoder(const uint8_t* buffer, size_t bufferSize)
+ : m_buffer(buffer)
+ , m_bufferPosition(buffer)
+ , m_bufferEnd(buffer + bufferSize)
+{
+}
+
+Decoder::~Decoder()
+{
+}
+
+bool Decoder::bufferIsLargeEnoughToContain(size_t size) const
+{
+ return size <= static_cast<size_t>(m_bufferEnd - m_bufferPosition);
+}
+
+bool Decoder::decodeFixedLengthData(uint8_t* data, size_t size)
+{
+ if (!bufferIsLargeEnoughToContain(size))
+ return false;
+
+ memcpy(data, m_bufferPosition, size);
+ m_bufferPosition += size;
+
+ Encoder::updateChecksumForData(m_sha1, data, size);
+ return true;
+}
+
+template<typename Type>
+bool Decoder::decodeNumber(Type& value)
+{
+ if (!bufferIsLargeEnoughToContain(sizeof(value)))
+ return false;
+
+ memcpy(&value, m_bufferPosition, sizeof(value));
+ m_bufferPosition += sizeof(Type);
+
+ Encoder::updateChecksumForNumber(m_sha1, value);
+ return true;
+}
+
+bool Decoder::decode(bool& result)
+{
+ return decodeNumber(result);
+}
+
+bool Decoder::decode(uint8_t& result)
+{
+ return decodeNumber(result);
+}
+
+bool Decoder::decode(uint16_t& result)
+{
+ return decodeNumber(result);
+}
+
+bool Decoder::decode(uint32_t& result)
+{
+ return decodeNumber(result);
+}
+
+bool Decoder::decode(uint64_t& result)
+{
+ return decodeNumber(result);
+}
+
+bool Decoder::decode(int32_t& result)
+{
+ return decodeNumber(result);
+}
+
+bool Decoder::decode(int64_t& result)
+{
+ return decodeNumber(result);
+}
+
+bool Decoder::decode(float& result)
+{
+ return decodeNumber(result);
+}
+
+bool Decoder::decode(double& result)
+{
+ return decodeNumber(result);
+}
+
+bool Decoder::verifyChecksum()
+{
+ SHA1::Digest computedHash;
+ m_sha1.computeHash(computedHash);
+
+ SHA1::Digest savedHash;
+ if (!decodeFixedLengthData(savedHash.data(), sizeof(savedHash)))
+ return false;
+
+ return computedHash == savedHash;
+}
+
+}
+}
diff --git a/Source/WTF/wtf/persistence/Decoder.h b/Source/WTF/wtf/persistence/Decoder.h
new file mode 100644
index 000000000..414ec88fc
--- /dev/null
+++ b/Source/WTF/wtf/persistence/Decoder.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/SHA1.h>
+#include <wtf/persistence/Coder.h>
+
+namespace WTF {
+namespace Persistence {
+
+class Decoder {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ WTF_EXPORT_PRIVATE Decoder(const uint8_t* buffer, size_t bufferSize);
+ WTF_EXPORT_PRIVATE ~Decoder();
+
+ size_t length() const { return m_bufferEnd - m_buffer; }
+ size_t currentOffset() const { return m_bufferPosition - m_buffer; }
+
+ WTF_EXPORT_PRIVATE bool verifyChecksum();
+
+ WTF_EXPORT_PRIVATE bool decodeFixedLengthData(uint8_t*, size_t);
+
+ WTF_EXPORT_PRIVATE bool decode(bool&);
+ WTF_EXPORT_PRIVATE bool decode(uint8_t&);
+ WTF_EXPORT_PRIVATE bool decode(uint16_t&);
+ WTF_EXPORT_PRIVATE bool decode(uint32_t&);
+ WTF_EXPORT_PRIVATE bool decode(uint64_t&);
+ WTF_EXPORT_PRIVATE bool decode(int32_t&);
+ WTF_EXPORT_PRIVATE bool decode(int64_t&);
+ WTF_EXPORT_PRIVATE bool decode(float&);
+ WTF_EXPORT_PRIVATE bool decode(double&);
+
+ template<typename T> bool decodeEnum(T& result)
+ {
+ static_assert(sizeof(T) <= 8, "Enum type T must not be larger than 64 bits!");
+
+ uint64_t value;
+ if (!decode(value))
+ return false;
+
+ result = static_cast<T>(value);
+ return true;
+ }
+
+ template<typename T> bool decode(T& t)
+ {
+ return Coder<T>::decode(*this, t);
+ }
+
+ template<typename T>
+ bool bufferIsLargeEnoughToContain(size_t numElements) const
+ {
+ static_assert(std::is_arithmetic<T>::value, "Type T must have a fixed, known encoded size!");
+
+ if (numElements > std::numeric_limits<size_t>::max() / sizeof(T))
+ return false;
+
+ return bufferIsLargeEnoughToContain(numElements * sizeof(T));
+ }
+
+ static const bool isIPCDecoder = false;
+
+private:
+ WTF_EXPORT_PRIVATE bool bufferIsLargeEnoughToContain(size_t) const;
+ template<typename Type> bool decodeNumber(Type&);
+
+ const uint8_t* m_buffer;
+ const uint8_t* m_bufferPosition;
+ const uint8_t* m_bufferEnd;
+
+ SHA1 m_sha1;
+};
+
+}
+}
+
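Every decode call feeds the same per-type salted SHA-1 that the encoder maintained, so a stream can carry a trailing checksum that detects both corruption and writer/reader type mismatches. A minimal round-trip sketch, harness code only:

    #include <wtf/persistence/Decoder.h>
    #include <wtf/persistence/Encoder.h>

    static bool writeAndVerify()
    {
        WTF::Persistence::Encoder encoder;
        encoder << static_cast<uint32_t>(42) << true;
        encoder.encodeChecksum(); // Appends the SHA-1 of everything written so far.

        WTF::Persistence::Decoder decoder(encoder.buffer(), encoder.bufferSize());
        uint32_t value;
        bool flag;
        if (!decoder.decode(value) || !decoder.decode(flag))
            return false;

        // Fails if the bytes were corrupted or were decoded as different types.
        return decoder.verifyChecksum() && value == 42 && flag;
    }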
diff --git a/Source/WTF/wtf/persistence/Encoder.cpp b/Source/WTF/wtf/persistence/Encoder.cpp
new file mode 100644
index 000000000..0f11e3481
--- /dev/null
+++ b/Source/WTF/wtf/persistence/Encoder.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2010, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Encoder.h"
+
+#include "SHA1.h"
+
+namespace WTF {
+namespace Persistence {
+
+Encoder::Encoder()
+{
+}
+
+Encoder::~Encoder()
+{
+}
+
+uint8_t* Encoder::grow(size_t size)
+{
+ size_t newPosition = m_buffer.size();
+ m_buffer.grow(m_buffer.size() + size);
+ return m_buffer.data() + newPosition;
+}
+
+void Encoder::updateChecksumForData(SHA1& sha1, const uint8_t* data, size_t size)
+{
+ auto typeSalt = Salt<uint8_t*>::value;
+ sha1.addBytes(reinterpret_cast<uint8_t*>(&typeSalt), sizeof(typeSalt));
+ sha1.addBytes(data, size);
+}
+
+void Encoder::encodeFixedLengthData(const uint8_t* data, size_t size)
+{
+ updateChecksumForData(m_sha1, data, size);
+
+ uint8_t* buffer = grow(size);
+ memcpy(buffer, data, size);
+}
+
+template<typename Type>
+void Encoder::encodeNumber(Type value)
+{
+ Encoder::updateChecksumForNumber(m_sha1, value);
+
+ uint8_t* buffer = grow(sizeof(Type));
+ memcpy(buffer, &value, sizeof(Type));
+}
+
+void Encoder::encode(bool value)
+{
+ encodeNumber(value);
+}
+
+void Encoder::encode(uint8_t value)
+{
+ encodeNumber(value);
+}
+
+void Encoder::encode(uint16_t value)
+{
+ encodeNumber(value);
+}
+
+void Encoder::encode(uint32_t value)
+{
+ encodeNumber(value);
+}
+
+void Encoder::encode(uint64_t value)
+{
+ encodeNumber(value);
+}
+
+void Encoder::encode(int32_t value)
+{
+ encodeNumber(value);
+}
+
+void Encoder::encode(int64_t value)
+{
+ encodeNumber(value);
+}
+
+void Encoder::encode(float value)
+{
+ encodeNumber(value);
+}
+
+void Encoder::encode(double value)
+{
+ encodeNumber(value);
+}
+
+void Encoder::encodeChecksum()
+{
+ SHA1::Digest hash;
+ m_sha1.computeHash(hash);
+ encodeFixedLengthData(hash.data(), hash.size());
+}
+
+}
+}
diff --git a/Source/WTF/wtf/persistence/Encoder.h b/Source/WTF/wtf/persistence/Encoder.h
new file mode 100644
index 000000000..61c965530
--- /dev/null
+++ b/Source/WTF/wtf/persistence/Encoder.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/SHA1.h>
+#include <wtf/Vector.h>
+#include <wtf/persistence/Coder.h>
+
+namespace WTF {
+namespace Persistence {
+
+class Encoder;
+class DataReference;
+
+class Encoder {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ WTF_EXPORT_PRIVATE Encoder();
+ WTF_EXPORT_PRIVATE ~Encoder();
+
+ WTF_EXPORT_PRIVATE void encodeChecksum();
+ WTF_EXPORT_PRIVATE void encodeFixedLengthData(const uint8_t*, size_t);
+
+ template<typename T> void encodeEnum(T t)
+ {
+ COMPILE_ASSERT(sizeof(T) <= sizeof(uint64_t), enum_type_must_not_be_larger_than_64_bits);
+
+ encode(static_cast<uint64_t>(t));
+ }
+
+ template<typename T> void encode(const T& t)
+ {
+ Coder<T>::encode(*this, t);
+ }
+
+ template<typename T> Encoder& operator<<(const T& t)
+ {
+ encode(t);
+ return *this;
+ }
+
+ const uint8_t* buffer() const { return m_buffer.data(); }
+ size_t bufferSize() const { return m_buffer.size(); }
+
+ WTF_EXPORT_PRIVATE static void updateChecksumForData(SHA1&, const uint8_t*, size_t);
+ template <typename Type> static void updateChecksumForNumber(SHA1&, Type);
+
+ static const bool isIPCEncoder = false;
+
+private:
+ WTF_EXPORT_PRIVATE void encode(bool);
+ WTF_EXPORT_PRIVATE void encode(uint8_t);
+ WTF_EXPORT_PRIVATE void encode(uint16_t);
+ WTF_EXPORT_PRIVATE void encode(uint32_t);
+ WTF_EXPORT_PRIVATE void encode(uint64_t);
+ WTF_EXPORT_PRIVATE void encode(int32_t);
+ WTF_EXPORT_PRIVATE void encode(int64_t);
+ WTF_EXPORT_PRIVATE void encode(float);
+ WTF_EXPORT_PRIVATE void encode(double);
+
+ template<typename Type> void encodeNumber(Type);
+
+ uint8_t* grow(size_t);
+
+ template <typename Type> struct Salt;
+
+ Vector<uint8_t, 4096> m_buffer;
+ SHA1 m_sha1;
+};
+
+template <> struct Encoder::Salt<bool> { static const unsigned value = 3; };
+template <> struct Encoder::Salt<uint8_t> { static const unsigned value = 5; };
+template <> struct Encoder::Salt<uint16_t> { static const unsigned value = 7; };
+template <> struct Encoder::Salt<uint32_t> { static const unsigned value = 11; };
+template <> struct Encoder::Salt<uint64_t> { static const unsigned value = 13; };
+template <> struct Encoder::Salt<int32_t> { static const unsigned value = 17; };
+template <> struct Encoder::Salt<int64_t> { static const unsigned value = 19; };
+template <> struct Encoder::Salt<float> { static const unsigned value = 23; };
+template <> struct Encoder::Salt<double> { static const unsigned value = 29; };
+template <> struct Encoder::Salt<uint8_t*> { static const unsigned value = 101; };
+
+template <typename Type>
+void Encoder::updateChecksumForNumber(SHA1& sha1, Type value)
+{
+ auto typeSalt = Salt<Type>::value;
+ sha1.addBytes(reinterpret_cast<uint8_t*>(&typeSalt), sizeof(typeSalt));
+ sha1.addBytes(reinterpret_cast<uint8_t*>(&value), sizeof(value));
+}
+
+}
+}
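encodeEnum() above widens any enum to a salted uint64_t before writing, and Decoder::decodeEnum() narrows it back, so enum fields can be persisted without a dedicated Coder specialization. A small sketch; the enum itself is illustrative:

    #include <wtf/persistence/Decoder.h>
    #include <wtf/persistence/Encoder.h>

    enum class StoreVersion : uint8_t { V1 = 1, V2 = 2 };

    static bool roundTripVersion()
    {
        WTF::Persistence::Encoder encoder;
        encoder.encodeEnum(StoreVersion::V2); // Written as a uint64_t.

        WTF::Persistence::Decoder decoder(encoder.buffer(), encoder.bufferSize());
        StoreVersion version;
        if (!decoder.decodeEnum(version))
            return false;
        return version == StoreVersion::V2;
    }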
diff --git a/Source/WTF/wtf/spi/darwin/CommonCryptoSPI.h b/Source/WTF/wtf/spi/darwin/CommonCryptoSPI.h
new file mode 100644
index 000000000..f79bff96f
--- /dev/null
+++ b/Source/WTF/wtf/spi/darwin/CommonCryptoSPI.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CommonCryptoSPI_h
+#define CommonCryptoSPI_h
+
+#if OS(DARWIN)
+
+#if USE(APPLE_INTERNAL_SDK)
+#include <CommonCrypto/CommonRandomSPI.h>
+#endif
+
+typedef struct __CCRandom* CCRandomRef;
+
+WTF_EXTERN_C_BEGIN
+
+extern const CCRandomRef kCCRandomDefault;
+int CCRandomCopyBytes(CCRandomRef rnd, void *bytes, size_t count);
+
+WTF_EXTERN_C_END
+
+#endif // OS(DARWIN)
+
+#endif /* CommonCryptoSPI_h */
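On Darwin, the declarations above expose CommonCrypto's default random source; the call fills a caller-provided buffer and returns 0 (kCCSuccess) on success. A brief Darwin-only sketch, harness code only:

    #include <wtf/spi/darwin/CommonCryptoSPI.h>

    #include <array>

    static bool fillWithRandomBytes(std::array<uint8_t, 16>& bytes)
    {
        // Any non-zero return value indicates failure.
        return !CCRandomCopyBytes(kCCRandomDefault, bytes.data(), bytes.size());
    }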
diff --git a/Source/WTF/wtf/spi/darwin/SandboxSPI.h b/Source/WTF/wtf/spi/darwin/SandboxSPI.h
new file mode 100644
index 000000000..50e6814ac
--- /dev/null
+++ b/Source/WTF/wtf/spi/darwin/SandboxSPI.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SandboxSPI_h
+#define SandboxSPI_h
+
+#if OS(DARWIN)
+
+#import <sandbox.h>
+
+#if USE(APPLE_INTERNAL_SDK)
+#import <sandbox/private.h>
+#else
+enum sandbox_filter_type {
+ SANDBOX_FILTER_NONE,
+ SANDBOX_FILTER_GLOBAL_NAME = 2,
+};
+
+#define SANDBOX_NAMED_EXTERNAL 0x0003
+#endif
+
+WTF_EXTERN_C_BEGIN
+
+extern const enum sandbox_filter_type SANDBOX_CHECK_NO_REPORT;
+int sandbox_check(pid_t, const char *operation, enum sandbox_filter_type, ...);
+int sandbox_container_path_for_pid(pid_t, char *buffer, size_t bufsize);
+int sandbox_init_with_parameters(const char *profile, uint64_t flags, const char *const parameters[], char **errorbuf);
+
+WTF_EXTERN_C_END
+
+#endif // OS(DARWIN)
+
+#endif // SandboxSPI_h
diff --git a/Source/WTF/wtf/spi/darwin/XPCSPI.h b/Source/WTF/wtf/spi/darwin/XPCSPI.h
new file mode 100644
index 000000000..37a5e1282
--- /dev/null
+++ b/Source/WTF/wtf/spi/darwin/XPCSPI.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <dispatch/dispatch.h>
+#include <os/object.h>
+
+#if PLATFORM(MAC) || USE(APPLE_INTERNAL_SDK)
+#include <xpc/xpc.h>
+#else
+
+#if OS_OBJECT_USE_OBJC
+OS_OBJECT_DECL(xpc_object);
+typedef xpc_object_t xpc_connection_t;
+
+static ALWAYS_INLINE void _xpc_object_validate(xpc_object_t object)
+{
+ void *isa = *(void * volatile *)(OS_OBJECT_BRIDGE void *)object;
+ (void)isa;
+}
+
+#define XPC_GLOBAL_OBJECT(object) ((OS_OBJECT_BRIDGE xpc_object_t)&(object))
+
+#else // OS_OBJECT_USE_OBJC
+
+typedef void* xpc_object_t;
+typedef void* xpc_connection_t;
+
+#define XPC_GLOBAL_OBJECT(object) (&(object))
+
+#endif // OS_OBJECT_USE_OBJC
+
+typedef const struct _xpc_type_s* xpc_type_t;
+
+#if COMPILER_SUPPORTS(BLOCKS)
+typedef bool (^xpc_array_applier_t)(size_t index, xpc_object_t);
+typedef bool (^xpc_dictionary_applier_t)(const char *key, xpc_object_t value);
+typedef void (^xpc_handler_t)(xpc_object_t);
+#endif
+
+typedef void (*xpc_connection_handler_t)(xpc_connection_t connection);
+
+#define XPC_ARRAY_APPEND ((size_t)(-1))
+#define XPC_ERROR_CONNECTION_INVALID XPC_GLOBAL_OBJECT(_xpc_error_connection_invalid)
+#define XPC_ERROR_TERMINATION_IMMINENT XPC_GLOBAL_OBJECT(_xpc_error_termination_imminent)
+#define XPC_TYPE_ARRAY (&_xpc_type_array)
+#define XPC_TYPE_BOOL (&_xpc_type_bool)
+#define XPC_TYPE_DICTIONARY (&_xpc_type_dictionary)
+#define XPC_TYPE_ERROR (&_xpc_type_error)
+#define XPC_TYPE_STRING (&_xpc_type_string)
+
+#endif // PLATFORM(MAC) || USE(APPLE_INTERNAL_SDK)
+
+#if USE(APPLE_INTERNAL_SDK)
+#include <xpc/private.h>
+#else
+enum {
+ DISPATCH_MACH_SEND_POSSIBLE = 0x8,
+};
+#endif
+
+WTF_EXTERN_C_BEGIN
+
+extern const struct _xpc_dictionary_s _xpc_error_connection_invalid;
+extern const struct _xpc_dictionary_s _xpc_error_termination_imminent;
+
+extern const struct _xpc_type_s _xpc_type_array;
+extern const struct _xpc_type_s _xpc_type_bool;
+extern const struct _xpc_type_s _xpc_type_dictionary;
+extern const struct _xpc_type_s _xpc_type_error;
+extern const struct _xpc_type_s _xpc_type_string;
+
+xpc_object_t xpc_array_create(const xpc_object_t*, size_t count);
+#if COMPILER_SUPPORTS(BLOCKS)
+bool xpc_array_apply(xpc_object_t, xpc_array_applier_t);
+bool xpc_dictionary_apply(xpc_object_t xdict, xpc_dictionary_applier_t applier);
+#endif
+size_t xpc_array_get_count(xpc_object_t);
+const char* xpc_array_get_string(xpc_object_t, size_t index);
+void xpc_array_set_string(xpc_object_t, size_t index, const char* string);
+bool xpc_bool_get_value(xpc_object_t);
+void xpc_connection_cancel(xpc_connection_t);
+xpc_connection_t xpc_connection_create(const char* name, dispatch_queue_t);
+xpc_connection_t xpc_connection_create_mach_service(const char* name, dispatch_queue_t, uint64_t flags);
+pid_t xpc_connection_get_pid(xpc_connection_t);
+void xpc_connection_resume(xpc_connection_t);
+void xpc_connection_send_message(xpc_connection_t, xpc_object_t);
+void xpc_connection_send_message_with_reply(xpc_connection_t, xpc_object_t, dispatch_queue_t, xpc_handler_t);
+void xpc_connection_set_event_handler(xpc_connection_t, xpc_handler_t);
+void xpc_connection_set_target_queue(xpc_connection_t, dispatch_queue_t);
+xpc_object_t xpc_dictionary_create(const char* const* keys, const xpc_object_t*, size_t count);
+xpc_object_t xpc_dictionary_create_reply(xpc_object_t);
+int xpc_dictionary_dup_fd(xpc_object_t, const char* key);
+xpc_connection_t xpc_dictionary_get_remote_connection(xpc_object_t);
+bool xpc_dictionary_get_bool(xpc_object_t, const char* key);
+const char* xpc_dictionary_get_string(xpc_object_t, const char* key);
+uint64_t xpc_dictionary_get_uint64(xpc_object_t, const char* key);
+xpc_object_t xpc_dictionary_get_value(xpc_object_t, const char* key);
+void xpc_dictionary_set_bool(xpc_object_t, const char* key, bool value);
+void xpc_dictionary_set_fd(xpc_object_t, const char* key, int fd);
+void xpc_dictionary_set_string(xpc_object_t, const char* key, const char* string);
+void xpc_dictionary_set_uint64(xpc_object_t, const char* key, uint64_t value);
+void xpc_dictionary_set_value(xpc_object_t, const char* key, xpc_object_t value);
+xpc_type_t xpc_get_type(xpc_object_t);
+void xpc_main(xpc_connection_handler_t);
+const char* xpc_string_get_string_ptr(xpc_object_t);
+void xpc_transaction_begin(void);
+void xpc_transaction_end(void);
+void xpc_transaction_exit_clean(void);
+void xpc_track_activity(void);
+
+xpc_object_t xpc_connection_copy_entitlement_value(xpc_connection_t, const char* entitlement);
+void xpc_connection_get_audit_token(xpc_connection_t, audit_token_t*);
+void xpc_connection_kill(xpc_connection_t, int);
+void xpc_connection_set_instance(xpc_connection_t, uuid_t);
+mach_port_t xpc_dictionary_copy_mach_send(xpc_object_t, const char*);
+void xpc_dictionary_set_mach_send(xpc_object_t, const char*, mach_port_t);
+
+void xpc_connection_set_bootstrap(xpc_connection_t, xpc_object_t bootstrap);
+xpc_object_t xpc_copy_bootstrap(void);
+void xpc_connection_set_oneshot_instance(xpc_connection_t, uuid_t instance);
+
+#if OS_OBJECT_USE_OBJC_RETAIN_RELEASE
+#if !defined(xpc_retain)
+#define xpc_retain(object) ({ xpc_object_t _o = (object); _xpc_object_validate(_o); [_o retain]; })
+#endif
+#else
+xpc_object_t xpc_retain(xpc_object_t);
+#endif
+
+#if OS_OBJECT_USE_OBJC_RETAIN_RELEASE
+#if !defined(xpc_release)
+#define xpc_release(object) ({ xpc_object_t _o = (object); _xpc_object_validate(_o); [_o release]; })
+#endif
+#else
+void xpc_release(xpc_object_t);
+#endif
+
+WTF_EXTERN_C_END
diff --git a/Source/WTF/wtf/spi/darwin/dyldSPI.h b/Source/WTF/wtf/spi/darwin/dyldSPI.h
new file mode 100644
index 000000000..0099c6b0b
--- /dev/null
+++ b/Source/WTF/wtf/spi/darwin/dyldSPI.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if USE(APPLE_INTERNAL_SDK)
+
+#include <mach-o/dyld_priv.h>
+
+#if PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED < 100000
+#define DYLD_IOS_VERSION_10_0 0x000A0000
+#endif
+
+#if PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED < 101200
+#define DYLD_MACOSX_VERSION_10_12 0x000A0C00
+#endif
+
+#ifndef DYLD_IOS_VERSION_11_0
+#define DYLD_IOS_VERSION_11_0 0x000B0000
+#endif
+
+#ifndef DYLD_MACOSX_VERSION_10_13
+#define DYLD_MACOSX_VERSION_10_13 0x000A0D00
+#endif
+
+#else
+
+#define DYLD_IOS_VERSION_3_0 0x00030000
+#define DYLD_IOS_VERSION_4_2 0x00040200
+#define DYLD_IOS_VERSION_5_0 0x00050000
+#define DYLD_IOS_VERSION_6_0 0x00060000
+#define DYLD_IOS_VERSION_7_0 0x00070000
+#define DYLD_IOS_VERSION_9_0 0x00090000
+#define DYLD_IOS_VERSION_10_0 0x000A0000
+#define DYLD_IOS_VERSION_11_0 0x000B0000
+
+#define DYLD_MACOSX_VERSION_10_11 0x000A0B00
+#define DYLD_MACOSX_VERSION_10_12 0x000A0C00
+#define DYLD_MACOSX_VERSION_10_13 0x000A0D00
+
+#endif
+
+WTF_EXTERN_C_BEGIN
+
+uint32_t dyld_get_program_sdk_version();
+
+WTF_EXTERN_C_END
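dyld_get_program_sdk_version() reports the SDK the host program was linked against, packed as major/minor/patch (for example 0x000A0C00 for 10.12), which is the format the DYLD_* constants above use. A typical linked-on-or-after check might look like this; the helper name is illustrative:

    #include <wtf/spi/darwin/dyldSPI.h>

    // Returns true when the embedding program was linked against the iOS 10 SDK
    // or newer; a macOS build would compare against a DYLD_MACOSX_* constant instead.
    static bool linkedOnOrAfterIOS10()
    {
    #if PLATFORM(IOS)
        return dyld_get_program_sdk_version() >= DYLD_IOS_VERSION_10_0;
    #else
        return true;
    #endif
    }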
diff --git a/Source/WTF/wtf/text/ASCIIFastPath.h b/Source/WTF/wtf/text/ASCIIFastPath.h
index d057a6fa1..eb54828a2 100644
--- a/Source/WTF/wtf/text/ASCIIFastPath.h
+++ b/Source/WTF/wtf/text/ASCIIFastPath.h
@@ -22,12 +22,14 @@
#ifndef ASCIIFastPath_h
#define ASCIIFastPath_h
-#if OS(DARWIN) && (CPU(X86) || CPU(X86_64))
-#include <emmintrin.h>
-#endif
#include <stdint.h>
+#include <unicode/utypes.h>
#include <wtf/StdLibExtras.h>
-#include <wtf/unicode/Unicode.h>
+#include <wtf/text/LChar.h>
+
+#if CPU(X86_SSE2)
+#include <emmintrin.h>
+#endif
namespace WTF {
@@ -107,7 +109,7 @@ inline bool charactersAreAllASCII(const CharacterType* characters, size_t length
inline void copyLCharsFromUCharSource(LChar* destination, const UChar* source, size_t length)
{
-#if OS(DARWIN) && (CPU(X86) || CPU(X86_64))
+#if CPU(X86_SSE2)
const uintptr_t memoryAccessSize = 16; // Memory accesses on 16 byte (128 bit) alignment
const uintptr_t memoryAccessMask = memoryAccessSize - 1;
@@ -137,7 +139,7 @@ inline void copyLCharsFromUCharSource(LChar* destination, const UChar* source, s
ASSERT(!(source[i] & 0xff00));
destination[i] = static_cast<LChar>(source[i]);
}
-#elif COMPILER(GCC) && CPU(ARM64) && defined(NDEBUG)
+#elif COMPILER(GCC_OR_CLANG) && CPU(ARM64) && defined(NDEBUG)
const LChar* const end = destination + length;
const uintptr_t memoryAccessSize = 16;
@@ -158,7 +160,7 @@ inline void copyLCharsFromUCharSource(LChar* destination, const UChar* source, s
while (destination != end)
*destination++ = static_cast<LChar>(*source++);
-#elif COMPILER(GCC) && CPU(ARM_NEON) && !(PLATFORM(BIG_ENDIAN) || PLATFORM(MIDDLE_ENDIAN)) && defined(NDEBUG)
+#elif COMPILER(GCC_OR_CLANG) && CPU(ARM_NEON) && !(CPU(BIG_ENDIAN) || CPU(MIDDLE_ENDIAN)) && defined(NDEBUG)
const LChar* const end = destination + length;
const uintptr_t memoryAccessSize = 8;
diff --git a/Source/WTF/wtf/text/AtomicString.cpp b/Source/WTF/wtf/text/AtomicString.cpp
index 5803dd018..cd8ef8ffc 100644
--- a/Source/WTF/wtf/text/AtomicString.cpp
+++ b/Source/WTF/wtf/text/AtomicString.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2004, 2005, 2006, 2007, 2008, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2004-2008, 2013-2014, 2016 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
* Copyright (C) 2012 Google Inc. All rights reserved.
*
@@ -23,452 +23,81 @@
#include "config.h"
#include "AtomicString.h"
-#include "AtomicStringTable.h"
-#include "HashSet.h"
#include "IntegerToStringConversion.h"
-#include "StringHash.h"
-#include "Threading.h"
-#include "WTFThreadData.h"
#include "dtoa.h"
-#include <wtf/unicode/UTF8.h>
#if USE(WEB_THREAD)
-#include "TCSpinLock.h"
+#include "Lock.h"
#endif
namespace WTF {
-using namespace Unicode;
-
-static_assert(sizeof(AtomicString) == sizeof(String), "AtomicString and String must be same size!");
-
-#if USE(WEB_THREAD)
-
-class AtomicStringTableLocker : public SpinLockHolder {
- WTF_MAKE_NONCOPYABLE(AtomicStringTableLocker);
-
- static SpinLock s_stringTableLock;
-public:
- AtomicStringTableLocker()
- : SpinLockHolder(&s_stringTableLock)
- {
- }
-};
-
-SpinLock AtomicStringTableLocker::s_stringTableLock = SPINLOCK_INITIALIZER;
-
-#else
-
-class AtomicStringTableLocker {
- WTF_MAKE_NONCOPYABLE(AtomicStringTableLocker);
-public:
- AtomicStringTableLocker() { }
-};
-
-#endif // USE(WEB_THREAD)
-
-static ALWAYS_INLINE HashSet<StringImpl*>& stringTable()
-{
- return wtfThreadData().atomicStringTable()->table();
-}
-
-template<typename T, typename HashTranslator>
-static inline PassRefPtr<StringImpl> addToStringTable(const T& value)
-{
- AtomicStringTableLocker locker;
-
- HashSet<StringImpl*>::AddResult addResult = stringTable().add<HashTranslator>(value);
-
- // If the string is newly-translated, then we need to adopt it.
- // The boolean in the pair tells us if that is so.
- return addResult.isNewEntry ? adoptRef(*addResult.iterator) : *addResult.iterator;
-}
-
-struct CStringTranslator {
- static unsigned hash(const LChar* c)
- {
- return StringHasher::computeHashAndMaskTop8Bits(c);
- }
-
- static inline bool equal(StringImpl* r, const LChar* s)
- {
- return WTF::equal(r, s);
- }
-
- static void translate(StringImpl*& location, const LChar* const& c, unsigned hash)
- {
- location = &StringImpl::create(c).leakRef();
- location->setHash(hash);
- location->setIsAtomic(true);
- }
-};
-
-PassRefPtr<StringImpl> AtomicString::add(const LChar* c)
+template<AtomicString::CaseConvertType type>
+ALWAYS_INLINE AtomicString AtomicString::convertASCIICase() const
{
- if (!c)
- return 0;
- if (!*c)
- return StringImpl::empty();
-
- return addToStringTable<const LChar*, CStringTranslator>(c);
-}
-
-template<typename CharacterType>
-struct HashTranslatorCharBuffer {
- const CharacterType* s;
- unsigned length;
-};
-
-typedef HashTranslatorCharBuffer<UChar> UCharBuffer;
-struct UCharBufferTranslator {
- static unsigned hash(const UCharBuffer& buf)
- {
- return StringHasher::computeHashAndMaskTop8Bits(buf.s, buf.length);
- }
-
- static bool equal(StringImpl* const& str, const UCharBuffer& buf)
- {
- return WTF::equal(str, buf.s, buf.length);
- }
-
- static void translate(StringImpl*& location, const UCharBuffer& buf, unsigned hash)
- {
- location = &StringImpl::create8BitIfPossible(buf.s, buf.length).leakRef();
- location->setHash(hash);
- location->setIsAtomic(true);
- }
-};
-
-template<typename CharacterType>
-struct HashAndCharacters {
- unsigned hash;
- const CharacterType* characters;
- unsigned length;
-};
-
-template<typename CharacterType>
-struct HashAndCharactersTranslator {
- static unsigned hash(const HashAndCharacters<CharacterType>& buffer)
- {
- ASSERT(buffer.hash == StringHasher::computeHashAndMaskTop8Bits(buffer.characters, buffer.length));
- return buffer.hash;
- }
-
- static bool equal(StringImpl* const& string, const HashAndCharacters<CharacterType>& buffer)
- {
- return WTF::equal(string, buffer.characters, buffer.length);
- }
-
- static void translate(StringImpl*& location, const HashAndCharacters<CharacterType>& buffer, unsigned hash)
- {
- location = &StringImpl::create(buffer.characters, buffer.length).leakRef();
- location->setHash(hash);
- location->setIsAtomic(true);
- }
-};
+ StringImpl* impl = this->impl();
+ if (UNLIKELY(!impl))
+ return nullAtom;
-struct HashAndUTF8Characters {
- unsigned hash;
- const char* characters;
+ // Convert short strings without allocating a new StringImpl, since
+ // there's a good chance these strings are already in the atomic
+ // string table and so no memory allocation will be required.
unsigned length;
- unsigned utf16Length;
-};
-
-struct HashAndUTF8CharactersTranslator {
- static unsigned hash(const HashAndUTF8Characters& buffer)
- {
- return buffer.hash;
- }
-
- static bool equal(StringImpl* const& string, const HashAndUTF8Characters& buffer)
- {
- if (buffer.utf16Length != string->length())
- return false;
-
- // If buffer contains only ASCII characters UTF-8 and UTF16 length are the same.
- if (buffer.utf16Length != buffer.length) {
- const UChar* stringCharacters = string->deprecatedCharacters();
-
- return equalUTF16WithUTF8(stringCharacters, stringCharacters + string->length(), buffer.characters, buffer.characters + buffer.length);
- }
-
- if (string->is8Bit()) {
- const LChar* stringCharacters = string->characters8();
-
- for (unsigned i = 0; i < buffer.length; ++i) {
- ASSERT(isASCII(buffer.characters[i]));
- if (stringCharacters[i] != buffer.characters[i])
- return false;
+ const unsigned localBufferSize = 100;
+ if (impl->is8Bit() && (length = impl->length()) <= localBufferSize) {
+ const LChar* characters = impl->characters8();
+ unsigned failingIndex;
+ for (unsigned i = 0; i < length; ++i) {
+ if (type == CaseConvertType::Lower ? UNLIKELY(isASCIIUpper(characters[i])) : LIKELY(isASCIILower(characters[i]))) {
+ failingIndex = i;
+ goto SlowPath;
}
-
- return true;
}
-
- const UChar* stringCharacters = string->characters16();
-
- for (unsigned i = 0; i < buffer.length; ++i) {
- ASSERT(isASCII(buffer.characters[i]));
- if (stringCharacters[i] != buffer.characters[i])
- return false;
- }
-
- return true;
- }
-
- static void translate(StringImpl*& location, const HashAndUTF8Characters& buffer, unsigned hash)
- {
- UChar* target;
- RefPtr<StringImpl> newString = StringImpl::createUninitialized(buffer.utf16Length, target);
-
- bool isAllASCII;
- const char* source = buffer.characters;
- if (convertUTF8ToUTF16(&source, source + buffer.length, &target, target + buffer.utf16Length, &isAllASCII) != conversionOK)
- ASSERT_NOT_REACHED();
-
- if (isAllASCII)
- newString = StringImpl::create(buffer.characters, buffer.length);
-
- location = newString.release().leakRef();
- location->setHash(hash);
- location->setIsAtomic(true);
- }
-};
-
-PassRefPtr<StringImpl> AtomicString::add(const UChar* s, unsigned length)
-{
- if (!s)
- return 0;
-
- if (!length)
- return StringImpl::empty();
-
- UCharBuffer buffer = { s, length };
- return addToStringTable<UCharBuffer, UCharBufferTranslator>(buffer);
-}
-
-PassRefPtr<StringImpl> AtomicString::add(const UChar* s, unsigned length, unsigned existingHash)
-{
- ASSERT(s);
- ASSERT(existingHash);
-
- if (!length)
- return StringImpl::empty();
-
- HashAndCharacters<UChar> buffer = { existingHash, s, length };
- return addToStringTable<HashAndCharacters<UChar>, HashAndCharactersTranslator<UChar>>(buffer);
-}
-
-PassRefPtr<StringImpl> AtomicString::add(const UChar* s)
-{
- if (!s)
- return 0;
-
- unsigned length = 0;
- while (s[length] != UChar(0))
- ++length;
-
- if (!length)
- return StringImpl::empty();
-
- UCharBuffer buffer = { s, length };
- return addToStringTable<UCharBuffer, UCharBufferTranslator>(buffer);
-}
-
-struct SubstringLocation {
- StringImpl* baseString;
- unsigned start;
- unsigned length;
-};
-
-struct SubstringTranslator {
- static unsigned hash(const SubstringLocation& buffer)
- {
- return StringHasher::computeHashAndMaskTop8Bits(buffer.baseString->deprecatedCharacters() + buffer.start, buffer.length);
- }
-
- static bool equal(StringImpl* const& string, const SubstringLocation& buffer)
- {
- return WTF::equal(string, buffer.baseString->deprecatedCharacters() + buffer.start, buffer.length);
+ return *this;
+SlowPath:
+ LChar localBuffer[localBufferSize];
+ for (unsigned i = 0; i < failingIndex; ++i)
+ localBuffer[i] = characters[i];
+ for (unsigned i = failingIndex; i < length; ++i)
+ localBuffer[i] = type == CaseConvertType::Lower ? toASCIILower(characters[i]) : toASCIIUpper(characters[i]);
+ return AtomicString(localBuffer, length);
}
- static void translate(StringImpl*& location, const SubstringLocation& buffer, unsigned hash)
- {
- location = &StringImpl::create(buffer.baseString, buffer.start, buffer.length).leakRef();
- location->setHash(hash);
- location->setIsAtomic(true);
- }
-};
-
-PassRefPtr<StringImpl> AtomicString::add(StringImpl* baseString, unsigned start, unsigned length)
-{
- if (!baseString)
- return 0;
-
- if (!length || start >= baseString->length())
- return StringImpl::empty();
+ Ref<StringImpl> convertedString = type == CaseConvertType::Lower ? impl->convertToASCIILowercase() : impl->convertToASCIIUppercase();
+ if (LIKELY(convertedString.ptr() == impl))
+ return *this;
- unsigned maxLength = baseString->length() - start;
- if (length >= maxLength) {
- if (!start)
- return add(baseString);
- length = maxLength;
- }
-
- SubstringLocation buffer = { baseString, start, length };
- return addToStringTable<SubstringLocation, SubstringTranslator>(buffer);
+ AtomicString result;
+ result.m_string = AtomicStringImpl::add(convertedString.ptr());
+ return result;
}
-
-typedef HashTranslatorCharBuffer<LChar> LCharBuffer;
-struct LCharBufferTranslator {
- static unsigned hash(const LCharBuffer& buf)
- {
- return StringHasher::computeHashAndMaskTop8Bits(buf.s, buf.length);
- }
-
- static bool equal(StringImpl* const& str, const LCharBuffer& buf)
- {
- return WTF::equal(str, buf.s, buf.length);
- }
-
- static void translate(StringImpl*& location, const LCharBuffer& buf, unsigned hash)
- {
- location = &StringImpl::create(buf.s, buf.length).leakRef();
- location->setHash(hash);
- location->setIsAtomic(true);
- }
-};
-
-typedef HashTranslatorCharBuffer<char> CharBuffer;
-struct CharBufferFromLiteralDataTranslator {
- static unsigned hash(const CharBuffer& buf)
- {
- return StringHasher::computeHashAndMaskTop8Bits(reinterpret_cast<const LChar*>(buf.s), buf.length);
- }
- static bool equal(StringImpl* const& str, const CharBuffer& buf)
- {
- return WTF::equal(str, buf.s, buf.length);
- }
-
- static void translate(StringImpl*& location, const CharBuffer& buf, unsigned hash)
- {
- location = &StringImpl::createFromLiteral(buf.s, buf.length).leakRef();
- location->setHash(hash);
- location->setIsAtomic(true);
- }
-};
-
-PassRefPtr<StringImpl> AtomicString::add(const LChar* s, unsigned length)
+AtomicString AtomicString::convertToASCIILowercase() const
{
- if (!s)
- return 0;
-
- if (!length)
- return StringImpl::empty();
-
- LCharBuffer buffer = { s, length };
- return addToStringTable<LCharBuffer, LCharBufferTranslator>(buffer);
+ return convertASCIICase<CaseConvertType::Lower>();
}
-PassRefPtr<StringImpl> AtomicString::addFromLiteralData(const char* characters, unsigned length)
+AtomicString AtomicString::convertToASCIIUppercase() const
{
- ASSERT(characters);
- ASSERT(length);
-
- CharBuffer buffer = { characters, length };
- return addToStringTable<CharBuffer, CharBufferFromLiteralDataTranslator>(buffer);
+ return convertASCIICase<CaseConvertType::Upper>();
}
-PassRefPtr<StringImpl> AtomicString::addSlowCase(StringImpl* string)
-{
- if (!string->length())
- return StringImpl::empty();
-
- ASSERT_WITH_MESSAGE(!string->isAtomic(), "AtomicString should not hit the slow case if the string is already atomic.");
-
- AtomicStringTableLocker locker;
- HashSet<StringImpl*>::AddResult addResult = stringTable().add(string);
-
- if (addResult.isNewEntry) {
- ASSERT(*addResult.iterator == string);
- string->setIsAtomic(true);
- }
-
- return *addResult.iterator;
-}
-
-template<typename CharacterType>
-static inline HashSet<StringImpl*>::iterator findString(const StringImpl* stringImpl)
-{
- HashAndCharacters<CharacterType> buffer = { stringImpl->existingHash(), stringImpl->getCharacters<CharacterType>(), stringImpl->length() };
- return stringTable().find<HashAndCharactersTranslator<CharacterType>>(buffer);
-}
-
-AtomicStringImpl* AtomicString::find(const StringImpl* stringImpl)
-{
- ASSERT(stringImpl);
- ASSERT(stringImpl->existingHash());
-
- if (!stringImpl->length())
- return static_cast<AtomicStringImpl*>(StringImpl::empty());
-
- AtomicStringTableLocker locker;
- HashSet<StringImpl*>::iterator iterator;
- if (stringImpl->is8Bit())
- iterator = findString<LChar>(stringImpl);
- else
- iterator = findString<UChar>(stringImpl);
- if (iterator == stringTable().end())
- return 0;
- return static_cast<AtomicStringImpl*>(*iterator);
-}
-
-void AtomicString::remove(StringImpl* string)
-{
- ASSERT(string->isAtomic());
- AtomicStringTableLocker locker;
- HashSet<StringImpl*>& atomicStringTable = stringTable();
- HashSet<StringImpl*>::iterator iterator = atomicStringTable.find(string);
- ASSERT_WITH_MESSAGE(iterator != atomicStringTable.end(), "The string being removed is atomic in the string table of an other thread!");
- atomicStringTable.remove(iterator);
-}
-
-AtomicString AtomicString::lower() const
+AtomicString AtomicString::number(int number)
{
- // Note: This is a hot function in the Dromaeo benchmark.
- StringImpl* impl = this->impl();
- if (UNLIKELY(!impl))
- return AtomicString();
-
- RefPtr<StringImpl> lowerImpl = impl->lower();
- AtomicString returnValue;
- if (LIKELY(lowerImpl == impl))
- returnValue.m_string = lowerImpl.release();
- else
- returnValue.m_string = addSlowCase(lowerImpl.get());
- return returnValue;
+ return numberToStringSigned<AtomicString>(number);
}
-AtomicString AtomicString::fromUTF8Internal(const char* charactersStart, const char* charactersEnd)
+AtomicString AtomicString::number(unsigned number)
{
- HashAndUTF8Characters buffer;
- buffer.characters = charactersStart;
- buffer.hash = calculateStringHashAndLengthFromUTF8MaskingTop8Bits(charactersStart, charactersEnd, buffer.length, buffer.utf16Length);
-
- if (!buffer.hash)
- return nullAtom;
-
- AtomicString atomicString;
- atomicString.m_string = addToStringTable<HashAndUTF8Characters, HashAndUTF8CharactersTranslator>(buffer);
- return atomicString;
+ return numberToStringUnsigned<AtomicString>(number);
}
-AtomicString AtomicString::number(int number)
+AtomicString AtomicString::number(unsigned long number)
{
- return numberToStringSigned<AtomicString>(number);
+ return numberToStringUnsigned<AtomicString>(number);
}
-AtomicString AtomicString::number(unsigned number)
+AtomicString AtomicString::number(unsigned long long number)
{
return numberToStringUnsigned<AtomicString>(number);
}
@@ -479,13 +108,13 @@ AtomicString AtomicString::number(double number)
return String(numberToFixedPrecisionString(number, 6, buffer, true));
}
-#if !ASSERT_DISABLED
-bool AtomicString::isInAtomicStringTable(StringImpl* string)
+AtomicString AtomicString::fromUTF8Internal(const char* charactersStart, const char* charactersEnd)
{
- AtomicStringTableLocker locker;
- return stringTable().contains(string);
+ auto impl = AtomicStringImpl::addUTF8(charactersStart, charactersEnd);
+ if (!impl)
+ return nullAtom;
+ return impl.get();
}
-#endif
#ifndef NDEBUG
void AtomicString::show() const
diff --git a/Source/WTF/wtf/text/AtomicString.h b/Source/WTF/wtf/text/AtomicString.h
index 4142de142..91bb20a8b 100644
--- a/Source/WTF/wtf/text/AtomicString.h
+++ b/Source/WTF/wtf/text/AtomicString.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2004, 2005, 2006, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2004-2006, 2008, 2014-2016 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -23,6 +23,7 @@
#include <utility>
#include <wtf/text/AtomicStringImpl.h>
+#include <wtf/text/IntegerToStringConversion.h>
#include <wtf/text/WTFString.h>
// Define 'NO_IMPLICIT_ATOMICSTRING' before including this header,
@@ -41,34 +42,38 @@ class AtomicString {
public:
WTF_EXPORT_PRIVATE static void init();
- AtomicString() { }
- AtomicString(const LChar* s) : m_string(add(s)) { }
- AtomicString(const char* s) : m_string(add(s)) { }
- AtomicString(const LChar* s, unsigned length) : m_string(add(s, length)) { }
- AtomicString(const UChar* s, unsigned length) : m_string(add(s, length)) { }
- AtomicString(const UChar* s, unsigned length, unsigned existingHash) : m_string(add(s, length, existingHash)) { }
- AtomicString(const UChar* s) : m_string(add(s)) { }
+ AtomicString();
+ AtomicString(const LChar*);
+ AtomicString(const char*);
+ AtomicString(const LChar*, unsigned length);
+ AtomicString(const UChar*, unsigned length);
+ AtomicString(const UChar*, unsigned length, unsigned existingHash);
+ AtomicString(const UChar*);
template<size_t inlineCapacity>
explicit AtomicString(const Vector<UChar, inlineCapacity>& characters)
- : m_string(add(characters.data(), characters.size()))
+ : m_string(AtomicStringImpl::add(characters.data(), characters.size()))
{
}
- ATOMICSTRING_CONVERSION AtomicString(StringImpl* imp) : m_string(add(imp)) { }
- AtomicString(AtomicStringImpl* imp) : m_string(imp) { }
- ATOMICSTRING_CONVERSION AtomicString(const String& s) : m_string(add(s.impl())) { }
- AtomicString(StringImpl* baseString, unsigned start, unsigned length) : m_string(add(baseString, start, length)) { }
+ AtomicString(AtomicStringImpl*);
+ AtomicString(RefPtr<AtomicStringImpl>&&);
+ ATOMICSTRING_CONVERSION AtomicString(StringImpl*);
+ ATOMICSTRING_CONVERSION AtomicString(const String&);
+ AtomicString(StringImpl* baseString, unsigned start, unsigned length);
+
+    // FIXME: AtomicString doesn’t always have AtomicStringImpl, so one of those two names needs to change.
+ AtomicString(UniquedStringImpl* uid);
enum ConstructFromLiteralTag { ConstructFromLiteral };
AtomicString(const char* characters, unsigned length, ConstructFromLiteralTag)
- : m_string(addFromLiteralData(characters, length))
+ : m_string(AtomicStringImpl::addLiteral(characters, length))
{
}
template<unsigned charactersCount>
ALWAYS_INLINE AtomicString(const char (&characters)[charactersCount], ConstructFromLiteralTag)
- : m_string(addFromLiteralData(characters, charactersCount - 1))
+ : m_string(AtomicStringImpl::addLiteral(characters, charactersCount - 1))
{
COMPILE_ASSERT(charactersCount > 1, AtomicStringFromLiteralNotEmpty);
COMPILE_ASSERT((charactersCount - 1 <= ((unsigned(~0) - sizeof(StringImpl)) / sizeof(LChar))), AtomicStringFromLiteralCannotOverflow);
@@ -77,15 +82,15 @@ public:
// We have to declare the copy constructor and copy assignment operator as well, otherwise
// they'll be implicitly deleted by adding the move constructor and move assignment operator.
AtomicString(const AtomicString& other) : m_string(other.m_string) { }
- AtomicString(AtomicString&& other) : m_string(std::move(other.m_string)) { }
+ AtomicString(AtomicString&& other) : m_string(WTFMove(other.m_string)) { }
AtomicString& operator=(const AtomicString& other) { m_string = other.m_string; return *this; }
- AtomicString& operator=(AtomicString&& other) { m_string = std::move(other.m_string); return *this; }
+ AtomicString& operator=(AtomicString&& other) { m_string = WTFMove(other.m_string); return *this; }
// Hash table deleted values, which are only constructed and never copied or destroyed.
AtomicString(WTF::HashTableDeletedValueType) : m_string(WTF::HashTableDeletedValue) { }
bool isHashTableDeletedValue() const { return m_string.isHashTableDeletedValue(); }
- WTF_EXPORT_STRING_API static AtomicStringImpl* find(const StringImpl*);
+ unsigned existingHash() const { return isNull() ? 0 : impl()->existingHash(); }
operator const String&() const { return m_string; }
const String& string() const { return m_string; };
@@ -93,31 +98,46 @@ public:
AtomicStringImpl* impl() const { return static_cast<AtomicStringImpl *>(m_string.impl()); }
bool is8Bit() const { return m_string.is8Bit(); }
- const UChar* characters() const { return m_string.deprecatedCharacters(); } // FIXME: Delete this.
const LChar* characters8() const { return m_string.characters8(); }
const UChar* characters16() const { return m_string.characters16(); }
unsigned length() const { return m_string.length(); }
-
+
UChar operator[](unsigned int i) const { return m_string[i]; }
WTF_EXPORT_STRING_API static AtomicString number(int);
WTF_EXPORT_STRING_API static AtomicString number(unsigned);
+ WTF_EXPORT_STRING_API static AtomicString number(unsigned long);
+ WTF_EXPORT_STRING_API static AtomicString number(unsigned long long);
WTF_EXPORT_STRING_API static AtomicString number(double);
// If we need more overloads of the number function, we can add all the others that String has, but these seem to do for now.
bool contains(UChar c) const { return m_string.contains(c); }
bool contains(const LChar* s, bool caseSensitive = true) const
{ return m_string.contains(s, caseSensitive); }
- bool contains(const String& s, bool caseSensitive = true) const
+ bool contains(const String& s) const
+ { return m_string.contains(s); }
+ bool contains(const String& s, bool caseSensitive) const
{ return m_string.contains(s, caseSensitive); }
+ bool containsIgnoringASCIICase(const String& s) const
+ { return m_string.containsIgnoringASCIICase(s); }
size_t find(UChar c, unsigned start = 0) const { return m_string.find(c, start); }
size_t find(const LChar* s, unsigned start = 0, bool caseSentitive = true) const
{ return m_string.find(s, start, caseSentitive); }
size_t find(const String& s, unsigned start = 0, bool caseSentitive = true) const
{ return m_string.find(s, start, caseSentitive); }
-
- bool startsWith(const String& s, bool caseSensitive = true) const
+ size_t findIgnoringASCIICase(const String& s) const
+ { return m_string.findIgnoringASCIICase(s); }
+ size_t findIgnoringASCIICase(const String& s, unsigned startOffset) const
+ { return m_string.findIgnoringASCIICase(s, startOffset); }
+ size_t find(CharacterMatchFunctionPtr matchFunction, unsigned start = 0) const
+ { return m_string.find(matchFunction, start); }
+
+ bool startsWith(const String& s) const
+ { return m_string.startsWith(s); }
+ bool startsWithIgnoringASCIICase(const String& s) const
+ { return m_string.startsWithIgnoringASCIICase(s); }
+ bool startsWith(const String& s, bool caseSensitive) const
{ return m_string.startsWith(s, caseSensitive); }
bool startsWith(UChar character) const
{ return m_string.startsWith(character); }
@@ -125,17 +145,21 @@ public:
bool startsWith(const char (&prefix)[matchLength], bool caseSensitive = true) const
{ return m_string.startsWith<matchLength>(prefix, caseSensitive); }
- bool endsWith(const String& s, bool caseSensitive = true) const
+ bool endsWith(const String& s) const
+ { return m_string.endsWith(s); }
+ bool endsWithIgnoringASCIICase(const String& s) const
+ { return m_string.endsWithIgnoringASCIICase(s); }
+ bool endsWith(const String& s, bool caseSensitive) const
{ return m_string.endsWith(s, caseSensitive); }
bool endsWith(UChar character) const
{ return m_string.endsWith(character); }
template<unsigned matchLength>
bool endsWith(const char (&prefix)[matchLength], bool caseSensitive = true) const
{ return m_string.endsWith<matchLength>(prefix, caseSensitive); }
-
- WTF_EXPORT_STRING_API AtomicString lower() const;
- AtomicString upper() const { return AtomicString(impl()->upper()); }
-
+
+ WTF_EXPORT_STRING_API AtomicString convertToASCIILowercase() const;
+ WTF_EXPORT_STRING_API AtomicString convertToASCIIUppercase() const;
+
int toInt(bool* ok = 0) const { return m_string.toInt(ok); }
double toDouble(bool* ok = 0) const { return m_string.toDouble(ok); }
float toFloat(bool* ok = 0) const { return m_string.toFloat(ok); }
@@ -144,13 +168,11 @@ public:
bool isNull() const { return m_string.isNull(); }
bool isEmpty() const { return m_string.isEmpty(); }
- static void remove(StringImpl*);
-
#if USE(CF)
- AtomicString(CFStringRef s) : m_string(add(s)) { }
-#endif
+ AtomicString(CFStringRef);
+#endif
#ifdef __OBJC__
- AtomicString(NSString* s) : m_string(add((CFStringRef)s)) { }
+ AtomicString(NSString*);
operator NSString*() const { return m_string; }
#endif
@@ -167,37 +189,16 @@ private:
// The explicit constructors with AtomicString::ConstructFromLiteral must be used for literals.
AtomicString(ASCIILiteral);
- String m_string;
-
- WTF_EXPORT_STRING_API static PassRefPtr<StringImpl> add(const LChar*);
- ALWAYS_INLINE static PassRefPtr<StringImpl> add(const char* s) { return add(reinterpret_cast<const LChar*>(s)); };
- WTF_EXPORT_STRING_API static PassRefPtr<StringImpl> add(const LChar*, unsigned length);
- WTF_EXPORT_STRING_API static PassRefPtr<StringImpl> add(const UChar*, unsigned length);
- ALWAYS_INLINE static PassRefPtr<StringImpl> add(const char* s, unsigned length) { return add(reinterpret_cast<const LChar*>(s), length); };
- WTF_EXPORT_STRING_API static PassRefPtr<StringImpl> add(const UChar*, unsigned length, unsigned existingHash);
- WTF_EXPORT_STRING_API static PassRefPtr<StringImpl> add(const UChar*);
- WTF_EXPORT_STRING_API static PassRefPtr<StringImpl> add(StringImpl*, unsigned offset, unsigned length);
- ALWAYS_INLINE static PassRefPtr<StringImpl> add(StringImpl* string)
- {
- if (!string || string->isAtomic()) {
- ASSERT_WITH_MESSAGE(!string || isInAtomicStringTable(string), "The atomic string comes from an other thread!");
- return string;
- }
- return addSlowCase(string);
- }
- WTF_EXPORT_STRING_API static PassRefPtr<StringImpl> addFromLiteralData(const char* characters, unsigned length);
- WTF_EXPORT_STRING_API static PassRefPtr<StringImpl> addSlowCase(StringImpl*);
-#if USE(CF)
- WTF_EXPORT_STRING_API static PassRefPtr<StringImpl> add(CFStringRef);
-#endif
+ enum class CaseConvertType { Upper, Lower };
+ template<CaseConvertType> AtomicString convertASCIICase() const;
WTF_EXPORT_STRING_API static AtomicString fromUTF8Internal(const char*, const char*);
-#if !ASSERT_DISABLED
- WTF_EXPORT_STRING_API static bool isInAtomicStringTable(StringImpl*);
-#endif
+ String m_string;
};
+static_assert(sizeof(AtomicString) == sizeof(String), "AtomicString and String must be same size!");
+
inline bool operator==(const AtomicString& a, const AtomicString& b) { return a.impl() == b.impl(); }
bool operator==(const AtomicString&, const LChar*);
inline bool operator==(const AtomicString& a, const char* b) { return WTF::equal(a.impl(), reinterpret_cast<const LChar*>(b)); }
@@ -216,25 +217,99 @@ inline bool operator!=(const LChar* a, const AtomicString& b) { return !(b == a)
inline bool operator!=(const String& a, const AtomicString& b) { return !equal(a.impl(), b.impl()); }
inline bool operator!=(const Vector<UChar>& a, const AtomicString& b) { return !(a == b); }
-inline bool equalIgnoringCase(const AtomicString& a, const AtomicString& b) { return equalIgnoringCase(a.impl(), b.impl()); }
-inline bool equalIgnoringCase(const AtomicString& a, const LChar* b) { return equalIgnoringCase(a.impl(), b); }
-inline bool equalIgnoringCase(const AtomicString& a, const char* b) { return equalIgnoringCase(a.impl(), reinterpret_cast<const LChar*>(b)); }
-inline bool equalIgnoringCase(const AtomicString& a, const String& b) { return equalIgnoringCase(a.impl(), b.impl()); }
-inline bool equalIgnoringCase(const LChar* a, const AtomicString& b) { return equalIgnoringCase(a, b.impl()); }
-inline bool equalIgnoringCase(const char* a, const AtomicString& b) { return equalIgnoringCase(reinterpret_cast<const LChar*>(a), b.impl()); }
-inline bool equalIgnoringCase(const String& a, const AtomicString& b) { return equalIgnoringCase(a.impl(), b.impl()); }
+bool equalIgnoringASCIICase(const AtomicString&, const AtomicString&);
+bool equalIgnoringASCIICase(const AtomicString&, const String&);
+bool equalIgnoringASCIICase(const String&, const AtomicString&);
+bool equalIgnoringASCIICase(const AtomicString&, const char*);
+
+template<unsigned length> bool equalLettersIgnoringASCIICase(const AtomicString&, const char (&lowercaseLetters)[length]);
+
+inline AtomicString::AtomicString()
+{
+}
+
+inline AtomicString::AtomicString(const LChar* s)
+ : m_string(AtomicStringImpl::add(s))
+{
+}
+
+inline AtomicString::AtomicString(const char* s)
+ : m_string(AtomicStringImpl::add(s))
+{
+}
+
+inline AtomicString::AtomicString(const LChar* s, unsigned length)
+ : m_string(AtomicStringImpl::add(s, length))
+{
+}
+
+inline AtomicString::AtomicString(const UChar* s, unsigned length)
+ : m_string(AtomicStringImpl::add(s, length))
+{
+}
+
+inline AtomicString::AtomicString(const UChar* s, unsigned length, unsigned existingHash)
+ : m_string(AtomicStringImpl::add(s, length, existingHash))
+{
+}
+
+inline AtomicString::AtomicString(const UChar* s)
+ : m_string(AtomicStringImpl::add(s))
+{
+}
+
+inline AtomicString::AtomicString(AtomicStringImpl* imp)
+ : m_string(imp)
+{
+}
+
+inline AtomicString::AtomicString(RefPtr<AtomicStringImpl>&& imp)
+ : m_string(WTFMove(imp))
+{
+}
+
+inline AtomicString::AtomicString(StringImpl* imp)
+ : m_string(AtomicStringImpl::add(imp))
+{
+}
+
+inline AtomicString::AtomicString(const String& s)
+ : m_string(AtomicStringImpl::add(s.impl()))
+{
+}
+
+inline AtomicString::AtomicString(StringImpl* baseString, unsigned start, unsigned length)
+ : m_string(AtomicStringImpl::add(baseString, start, length))
+{
+}
+
+inline AtomicString::AtomicString(UniquedStringImpl* uid)
+ : m_string(uid)
+{
+}
+
+#if USE(CF)
+inline AtomicString::AtomicString(CFStringRef s)
+ : m_string(AtomicStringImpl::add(s))
+{
+}
+#endif
+
+#ifdef __OBJC__
+inline AtomicString::AtomicString(NSString* s)
+ : m_string(AtomicStringImpl::add((__bridge CFStringRef)s))
+{
+}
+#endif
// Define external global variables for the commonly used atomic strings.
// These are only usable from the main thread.
#ifndef ATOMICSTRING_HIDE_GLOBALS
extern const WTF_EXPORTDATA AtomicString nullAtom;
extern const WTF_EXPORTDATA AtomicString emptyAtom;
-extern const WTF_EXPORTDATA AtomicString textAtom;
-extern const WTF_EXPORTDATA AtomicString commentAtom;
extern const WTF_EXPORTDATA AtomicString starAtom;
extern const WTF_EXPORTDATA AtomicString xmlAtom;
extern const WTF_EXPORTDATA AtomicString xmlnsAtom;
-extern const WTF_EXPORTDATA AtomicString xlinkAtom;
inline AtomicString AtomicString::fromUTF8(const char* characters, size_t length)
{
@@ -251,7 +326,7 @@ inline AtomicString AtomicString::fromUTF8(const char* characters)
return nullAtom;
if (!*characters)
return emptyAtom;
- return fromUTF8Internal(characters, 0);
+ return fromUTF8Internal(characters, nullptr);
}
#endif
@@ -261,19 +336,48 @@ template<> struct DefaultHash<AtomicString> {
typedef AtomicStringHash Hash;
};
+template<unsigned length> inline bool equalLettersIgnoringASCIICase(const AtomicString& string, const char (&lowercaseLetters)[length])
+{
+ return equalLettersIgnoringASCIICase(string.string(), lowercaseLetters);
+}
+
+inline bool equalIgnoringASCIICase(const AtomicString& a, const AtomicString& b)
+{
+ return equalIgnoringASCIICase(a.string(), b.string());
+}
+
+inline bool equalIgnoringASCIICase(const AtomicString& a, const String& b)
+{
+ return equalIgnoringASCIICase(a.string(), b);
+}
+
+inline bool equalIgnoringASCIICase(const String& a, const AtomicString& b)
+{
+ return equalIgnoringASCIICase(a, b.string());
+}
+
+inline bool equalIgnoringASCIICase(const AtomicString& a, const char* b)
+{
+ return equalIgnoringASCIICase(a.string(), b);
+}
+
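+// Allows numberToStringSigned / numberToStringUnsigned to produce an AtomicString directly, as used by the AtomicString::number overloads.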
+template<> struct IntegerToStringConversionTrait<AtomicString> {
+ using ReturnType = AtomicString;
+ using AdditionalArgumentType = void;
+ static AtomicString flush(LChar* characters, unsigned length, void*) { return { characters, length }; }
+};
+
} // namespace WTF
#ifndef ATOMICSTRING_HIDE_GLOBALS
using WTF::AtomicString;
using WTF::nullAtom;
using WTF::emptyAtom;
-using WTF::textAtom;
-using WTF::commentAtom;
using WTF::starAtom;
using WTF::xmlAtom;
using WTF::xmlnsAtom;
-using WTF::xlinkAtom;
#endif
#include <wtf/text/StringConcatenate.h>
+
#endif // AtomicString_h
diff --git a/Source/WTF/wtf/text/AtomicStringHash.h b/Source/WTF/wtf/text/AtomicStringHash.h
index 6130d9493..417619350 100644
--- a/Source/WTF/wtf/text/AtomicStringHash.h
+++ b/Source/WTF/wtf/text/AtomicStringHash.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -48,11 +48,20 @@ namespace WTF {
static const bool safeToCompareToEmptyOrDeleted = false;
};
- // AtomicStringHash is the default hash for AtomicString
- template<> struct HashTraits<WTF::AtomicString> : GenericHashTraits<WTF::AtomicString> {
- static const bool emptyValueIsZero = true;
- static void constructDeletedValue(WTF::AtomicString& slot) { new (NotNull, &slot) WTF::AtomicString(HashTableDeletedValue); }
- static bool isDeletedValue(const WTF::AtomicString& slot) { return slot.isHashTableDeletedValue(); }
+ template<> struct HashTraits<WTF::AtomicString> : SimpleClassHashTraits<WTF::AtomicString> {
+ static const bool hasIsEmptyValueFunction = true;
+ static bool isEmptyValue(const AtomicString& value)
+ {
+ return value.isNull();
+ }
+
+ static void customDeleteBucket(AtomicString& value)
+ {
+ // See unique_ptr's customDeleteBucket() for an explanation.
+ ASSERT(!isDeletedValue(value));
+ AtomicString valueToBeDestroyed = WTFMove(value);
+ constructDeletedValue(value);
+ }
};
}
diff --git a/Source/WTF/wtf/text/AtomicStringImpl.cpp b/Source/WTF/wtf/text/AtomicStringImpl.cpp
new file mode 100644
index 000000000..fb50b7fdd
--- /dev/null
+++ b/Source/WTF/wtf/text/AtomicStringImpl.cpp
@@ -0,0 +1,540 @@
+/*
+ * Copyright (C) 2004-2008, 2013-2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ * Copyright (C) 2015 Yusuke Suzuki<utatane.tea@gmail.com>. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "config.h"
+#include "AtomicStringImpl.h"
+
+#include "AtomicStringTable.h"
+#include "CommaPrinter.h"
+#include "DataLog.h"
+#include "HashSet.h"
+#include "IntegerToStringConversion.h"
+#include "StringHash.h"
+#include "StringPrintStream.h"
+#include "Threading.h"
+#include "WTFThreadData.h"
+#include <wtf/unicode/UTF8.h>
+
+#if USE(WEB_THREAD)
+#include "Lock.h"
+#endif
+
+namespace WTF {
+
+using namespace Unicode;
+
+#if USE(WEB_THREAD)
+
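+// With USE(WEB_THREAD), the atomic string table is shared between the web thread and the UI thread, so all access goes through a static lock.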
+class AtomicStringTableLocker : public LockHolder {
+ WTF_MAKE_NONCOPYABLE(AtomicStringTableLocker);
+
+ static StaticLock s_stringTableLock;
+public:
+ AtomicStringTableLocker()
+ : LockHolder(&s_stringTableLock)
+ {
+ }
+};
+
+StaticLock AtomicStringTableLocker::s_stringTableLock;
+
+#else
+
+class AtomicStringTableLocker {
+ WTF_MAKE_NONCOPYABLE(AtomicStringTableLocker);
+public:
+ AtomicStringTableLocker() { }
+};
+
+#endif // USE(WEB_THREAD)
+
+using StringTableImpl = HashSet<StringImpl*>;
+
+static ALWAYS_INLINE StringTableImpl& stringTable()
+{
+ return wtfThreadData().atomicStringTable()->table();
+}
+
+template<typename T, typename HashTranslator>
+static inline Ref<AtomicStringImpl> addToStringTable(AtomicStringTableLocker&, StringTableImpl& atomicStringTable, const T& value)
+{
+ auto addResult = atomicStringTable.add<HashTranslator>(value);
+
+ // If the string is newly-translated, then we need to adopt it.
+ // The boolean in the pair tells us if that is so.
+ if (addResult.isNewEntry)
+ return adoptRef(static_cast<AtomicStringImpl&>(**addResult.iterator));
+ return *static_cast<AtomicStringImpl*>(*addResult.iterator);
+}
+
+template<typename T, typename HashTranslator>
+static inline Ref<AtomicStringImpl> addToStringTable(const T& value)
+{
+ AtomicStringTableLocker locker;
+ return addToStringTable<T, HashTranslator>(locker, stringTable(), value);
+}
+
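+// Hash translators let HashSet::add() find or construct a table entry directly from raw characters, without building a temporary StringImpl for the lookup.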
+struct CStringTranslator {
+ static unsigned hash(const LChar* c)
+ {
+ return StringHasher::computeHashAndMaskTop8Bits(c);
+ }
+
+ static inline bool equal(StringImpl* r, const LChar* s)
+ {
+ return WTF::equal(r, s);
+ }
+
+ static void translate(StringImpl*& location, const LChar* const& c, unsigned hash)
+ {
+ location = &StringImpl::create(c).leakRef();
+ location->setHash(hash);
+ location->setIsAtomic(true);
+ }
+};
+
+RefPtr<AtomicStringImpl> AtomicStringImpl::add(const LChar* c)
+{
+ if (!c)
+ return nullptr;
+ if (!*c)
+ return static_cast<AtomicStringImpl*>(StringImpl::empty());
+
+ return addToStringTable<const LChar*, CStringTranslator>(c);
+}
+
+template<typename CharacterType>
+struct HashTranslatorCharBuffer {
+ const CharacterType* s;
+ unsigned length;
+};
+
+typedef HashTranslatorCharBuffer<UChar> UCharBuffer;
+struct UCharBufferTranslator {
+ static unsigned hash(const UCharBuffer& buf)
+ {
+ return StringHasher::computeHashAndMaskTop8Bits(buf.s, buf.length);
+ }
+
+ static bool equal(StringImpl* const& str, const UCharBuffer& buf)
+ {
+ return WTF::equal(str, buf.s, buf.length);
+ }
+
+ static void translate(StringImpl*& location, const UCharBuffer& buf, unsigned hash)
+ {
+ location = &StringImpl::create8BitIfPossible(buf.s, buf.length).leakRef();
+ location->setHash(hash);
+ location->setIsAtomic(true);
+ }
+};
+
+template<typename CharacterType>
+struct HashAndCharacters {
+ unsigned hash;
+ const CharacterType* characters;
+ unsigned length;
+};
+
+template<typename CharacterType>
+struct HashAndCharactersTranslator {
+ static unsigned hash(const HashAndCharacters<CharacterType>& buffer)
+ {
+ ASSERT(buffer.hash == StringHasher::computeHashAndMaskTop8Bits(buffer.characters, buffer.length));
+ return buffer.hash;
+ }
+
+ static bool equal(StringImpl* const& string, const HashAndCharacters<CharacterType>& buffer)
+ {
+ return WTF::equal(string, buffer.characters, buffer.length);
+ }
+
+ static void translate(StringImpl*& location, const HashAndCharacters<CharacterType>& buffer, unsigned hash)
+ {
+ location = &StringImpl::create(buffer.characters, buffer.length).leakRef();
+ location->setHash(hash);
+ location->setIsAtomic(true);
+ }
+};
+
+struct HashAndUTF8Characters {
+ unsigned hash;
+ const char* characters;
+ unsigned length;
+ unsigned utf16Length;
+};
+
+struct HashAndUTF8CharactersTranslator {
+ static unsigned hash(const HashAndUTF8Characters& buffer)
+ {
+ return buffer.hash;
+ }
+
+ static bool equal(StringImpl* const& string, const HashAndUTF8Characters& buffer)
+ {
+ if (buffer.utf16Length != string->length())
+ return false;
+
+        // If the buffer contains only ASCII characters, the UTF-8 and UTF-16 lengths are the same.
+ if (buffer.utf16Length != buffer.length) {
+ if (string->is8Bit())
+ return equalLatin1WithUTF8(string->characters8(), buffer.characters, buffer.characters + buffer.length);
+
+ return equalUTF16WithUTF8(string->characters16(), buffer.characters, buffer.characters + buffer.length);
+ }
+
+ if (string->is8Bit()) {
+ const LChar* stringCharacters = string->characters8();
+
+ for (unsigned i = 0; i < buffer.length; ++i) {
+ ASSERT(isASCII(buffer.characters[i]));
+ if (stringCharacters[i] != buffer.characters[i])
+ return false;
+ }
+
+ return true;
+ }
+
+ const UChar* stringCharacters = string->characters16();
+
+ for (unsigned i = 0; i < buffer.length; ++i) {
+ ASSERT(isASCII(buffer.characters[i]));
+ if (stringCharacters[i] != buffer.characters[i])
+ return false;
+ }
+
+ return true;
+ }
+
+ static void translate(StringImpl*& location, const HashAndUTF8Characters& buffer, unsigned hash)
+ {
+ UChar* target;
+ auto newString = StringImpl::createUninitialized(buffer.utf16Length, target);
+
+ bool isAllASCII;
+ const char* source = buffer.characters;
+ if (convertUTF8ToUTF16(&source, source + buffer.length, &target, target + buffer.utf16Length, &isAllASCII) != conversionOK)
+ ASSERT_NOT_REACHED();
+
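+        // The converted data was all ASCII, so a more compact 8-bit string can be created directly from the original bytes.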
+ if (isAllASCII)
+ newString = StringImpl::create(buffer.characters, buffer.length);
+
+ location = &newString.leakRef();
+ location->setHash(hash);
+ location->setIsAtomic(true);
+ }
+};
+
+RefPtr<AtomicStringImpl> AtomicStringImpl::add(const UChar* s, unsigned length)
+{
+ if (!s)
+ return nullptr;
+
+ if (!length)
+ return static_cast<AtomicStringImpl*>(StringImpl::empty());
+
+ UCharBuffer buffer = { s, length };
+ return addToStringTable<UCharBuffer, UCharBufferTranslator>(buffer);
+}
+
+Ref<AtomicStringImpl> AtomicStringImpl::add(const UChar* s, unsigned length, unsigned existingHash)
+{
+ ASSERT(s);
+ ASSERT(existingHash);
+
+ if (!length)
+ return *static_cast<AtomicStringImpl*>(StringImpl::empty());
+
+ HashAndCharacters<UChar> buffer = { existingHash, s, length };
+ return addToStringTable<HashAndCharacters<UChar>, HashAndCharactersTranslator<UChar>>(buffer);
+}
+
+RefPtr<AtomicStringImpl> AtomicStringImpl::add(const UChar* s)
+{
+ if (!s)
+ return nullptr;
+
+ unsigned length = 0;
+ while (s[length] != UChar(0))
+ ++length;
+
+ if (!length)
+ return static_cast<AtomicStringImpl*>(StringImpl::empty());
+
+ UCharBuffer buffer = { s, length };
+ return addToStringTable<UCharBuffer, UCharBufferTranslator>(buffer);
+}
+
+struct SubstringLocation {
+ StringImpl* baseString;
+ unsigned start;
+ unsigned length;
+};
+
+struct SubstringTranslator {
+ static void translate(StringImpl*& location, const SubstringLocation& buffer, unsigned hash)
+ {
+ location = &StringImpl::createSubstringSharingImpl(*buffer.baseString, buffer.start, buffer.length).leakRef();
+ location->setHash(hash);
+ location->setIsAtomic(true);
+ }
+};
+
+struct SubstringTranslator8 : SubstringTranslator {
+ static unsigned hash(const SubstringLocation& buffer)
+ {
+ return StringHasher::computeHashAndMaskTop8Bits(buffer.baseString->characters8() + buffer.start, buffer.length);
+ }
+
+ static bool equal(StringImpl* const& string, const SubstringLocation& buffer)
+ {
+ return WTF::equal(string, buffer.baseString->characters8() + buffer.start, buffer.length);
+ }
+};
+
+struct SubstringTranslator16 : SubstringTranslator {
+ static unsigned hash(const SubstringLocation& buffer)
+ {
+ return StringHasher::computeHashAndMaskTop8Bits(buffer.baseString->characters16() + buffer.start, buffer.length);
+ }
+
+ static bool equal(StringImpl* const& string, const SubstringLocation& buffer)
+ {
+ return WTF::equal(string, buffer.baseString->characters16() + buffer.start, buffer.length);
+ }
+};
+
+RefPtr<AtomicStringImpl> AtomicStringImpl::add(StringImpl* baseString, unsigned start, unsigned length)
+{
+ if (!baseString)
+ return nullptr;
+
+ if (!length || start >= baseString->length())
+ return static_cast<AtomicStringImpl*>(StringImpl::empty());
+
+ unsigned maxLength = baseString->length() - start;
+ if (length >= maxLength) {
+ if (!start)
+ return add(baseString);
+ length = maxLength;
+ }
+
+ SubstringLocation buffer = { baseString, start, length };
+ if (baseString->is8Bit())
+ return addToStringTable<SubstringLocation, SubstringTranslator8>(buffer);
+ return addToStringTable<SubstringLocation, SubstringTranslator16>(buffer);
+}
+
+typedef HashTranslatorCharBuffer<LChar> LCharBuffer;
+struct LCharBufferTranslator {
+ static unsigned hash(const LCharBuffer& buf)
+ {
+ return StringHasher::computeHashAndMaskTop8Bits(buf.s, buf.length);
+ }
+
+ static bool equal(StringImpl* const& str, const LCharBuffer& buf)
+ {
+ return WTF::equal(str, buf.s, buf.length);
+ }
+
+ static void translate(StringImpl*& location, const LCharBuffer& buf, unsigned hash)
+ {
+ location = &StringImpl::create(buf.s, buf.length).leakRef();
+ location->setHash(hash);
+ location->setIsAtomic(true);
+ }
+};
+
+typedef HashTranslatorCharBuffer<char> CharBuffer;
+struct CharBufferFromLiteralDataTranslator {
+ static unsigned hash(const CharBuffer& buf)
+ {
+ return StringHasher::computeHashAndMaskTop8Bits(reinterpret_cast<const LChar*>(buf.s), buf.length);
+ }
+
+ static bool equal(StringImpl* const& str, const CharBuffer& buf)
+ {
+ return WTF::equal(str, buf.s, buf.length);
+ }
+
+ static void translate(StringImpl*& location, const CharBuffer& buf, unsigned hash)
+ {
+ location = &StringImpl::createFromLiteral(buf.s, buf.length).leakRef();
+ location->setHash(hash);
+ location->setIsAtomic(true);
+ }
+};
+
+RefPtr<AtomicStringImpl> AtomicStringImpl::add(const LChar* s, unsigned length)
+{
+ if (!s)
+ return nullptr;
+
+ if (!length)
+ return static_cast<AtomicStringImpl*>(StringImpl::empty());
+
+ LCharBuffer buffer = { s, length };
+ return addToStringTable<LCharBuffer, LCharBufferTranslator>(buffer);
+}
+
+Ref<AtomicStringImpl> AtomicStringImpl::addLiteral(const char* characters, unsigned length)
+{
+ ASSERT(characters);
+ ASSERT(length);
+
+ CharBuffer buffer = { characters, length };
+ return addToStringTable<CharBuffer, CharBufferFromLiteralDataTranslator>(buffer);
+}
+
+static inline Ref<AtomicStringImpl> addSubstring(AtomicStringTableLocker& locker, StringTableImpl& atomicStringTable, StringImpl& base)
+{
+ ASSERT(base.length());
+ ASSERT(base.isSymbol() || base.isStatic());
+
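+    // Register an atomic substring that shares the entire base string; symbols and static strings are never inserted into the table directly.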
+ SubstringLocation buffer = { &base, 0, base.length() };
+ if (base.is8Bit())
+ return addToStringTable<SubstringLocation, SubstringTranslator8>(locker, atomicStringTable, buffer);
+ return addToStringTable<SubstringLocation, SubstringTranslator16>(locker, atomicStringTable, buffer);
+}
+
+static inline Ref<AtomicStringImpl> addSubstring(StringImpl& base)
+{
+ AtomicStringTableLocker locker;
+ return addSubstring(locker, stringTable(), base);
+}
+
+Ref<AtomicStringImpl> AtomicStringImpl::addSlowCase(StringImpl& string)
+{
+ if (!string.length())
+ return *static_cast<AtomicStringImpl*>(StringImpl::empty());
+
+ if (string.isSymbol() || string.isStatic())
+ return addSubstring(string);
+
+ ASSERT_WITH_MESSAGE(!string.isAtomic(), "AtomicStringImpl should not hit the slow case if the string is already atomic.");
+
+ AtomicStringTableLocker locker;
+ auto addResult = stringTable().add(&string);
+
+ if (addResult.isNewEntry) {
+ ASSERT(*addResult.iterator == &string);
+ string.setIsAtomic(true);
+ }
+
+ return *static_cast<AtomicStringImpl*>(*addResult.iterator);
+}
+
+Ref<AtomicStringImpl> AtomicStringImpl::addSlowCase(AtomicStringTable& stringTable, StringImpl& string)
+{
+ if (!string.length())
+ return *static_cast<AtomicStringImpl*>(StringImpl::empty());
+
+ if (string.isSymbol() || string.isStatic()) {
+ AtomicStringTableLocker locker;
+ return addSubstring(locker, stringTable.table(), string);
+ }
+
+ ASSERT_WITH_MESSAGE(!string.isAtomic(), "AtomicStringImpl should not hit the slow case if the string is already atomic.");
+
+ AtomicStringTableLocker locker;
+ auto addResult = stringTable.table().add(&string);
+
+ if (addResult.isNewEntry) {
+ ASSERT(*addResult.iterator == &string);
+ string.setIsAtomic(true);
+ }
+
+ return *static_cast<AtomicStringImpl*>(*addResult.iterator);
+}
+
+void AtomicStringImpl::remove(AtomicStringImpl* string)
+{
+ ASSERT(string->isAtomic());
+ AtomicStringTableLocker locker;
+ auto& atomicStringTable = stringTable();
+ auto iterator = atomicStringTable.find(string);
+    ASSERT_WITH_MESSAGE(iterator != atomicStringTable.end(), "The string being removed is atomic in the string table of another thread!");
+ ASSERT(string == *iterator);
+ atomicStringTable.remove(iterator);
+}
+
+RefPtr<AtomicStringImpl> AtomicStringImpl::lookUpSlowCase(StringImpl& string)
+{
+ ASSERT_WITH_MESSAGE(!string.isAtomic(), "AtomicStringImpls should return from the fast case.");
+
+ if (!string.length())
+ return static_cast<AtomicStringImpl*>(StringImpl::empty());
+
+ AtomicStringTableLocker locker;
+ auto& atomicStringTable = stringTable();
+ auto iterator = atomicStringTable.find(&string);
+ if (iterator != atomicStringTable.end())
+ return static_cast<AtomicStringImpl*>(*iterator);
+ return nullptr;
+}
+
+RefPtr<AtomicStringImpl> AtomicStringImpl::addUTF8(const char* charactersStart, const char* charactersEnd)
+{
+ HashAndUTF8Characters buffer;
+ buffer.characters = charactersStart;
+ buffer.hash = calculateStringHashAndLengthFromUTF8MaskingTop8Bits(charactersStart, charactersEnd, buffer.length, buffer.utf16Length);
+
+ if (!buffer.hash)
+ return nullptr;
+
+ return addToStringTable<HashAndUTF8Characters, HashAndUTF8CharactersTranslator>(buffer);
+}
+
+RefPtr<AtomicStringImpl> AtomicStringImpl::lookUpInternal(const LChar* characters, unsigned length)
+{
+ AtomicStringTableLocker locker;
+ auto& table = stringTable();
+
+ LCharBuffer buffer = { characters, length };
+ auto iterator = table.find<LCharBufferTranslator>(buffer);
+ if (iterator != table.end())
+ return static_cast<AtomicStringImpl*>(*iterator);
+ return nullptr;
+}
+
+RefPtr<AtomicStringImpl> AtomicStringImpl::lookUpInternal(const UChar* characters, unsigned length)
+{
+ AtomicStringTableLocker locker;
+ auto& table = stringTable();
+
+ UCharBuffer buffer = { characters, length };
+ auto iterator = table.find<UCharBufferTranslator>(buffer);
+ if (iterator != table.end())
+ return static_cast<AtomicStringImpl*>(*iterator);
+ return nullptr;
+}
+
+#if !ASSERT_DISABLED
+bool AtomicStringImpl::isInAtomicStringTable(StringImpl* string)
+{
+ AtomicStringTableLocker locker;
+ return stringTable().contains(string);
+}
+#endif
+
+} // namespace WTF
diff --git a/Source/WTF/wtf/text/AtomicStringImpl.h b/Source/WTF/wtf/text/AtomicStringImpl.h
index 45114aca5..1cde4b0ed 100644
--- a/Source/WTF/wtf/text/AtomicStringImpl.h
+++ b/Source/WTF/wtf/text/AtomicStringImpl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Apple Computer, Inc.
+ * Copyright (C) 2006 Apple Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -21,18 +21,97 @@
#ifndef AtomicStringImpl_h
#define AtomicStringImpl_h
-#include <wtf/text/StringImpl.h>
+#include <wtf/text/UniquedStringImpl.h>
namespace WTF {
-class AtomicStringImpl : public StringImpl
-{
+class AtomicStringTable;
+
+class AtomicStringImpl : public UniquedStringImpl {
public:
- AtomicStringImpl() : StringImpl(0) {}
+ static RefPtr<AtomicStringImpl> lookUp(LChar* characters, unsigned length)
+ {
+ return lookUpInternal(characters, length);
+ }
+ static RefPtr<AtomicStringImpl> lookUp(UChar* characters, unsigned length)
+ {
+ return lookUpInternal(characters, length);
+ }
+ static RefPtr<AtomicStringImpl> lookUp(StringImpl* string)
+ {
+ if (!string || string->isAtomic())
+ return static_cast<AtomicStringImpl*>(string);
+ return lookUpSlowCase(*string);
+ }
+
+ static void remove(AtomicStringImpl*);
+
+ WTF_EXPORT_STRING_API static RefPtr<AtomicStringImpl> add(const LChar*);
+ ALWAYS_INLINE static RefPtr<AtomicStringImpl> add(const char* s) { return add(reinterpret_cast<const LChar*>(s)); };
+ WTF_EXPORT_STRING_API static RefPtr<AtomicStringImpl> add(const LChar*, unsigned length);
+ WTF_EXPORT_STRING_API static RefPtr<AtomicStringImpl> add(const UChar*, unsigned length);
+ ALWAYS_INLINE static RefPtr<AtomicStringImpl> add(const char* s, unsigned length) { return add(reinterpret_cast<const LChar*>(s), length); };
+ WTF_EXPORT_STRING_API static Ref<AtomicStringImpl> add(const UChar*, unsigned length, unsigned existingHash);
+ WTF_EXPORT_STRING_API static RefPtr<AtomicStringImpl> add(const UChar*);
+ WTF_EXPORT_STRING_API static RefPtr<AtomicStringImpl> add(StringImpl*, unsigned offset, unsigned length);
+ ALWAYS_INLINE static RefPtr<AtomicStringImpl> add(StringImpl* string)
+ {
+ if (!string)
+ return static_cast<AtomicStringImpl*>(string);
+ return add(*string);
+ }
+ WTF_EXPORT_STRING_API static Ref<AtomicStringImpl> addLiteral(const char* characters, unsigned length);
+
+ // Returns null if the input data contains an invalid UTF-8 sequence.
+ WTF_EXPORT_STRING_API static RefPtr<AtomicStringImpl> addUTF8(const char* start, const char* end);
+#if USE(CF)
+ WTF_EXPORT_STRING_API static RefPtr<AtomicStringImpl> add(CFStringRef);
+#endif
+
+ template<typename StringTableProvider>
+ ALWAYS_INLINE static RefPtr<AtomicStringImpl> addWithStringTableProvider(StringTableProvider& stringTableProvider, StringImpl* string)
+ {
+ if (!string)
+ return nullptr;
+ return add(*stringTableProvider.atomicStringTable(), *string);
+ }
+
+#if !ASSERT_DISABLED
+ WTF_EXPORT_STRING_API static bool isInAtomicStringTable(StringImpl*);
+#endif
+
+private:
+ AtomicStringImpl() = delete;
+
+ ALWAYS_INLINE static Ref<AtomicStringImpl> add(StringImpl& string)
+ {
+ if (string.isAtomic()) {
+            ASSERT_WITH_MESSAGE(!string.length() || isInAtomicStringTable(&string), "The atomic string comes from another thread!");
+ return static_cast<AtomicStringImpl&>(string);
+ }
+ return addSlowCase(string);
+ }
+
+ ALWAYS_INLINE static Ref<AtomicStringImpl> add(AtomicStringTable& stringTable, StringImpl& string)
+ {
+ if (string.isAtomic()) {
+            ASSERT_WITH_MESSAGE(!string.length() || isInAtomicStringTable(&string), "The atomic string comes from another thread!");
+ return static_cast<AtomicStringImpl&>(string);
+ }
+ return addSlowCase(stringTable, string);
+ }
+
+ WTF_EXPORT_STRING_API static Ref<AtomicStringImpl> addSlowCase(StringImpl&);
+ WTF_EXPORT_STRING_API static Ref<AtomicStringImpl> addSlowCase(AtomicStringTable&, StringImpl&);
+
+ WTF_EXPORT_STRING_API static RefPtr<AtomicStringImpl> lookUpSlowCase(StringImpl&);
+
+ WTF_EXPORT_STRING_API static RefPtr<AtomicStringImpl> lookUpInternal(const LChar*, unsigned length);
+ WTF_EXPORT_STRING_API static RefPtr<AtomicStringImpl> lookUpInternal(const UChar*, unsigned length);
};
#if !ASSERT_DISABLED
-// AtomicStringImpls created from StaticASCIILiteral will ASSERT
+// AtomicStringImpls created from StaticStringImpl will ASSERT
// in the generic ValueCheck<T>::checkConsistency
// as they are not allocated by fastMalloc.
// We don't currently have any way to detect that case
diff --git a/Source/WTF/wtf/text/AtomicStringTable.cpp b/Source/WTF/wtf/text/AtomicStringTable.cpp
index d961b17e2..fe8a4884d 100644
--- a/Source/WTF/wtf/text/AtomicStringTable.cpp
+++ b/Source/WTF/wtf/text/AtomicStringTable.cpp
@@ -37,25 +37,28 @@ void AtomicStringTable::create(WTFThreadData& data)
bool currentThreadIsWebThread = isWebThread();
if (currentThreadIsWebThread || isUIThread())
- data.m_atomicStringTable = sharedStringTable;
+ data.m_defaultAtomicStringTable = sharedStringTable;
else
- data.m_atomicStringTable = new AtomicStringTable;
+ data.m_defaultAtomicStringTable = new AtomicStringTable;
// We do the following so that its destruction happens only
// once - on the main UI thread.
if (!currentThreadIsWebThread)
data.m_atomicStringTableDestructor = AtomicStringTable::destroy;
#else
- data.m_atomicStringTable = new AtomicStringTable;
+ data.m_defaultAtomicStringTable = new AtomicStringTable;
data.m_atomicStringTableDestructor = AtomicStringTable::destroy;
#endif // USE(WEB_THREAD)
}
+AtomicStringTable::~AtomicStringTable()
+{
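+    // Un-mark any remaining entries so they no longer consider themselves part of an atomic string table.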
+ for (auto* string : m_table)
+ string->setIsAtomic(false);
+}
+
void AtomicStringTable::destroy(AtomicStringTable* table)
{
- HashSet<StringImpl*>::iterator end = table->m_table.end();
- for (HashSet<StringImpl*>::iterator iter = table->m_table.begin(); iter != end; ++iter)
- (*iter)->setIsAtomic(false);
delete table;
}
diff --git a/Source/WTF/wtf/text/AtomicStringTable.h b/Source/WTF/wtf/text/AtomicStringTable.h
index 57826cb71..71d956d27 100644
--- a/Source/WTF/wtf/text/AtomicStringTable.h
+++ b/Source/WTF/wtf/text/AtomicStringTable.h
@@ -33,6 +33,7 @@ class StringImpl;
class AtomicStringTable {
WTF_MAKE_FAST_ALLOCATED;
public:
+ WTF_EXPORT_PRIVATE ~AtomicStringTable();
static void create(WTFThreadData&);
HashSet<StringImpl*>& table() { return m_table; }
diff --git a/Source/WTF/wtf/text/Base64.cpp b/Source/WTF/wtf/text/Base64.cpp
index 2323f3fa3..714a7ead4 100644
--- a/Source/WTF/wtf/text/Base64.cpp
+++ b/Source/WTF/wtf/text/Base64.cpp
@@ -1,7 +1,7 @@
/*
Copyright (C) 2000-2001 Dawit Alemayehu <adawit@kde.org>
Copyright (C) 2006 Alexey Proskuryakov <ap@webkit.org>
- Copyright (C) 2007, 2008, 2013 Apple Inc. All rights reserved.
+ Copyright (C) 2007, 2008, 2013, 2016 Apple Inc. All rights reserved.
Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
This program is free software; you can redistribute it and/or modify
@@ -92,7 +92,7 @@ static const char base64URLDecMap[128] = {
0x31, 0x32, 0x33, nonAlphabet, nonAlphabet, nonAlphabet, nonAlphabet, nonAlphabet
};
-static inline void base64EncodeInternal(const char* data, unsigned len, Vector<char>& out, Base64EncodePolicy policy, const char (&encodeMap)[64])
+static inline void base64EncodeInternal(const unsigned char* data, unsigned len, Vector<char>& out, Base64EncodePolicy policy, const char (&encodeMap)[64])
{
out.clear();
if (!len)
@@ -160,29 +160,29 @@ static inline void base64EncodeInternal(const char* data, unsigned len, Vector<c
String base64Encode(const void* data, unsigned length, Base64EncodePolicy policy)
{
Vector<char> result;
- base64EncodeInternal(static_cast<const char*>(data), length, result, policy, base64EncMap);
+ base64EncodeInternal(static_cast<const unsigned char*>(data), length, result, policy, base64EncMap);
return String(result.data(), result.size());
}
void base64Encode(const void* data, unsigned len, Vector<char>& out, Base64EncodePolicy policy)
{
- base64EncodeInternal(static_cast<const char*>(data), len, out, policy, base64EncMap);
+ base64EncodeInternal(static_cast<const unsigned char*>(data), len, out, policy, base64EncMap);
}
String base64URLEncode(const void* data, unsigned length)
{
Vector<char> result;
- base64EncodeInternal(static_cast<const char*>(data), length, result, Base64URLPolicy, base64URLEncMap);
+ base64EncodeInternal(static_cast<const unsigned char*>(data), length, result, Base64URLPolicy, base64URLEncMap);
return String(result.data(), result.size());
}
void base64URLEncode(const void* data, unsigned len, Vector<char>& out)
{
- base64EncodeInternal(static_cast<const char*>(data), len, out, Base64URLPolicy, base64URLEncMap);
+ base64EncodeInternal(static_cast<const unsigned char*>(data), len, out, Base64URLPolicy, base64URLEncMap);
}
template<typename T>
-static inline bool base64DecodeInternal(const T* data, unsigned length, Vector<char>& out, Base64DecodePolicy policy, const char (&decodeMap)[128])
+static inline bool base64DecodeInternal(const T* data, unsigned length, SignedOrUnsignedCharVectorAdapter& out, unsigned options, const char (&decodeMap)[128])
{
out.clear();
if (!length)
@@ -192,29 +192,47 @@ static inline bool base64DecodeInternal(const T* data, unsigned length, Vector<c
unsigned equalsSignCount = 0;
unsigned outLength = 0;
+ bool hadError = false;
for (unsigned idx = 0; idx < length; ++idx) {
unsigned ch = data[idx];
if (ch == '=') {
++equalsSignCount;
- // There should be no padding if length is a multiple of 4, and there
- // should never be more than 2 padding characters.
- if (policy == Base64FailOnInvalidCharacterOrExcessPadding && (length % 4 || equalsSignCount > 2))
- return false;
+ // There should never be more than 2 padding characters.
+ if (options & Base64ValidatePadding && equalsSignCount > 2) {
+ hadError = true;
+ break;
+ }
} else {
char decodedCharacter = ch < WTF_ARRAY_LENGTH(decodeMap) ? decodeMap[ch] : nonAlphabet;
if (decodedCharacter != nonAlphabet) {
- if (equalsSignCount)
- return false;
- out[outLength] = decodedCharacter;
- ++outLength;
- } else if (policy == Base64FailOnInvalidCharacterOrExcessPadding || policy == Base64FailOnInvalidCharacter || (policy == Base64IgnoreWhitespace && !isSpaceOrNewline(ch)))
- return false;
+ if (equalsSignCount) {
+ hadError = true;
+ break;
+ }
+ out[outLength++] = decodedCharacter;
+ } else if (!(options & Base64IgnoreSpacesAndNewLines) || !isSpaceOrNewline(ch)) {
+ hadError = true;
+ break;
+ }
}
}
+ // Make sure we shrink back the Vector before returning. outLength may be shorter than expected
+ // in case of error or in case of ignored spaces.
+ if (outLength < out.size())
+ out.shrink(outLength);
+
+ if (hadError)
+ return false;
+
if (!outLength)
return !equalsSignCount;
+    // There should be no padding if the length is a multiple of 4.
+ // We use (outLength + equalsSignCount) instead of length because we don't want to account for ignored characters (i.e. spaces).
+ if (options & Base64ValidatePadding && equalsSignCount && (outLength + equalsSignCount) % 4)
+ return false;
+
// Valid data is (n * 4 + [0,2,3]) characters long.
if ((outLength % 4) == 1)
return false;
@@ -248,12 +266,15 @@ static inline bool base64DecodeInternal(const T* data, unsigned length, Vector<c
return true;
}
-bool base64Decode(const String& in, SignedOrUnsignedCharVectorAdapter out, Base64DecodePolicy policy)
+bool base64Decode(const String& in, SignedOrUnsignedCharVectorAdapter out, unsigned options)
{
- return base64DecodeInternal<UChar>(in.deprecatedCharacters(), in.length(), out, policy, base64DecMap);
+ unsigned length = in.length();
+ if (!length || in.is8Bit())
+ return base64DecodeInternal(in.characters8(), length, out, options, base64DecMap);
+ return base64DecodeInternal(in.characters16(), length, out, options, base64DecMap);
}
-bool base64Decode(const Vector<char>& in, SignedOrUnsignedCharVectorAdapter out, Base64DecodePolicy policy)
+bool base64Decode(const Vector<char>& in, SignedOrUnsignedCharVectorAdapter out, unsigned options)
{
out.clear();
@@ -261,17 +282,20 @@ bool base64Decode(const Vector<char>& in, SignedOrUnsignedCharVectorAdapter out,
if (in.size() > UINT_MAX)
return false;
- return base64DecodeInternal<char>(in.data(), in.size(), out, policy, base64DecMap);
+ return base64DecodeInternal(reinterpret_cast<const LChar*>(in.data()), in.size(), out, options, base64DecMap);
}
-bool base64Decode(const char* data, unsigned len, SignedOrUnsignedCharVectorAdapter out, Base64DecodePolicy policy)
+bool base64Decode(const char* data, unsigned len, SignedOrUnsignedCharVectorAdapter out, unsigned options)
{
- return base64DecodeInternal<char>(data, len, out, policy, base64DecMap);
+ return base64DecodeInternal(reinterpret_cast<const LChar*>(data), len, out, options, base64DecMap);
}
bool base64URLDecode(const String& in, SignedOrUnsignedCharVectorAdapter out)
{
- return base64DecodeInternal<UChar>(in.deprecatedCharacters(), in.length(), out, Base64FailOnInvalidCharacter, base64URLDecMap);
+ unsigned length = in.length();
+ if (!length || in.is8Bit())
+ return base64DecodeInternal(in.characters8(), length, out, Base64Default, base64URLDecMap);
+ return base64DecodeInternal(in.characters16(), length, out, Base64Default, base64URLDecMap);
}
bool base64URLDecode(const Vector<char>& in, SignedOrUnsignedCharVectorAdapter out)
@@ -282,12 +306,12 @@ bool base64URLDecode(const Vector<char>& in, SignedOrUnsignedCharVectorAdapter o
if (in.size() > UINT_MAX)
return false;
- return base64DecodeInternal<char>(in.data(), in.size(), out, Base64FailOnInvalidCharacter, base64URLDecMap);
+ return base64DecodeInternal(reinterpret_cast<const LChar*>(in.data()), in.size(), out, Base64Default, base64URLDecMap);
}
bool base64URLDecode(const char* data, unsigned len, SignedOrUnsignedCharVectorAdapter out)
{
- return base64DecodeInternal<char>(data, len, out, Base64FailOnInvalidCharacter, base64URLDecMap);
+ return base64DecodeInternal(reinterpret_cast<const LChar*>(data), len, out, Base64Default, base64URLDecMap);
}
} // namespace WTF
diff --git a/Source/WTF/wtf/text/Base64.h b/Source/WTF/wtf/text/Base64.h
index 1dfcf2698..820557558 100644
--- a/Source/WTF/wtf/text/Base64.h
+++ b/Source/WTF/wtf/text/Base64.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2006 Alexey Proskuryakov <ap@webkit.org>
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -12,10 +12,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -40,22 +40,70 @@ enum Base64EncodePolicy {
Base64URLPolicy // No padding, no LFs.
};
-enum Base64DecodePolicy {
- Base64FailOnInvalidCharacterOrExcessPadding,
- Base64FailOnInvalidCharacter,
- Base64IgnoreWhitespace,
- Base64IgnoreInvalidCharacters
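+// The decode options are bit flags and can be combined with bitwise OR.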
+enum Base64DecodeOptions {
+ Base64Default = 0,
+ Base64ValidatePadding = 1 << 0,
+ Base64IgnoreSpacesAndNewLines = 1 << 1,
};
class SignedOrUnsignedCharVectorAdapter {
public:
- SignedOrUnsignedCharVectorAdapter(Vector<char>& vector) { m_vector.c = &vector; }
- SignedOrUnsignedCharVectorAdapter(Vector<uint8_t>& vector) { m_vector.u = &vector; }
-
- operator Vector<char>&() { return *m_vector.c; }
- void clear() { m_vector.c->clear(); }
+ SignedOrUnsignedCharVectorAdapter(Vector<char>& vector)
+ : m_isSigned(true)
+ {
+ m_vector.c = &vector;
+ }
+ SignedOrUnsignedCharVectorAdapter(Vector<uint8_t>& vector)
+ : m_isSigned(false)
+ {
+ m_vector.u = &vector;
+ }
+
+ uint8_t* data()
+ {
+ if (m_isSigned)
+ return reinterpret_cast<uint8_t*>(m_vector.c->data());
+ return m_vector.u->data();
+ }
+
+ size_t size() const
+ {
+ if (m_isSigned)
+ return m_vector.c->size();
+ return m_vector.u->size();
+ }
+
+ void clear()
+ {
+ if (m_isSigned) {
+ m_vector.c->clear();
+ return;
+ }
+ m_vector.u->clear();
+ }
+
+ void grow(size_t size)
+ {
+ if (m_isSigned) {
+ m_vector.c->grow(size);
+ return;
+ }
+ m_vector.u->grow(size);
+ }
+
+ void shrink(size_t size)
+ {
+ if (m_isSigned) {
+ m_vector.c->shrink(size);
+ return;
+ }
+ m_vector.u->shrink(size);
+ }
+
+ uint8_t& operator[](size_t position) { return data()[position]; }
private:
+ bool m_isSigned;
union {
Vector<char>* c;
Vector<uint8_t>* u;
@@ -64,14 +112,32 @@ private:
class ConstSignedOrUnsignedCharVectorAdapter {
public:
- ConstSignedOrUnsignedCharVectorAdapter(const Vector<char>& vector) { m_vector.c = &vector; }
- ConstSignedOrUnsignedCharVectorAdapter(const Vector<uint8_t>& vector) { m_vector.u = &vector; }
-
- operator const Vector<char>&() { return *m_vector.c; }
- const char* data() const { return m_vector.c->data(); }
- size_t size() const { return m_vector.c->size(); }
+ ConstSignedOrUnsignedCharVectorAdapter(const Vector<char>& vector)
+ : m_isSigned(false)
+ {
+ m_vector.c = &vector;
+ }
+ ConstSignedOrUnsignedCharVectorAdapter(const Vector<uint8_t>& vector)
+ : m_isSigned(true)
+ {
+ m_vector.u = &vector;
+ }
+
+ const uint8_t* data() const
+ {
+ if (m_isSigned)
+ return reinterpret_cast<const uint8_t*>(m_vector.c->data());
+ return m_vector.u->data();
+ }
+ size_t size() const
+ {
+ if (m_isSigned)
+ return m_vector.c->size();
+ return m_vector.u->size();
+ }
private:
+ bool m_isSigned;
union {
const Vector<char>* c;
const Vector<uint8_t>* u;
@@ -79,15 +145,15 @@ private:
};
WTF_EXPORT_PRIVATE void base64Encode(const void*, unsigned, Vector<char>&, Base64EncodePolicy = Base64DoNotInsertLFs);
-WTF_EXPORT_PRIVATE void base64Encode(ConstSignedOrUnsignedCharVectorAdapter, Vector<char>&, Base64EncodePolicy = Base64DoNotInsertLFs);
-WTF_EXPORT_PRIVATE void base64Encode(const CString&, Vector<char>&, Base64EncodePolicy = Base64DoNotInsertLFs);
+void base64Encode(ConstSignedOrUnsignedCharVectorAdapter, Vector<char>&, Base64EncodePolicy = Base64DoNotInsertLFs);
+void base64Encode(const CString&, Vector<char>&, Base64EncodePolicy = Base64DoNotInsertLFs);
WTF_EXPORT_PRIVATE String base64Encode(const void*, unsigned, Base64EncodePolicy = Base64DoNotInsertLFs);
-WTF_EXPORT_PRIVATE String base64Encode(ConstSignedOrUnsignedCharVectorAdapter, Base64EncodePolicy = Base64DoNotInsertLFs);
-WTF_EXPORT_PRIVATE String base64Encode(const CString&, Base64EncodePolicy = Base64DoNotInsertLFs);
+String base64Encode(ConstSignedOrUnsignedCharVectorAdapter, Base64EncodePolicy = Base64DoNotInsertLFs);
+String base64Encode(const CString&, Base64EncodePolicy = Base64DoNotInsertLFs);
-WTF_EXPORT_PRIVATE bool base64Decode(const String&, SignedOrUnsignedCharVectorAdapter, Base64DecodePolicy = Base64FailOnInvalidCharacter);
-WTF_EXPORT_PRIVATE bool base64Decode(const Vector<char>&, SignedOrUnsignedCharVectorAdapter, Base64DecodePolicy = Base64FailOnInvalidCharacter);
-WTF_EXPORT_PRIVATE bool base64Decode(const char*, unsigned, SignedOrUnsignedCharVectorAdapter, Base64DecodePolicy = Base64FailOnInvalidCharacter);
+WTF_EXPORT_PRIVATE bool base64Decode(const String&, SignedOrUnsignedCharVectorAdapter, unsigned options = Base64Default);
+WTF_EXPORT_PRIVATE bool base64Decode(const Vector<char>&, SignedOrUnsignedCharVectorAdapter, unsigned options = Base64Default);
+WTF_EXPORT_PRIVATE bool base64Decode(const char*, unsigned, SignedOrUnsignedCharVectorAdapter, unsigned options = Base64Default);
inline void base64Encode(ConstSignedOrUnsignedCharVectorAdapter in, Vector<char>& out, Base64EncodePolicy policy)
{
@@ -115,11 +181,12 @@ inline String base64Encode(const CString& in, Base64EncodePolicy policy)
// ======================================================================================
WTF_EXPORT_PRIVATE void base64URLEncode(const void*, unsigned, Vector<char>&);
-WTF_EXPORT_PRIVATE void base64URLEncode(ConstSignedOrUnsignedCharVectorAdapter, Vector<char>&);
-WTF_EXPORT_PRIVATE void base64URLEncode(const CString&, Vector<char>&);
+void base64URLEncode(ConstSignedOrUnsignedCharVectorAdapter, Vector<char>&);
+void base64URLEncode(const CString&, Vector<char>&);
+
WTF_EXPORT_PRIVATE String base64URLEncode(const void*, unsigned);
-WTF_EXPORT_PRIVATE String base64URLEncode(ConstSignedOrUnsignedCharVectorAdapter);
-WTF_EXPORT_PRIVATE String base64URLEncode(const CString&);
+String base64URLEncode(ConstSignedOrUnsignedCharVectorAdapter);
+String base64URLEncode(const CString&);
WTF_EXPORT_PRIVATE bool base64URLDecode(const String&, SignedOrUnsignedCharVectorAdapter);
WTF_EXPORT_PRIVATE bool base64URLDecode(const Vector<char>&, SignedOrUnsignedCharVectorAdapter);
@@ -150,11 +217,8 @@ inline String base64URLEncode(const CString& in)
using WTF::Base64EncodePolicy;
using WTF::Base64DoNotInsertLFs;
using WTF::Base64InsertLFs;
-using WTF::Base64DecodePolicy;
-using WTF::Base64FailOnInvalidCharacterOrExcessPadding;
-using WTF::Base64FailOnInvalidCharacter;
-using WTF::Base64IgnoreWhitespace;
-using WTF::Base64IgnoreInvalidCharacters;
+using WTF::Base64ValidatePadding;
+using WTF::Base64IgnoreSpacesAndNewLines;
using WTF::base64Encode;
using WTF::base64Decode;
using WTF::base64URLDecode;
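
A minimal usage sketch of the new flag-based decode interface above; the encoded literal and the surrounding code are assumptions for illustration, not part of the patch.

    #include <wtf/text/Base64.h>

    String encoded("aGVsbG8=");                 // Base64 for "hello".
    Vector<uint8_t> decoded;                    // The adapter also accepts Vector<char>.
    bool ok = base64Decode(encoded, decoded, Base64ValidatePadding | Base64IgnoreSpacesAndNewLines);
    // ok is true and decoded holds the five bytes of "hello"; with Base64Default the
    // call would still succeed but would not reject malformed padding.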
diff --git a/Source/WTF/wtf/text/CString.cpp b/Source/WTF/wtf/text/CString.cpp
index e44a96e80..21b37eba8 100644
--- a/Source/WTF/wtf/text/CString.cpp
+++ b/Source/WTF/wtf/text/CString.cpp
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -28,18 +28,18 @@
#include "CString.h"
#include <string.h>
-#include <wtf/StringHasher.h>
+#include <wtf/Hasher.h>
namespace WTF {
-PassRefPtr<CStringBuffer> CStringBuffer::createUninitialized(size_t length)
+Ref<CStringBuffer> CStringBuffer::createUninitialized(size_t length)
{
RELEASE_ASSERT(length < (std::numeric_limits<unsigned>::max() - sizeof(CStringBuffer)));
// The +1 is for the terminating null character.
size_t size = sizeof(CStringBuffer) + length + 1;
CStringBuffer* stringBuffer = static_cast<CStringBuffer*>(fastMalloc(size));
- return adoptRef(new (NotNull, stringBuffer) CStringBuffer(length));
+ return adoptRef(*new (NotNull, stringBuffer) CStringBuffer(length));
}
CString::CString(const char* str)
@@ -76,7 +76,7 @@ char* CString::mutableData()
return 0;
return m_buffer->mutableData();
}
-
+
CString CString::newUninitialized(size_t length, char*& characterBuffer)
{
CString result;
@@ -92,7 +92,7 @@ void CString::copyBufferIfNeeded()
if (!m_buffer || m_buffer->hasOneRef())
return;
- RefPtr<CStringBuffer> buffer = m_buffer.release();
+ RefPtr<CStringBuffer> buffer = WTFMove(m_buffer);
size_t length = buffer->length();
m_buffer = CStringBuffer::createUninitialized(length);
memcpy(m_buffer->mutableData(), buffer->data(), length + 1);
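
A hedged sketch of the buffer-sharing behavior these Ref/WTFMove changes keep intact; the calling code is an assumption, and the copy-on-write step assumes mutableData() routes through copyBufferIfNeeded() as in the surrounding implementation.

    #include <wtf/text/CString.h>
    #include <string.h>

    char* destination = nullptr;
    CString scratch = CString::newUninitialized(5, destination); // The terminating null is handled internally.
    memcpy(destination, "hello", 5);
    CString alias = scratch;     // Both objects now share one CStringBuffer.
    alias.mutableData();         // The buffer is not uniquely owned, so copyBufferIfNeeded() clones it first.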
diff --git a/Source/WTF/wtf/text/CString.h b/Source/WTF/wtf/text/CString.h
index 1941a2dbe..4d8d80399 100644
--- a/Source/WTF/wtf/text/CString.h
+++ b/Source/WTF/wtf/text/CString.h
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -28,7 +28,7 @@
#include <wtf/HashFunctions.h>
#include <wtf/HashTraits.h>
-#include <wtf/PassRefPtr.h>
+#include <wtf/Ref.h>
#include <wtf/RefCounted.h>
namespace WTF {
@@ -43,7 +43,7 @@ public:
private:
friend class CString;
- static PassRefPtr<CStringBuffer> createUninitialized(size_t length);
+ static Ref<CStringBuffer> createUninitialized(size_t length);
CStringBuffer(size_t length) : m_length(length) { }
char* mutableData() { return reinterpret_cast_ptr<char*>(this + 1); }
diff --git a/Source/WTF/wtf/text/IntegerToStringConversion.h b/Source/WTF/wtf/text/IntegerToStringConversion.h
index 649fb05ef..563614d4f 100644
--- a/Source/WTF/wtf/text/IntegerToStringConversion.h
+++ b/Source/WTF/wtf/text/IntegerToStringConversion.h
@@ -22,8 +22,6 @@
#ifndef IntegerToStringConversion_h
#define IntegerToStringConversion_h
-#include "StringBuilder.h"
-
namespace WTF {
enum PositiveOrNegativeNumber {
@@ -33,22 +31,6 @@ enum PositiveOrNegativeNumber {
template<typename T> struct IntegerToStringConversionTrait;
-template<> struct IntegerToStringConversionTrait<AtomicString> {
- typedef AtomicString ReturnType;
- typedef void AdditionalArgumentType;
- static ReturnType flush(LChar* characters, unsigned length, void*) { return AtomicString(characters, length); }
-};
-template<> struct IntegerToStringConversionTrait<String> {
- typedef String ReturnType;
- typedef void AdditionalArgumentType;
- static ReturnType flush(LChar* characters, unsigned length, void*) { return String(characters, length); }
-};
-template<> struct IntegerToStringConversionTrait<StringBuilder> {
- typedef void ReturnType;
- typedef StringBuilder AdditionalArgumentType;
- static ReturnType flush(LChar* characters, unsigned length, StringBuilder* stringBuilder) { stringBuilder->append(characters, length); }
-};
-
template<typename T, typename UnsignedIntegerType, PositiveOrNegativeNumber NumberType, typename AdditionalArgumentType>
static typename IntegerToStringConversionTrait<T>::ReturnType numberToStringImpl(UnsignedIntegerType number, AdditionalArgumentType additionalArgument)
{
@@ -81,6 +63,72 @@ inline typename IntegerToStringConversionTrait<T>::ReturnType numberToStringUnsi
return numberToStringImpl<T, UnsignedIntegerType, PositiveNumber>(number, additionalArgument);
}
+
+template<typename CharacterType, typename UnsignedIntegerType, PositiveOrNegativeNumber NumberType>
+static void writeNumberToBufferImpl(UnsignedIntegerType number, CharacterType* destination)
+{
+ LChar buf[sizeof(UnsignedIntegerType) * 3 + 1];
+ LChar* end = buf + WTF_ARRAY_LENGTH(buf);
+ LChar* p = end;
+
+ do {
+ *--p = static_cast<LChar>((number % 10) + '0');
+ number /= 10;
+ } while (number);
+
+ if (NumberType == NegativeNumber)
+ *--p = '-';
+
+ while (p < end)
+ *destination++ = static_cast<CharacterType>(*p++);
+}
+
+template<typename CharacterType, typename SignedIntegerType>
+inline void writeNumberToBufferSigned(SignedIntegerType number, CharacterType* destination)
+{
+ if (number < 0)
+ return writeNumberToBufferImpl<CharacterType, typename std::make_unsigned<SignedIntegerType>::type, NegativeNumber>(-number, destination);
+ return writeNumberToBufferImpl<CharacterType, typename std::make_unsigned<SignedIntegerType>::type, PositiveNumber>(number, destination);
+}
+
+template<typename CharacterType, typename UnsignedIntegerType>
+inline void writeNumberToBufferUnsigned(UnsignedIntegerType number, CharacterType* destination)
+{
+ return writeNumberToBufferImpl<CharacterType, UnsignedIntegerType, PositiveNumber>(number, destination);
+}
+
+
+template<typename UnsignedIntegerType, PositiveOrNegativeNumber NumberType>
+static unsigned lengthOfNumberAsStringImpl(UnsignedIntegerType number)
+{
+ unsigned length = 0;
+
+ do {
+ ++length;
+ number /= 10;
+ } while (number);
+
+ if (NumberType == NegativeNumber)
+ ++length;
+
+ return length;
+}
+
+template<typename SignedIntegerType>
+inline unsigned lengthOfNumberAsStringSigned(SignedIntegerType number)
+{
+ if (number < 0)
+ return lengthOfNumberAsStringImpl<typename std::make_unsigned<SignedIntegerType>::type, NegativeNumber>(-number);
+ return lengthOfNumberAsStringImpl<typename std::make_unsigned<SignedIntegerType>::type, PositiveNumber>(number);
+}
+
+template<typename UnsignedIntegerType>
+inline unsigned lengthOfNumberAsStringUnsigned(UnsignedIntegerType number)
+{
+ return lengthOfNumberAsStringImpl<UnsignedIntegerType, PositiveNumber>(number);
+}
+
+
} // namespace WTF
#endif // IntegerToStringConversion_h
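
A sketch of how the two new helper families pair up; the buffer handling shown here is an assumption for illustration.

    #include <wtf/Vector.h>
    #include <wtf/text/IntegerToStringConversion.h>
    #include <wtf/text/LChar.h>

    int value = -12345;
    unsigned length = WTF::lengthOfNumberAsStringSigned(value); // 6: five digits plus the '-' sign.
    Vector<LChar> buffer(length);
    WTF::writeNumberToBufferSigned(value, buffer.data());       // Writes exactly `length` characters, no null terminator.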
diff --git a/Source/WTF/wtf/text/LChar.h b/Source/WTF/wtf/text/LChar.h
index b7bb89794..4d31dafb9 100644
--- a/Source/WTF/wtf/text/LChar.h
+++ b/Source/WTF/wtf/text/LChar.h
@@ -10,17 +10,17 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
@@ -29,7 +29,7 @@
// A type to hold a single Latin-1 character.
// This type complements the UChar type that we get from the ICU library.
-// To parallel that type, we put it outside any namespace.
+// To parallel that type, we put this one in the global namespace.
typedef unsigned char LChar;
#endif
diff --git a/Source/WTF/wtf/text/LineBreakIteratorPoolICU.h b/Source/WTF/wtf/text/LineBreakIteratorPoolICU.h
new file mode 100644
index 000000000..0cbae4030
--- /dev/null
+++ b/Source/WTF/wtf/text/LineBreakIteratorPoolICU.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "TextBreakIterator.h"
+#include <unicode/uloc.h>
+#include <wtf/HashMap.h>
+#include <wtf/NeverDestroyed.h>
+#include <wtf/ThreadSpecific.h>
+#include <wtf/text/AtomicString.h>
+
+namespace WTF {
+
+class LineBreakIteratorPool {
+ WTF_MAKE_NONCOPYABLE(LineBreakIteratorPool);
+public:
+ LineBreakIteratorPool() = default;
+
+ static LineBreakIteratorPool& sharedPool()
+ {
+ static NeverDestroyed<WTF::ThreadSpecific<LineBreakIteratorPool>> pool;
+ return *pool.get();
+ }
+
+ static AtomicString makeLocaleWithBreakKeyword(const AtomicString& locale, LineBreakIteratorMode mode)
+ {
+ // The uloc functions model locales as char*, so we have to downconvert our AtomicString.
+ auto utf8Locale = locale.string().utf8();
+ if (!utf8Locale.length())
+ return locale;
+ Vector<char> scratchBuffer(utf8Locale.length() + 11, 0);
+ memcpy(scratchBuffer.data(), utf8Locale.data(), utf8Locale.length());
+
+ const char* keywordValue = nullptr;
+ switch (mode) {
+ case LineBreakIteratorMode::Default:
+ // nullptr will cause any existing values to be removed.
+ break;
+ case LineBreakIteratorMode::Loose:
+ keywordValue = "loose";
+ break;
+ case LineBreakIteratorMode::Normal:
+ keywordValue = "normal";
+ break;
+ case LineBreakIteratorMode::Strict:
+ keywordValue = "strict";
+ break;
+ }
+
+ UErrorCode status = U_ZERO_ERROR;
+ int32_t lengthNeeded = uloc_setKeywordValue("lb", keywordValue, scratchBuffer.data(), scratchBuffer.size(), &status);
+ if (U_SUCCESS(status))
+ return AtomicString::fromUTF8(scratchBuffer.data(), lengthNeeded);
+ if (status == U_BUFFER_OVERFLOW_ERROR) {
+ scratchBuffer.grow(lengthNeeded + 1);
+ memset(scratchBuffer.data() + utf8Locale.length(), 0, scratchBuffer.size() - utf8Locale.length());
+ status = U_ZERO_ERROR;
+ int32_t lengthNeeded2 = uloc_setKeywordValue("lb", keywordValue, scratchBuffer.data(), scratchBuffer.size(), &status);
+ if (!U_SUCCESS(status) || lengthNeeded != lengthNeeded2)
+ return locale;
+ return AtomicString::fromUTF8(scratchBuffer.data(), lengthNeeded);
+ }
+ return locale;
+ }
+
+ UBreakIterator* take(const AtomicString& locale, LineBreakIteratorMode mode)
+ {
+ auto localeWithOptionalBreakKeyword = makeLocaleWithBreakKeyword(locale, mode);
+
+ UBreakIterator* iterator = nullptr;
+ for (size_t i = 0; i < m_pool.size(); ++i) {
+ if (m_pool[i].first == localeWithOptionalBreakKeyword) {
+ iterator = m_pool[i].second;
+ m_pool.remove(i);
+ break;
+ }
+ }
+
+ if (!iterator) {
+ iterator = openLineBreakIterator(localeWithOptionalBreakKeyword);
+ if (!iterator)
+ return nullptr;
+ }
+
+ ASSERT(!m_vendedIterators.contains(iterator));
+ m_vendedIterators.add(iterator, localeWithOptionalBreakKeyword);
+ return iterator;
+ }
+
+ void put(UBreakIterator* iterator)
+ {
+ ASSERT(m_vendedIterators.contains(iterator));
+ if (m_pool.size() == capacity) {
+ closeLineBreakIterator(m_pool[0].second);
+ m_pool.remove(0);
+ }
+ m_pool.uncheckedAppend({ m_vendedIterators.take(iterator), iterator });
+ }
+
+private:
+ static constexpr size_t capacity = 4;
+
+ Vector<std::pair<AtomicString, UBreakIterator*>, capacity> m_pool;
+ HashMap<UBreakIterator*, AtomicString> m_vendedIterators;
+
+ friend WTF::ThreadSpecific<LineBreakIteratorPool>::operator LineBreakIteratorPool*();
+};
+
+}
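
A hedged usage sketch for the per-thread pool; the locale literal and the elided ubrk_* calls are assumptions about the caller, not part of this header.

    #include <wtf/text/LineBreakIteratorPoolICU.h>

    auto& pool = WTF::LineBreakIteratorPool::sharedPool();
    if (UBreakIterator* iterator = pool.take(AtomicString("ja"), LineBreakIteratorMode::Strict)) {
        // ... point the iterator at text with ubrk_setText() and walk breaks with ubrk_following() ...
        pool.put(iterator); // Returns it to the cache; at most `capacity` (4) iterators are kept per thread.
    }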
diff --git a/Source/WTF/wtf/text/OrdinalNumber.h b/Source/WTF/wtf/text/OrdinalNumber.h
new file mode 100644
index 000000000..bb5d62d66
--- /dev/null
+++ b/Source/WTF/wtf/text/OrdinalNumber.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace WTF {
+
+// An abstract ordinal number of an element in a sequence. The sequence has a first element.
+// This type should be used instead of a plain integer because two conflicting traditions
+// can call the first element either '0' or '1', which makes a bare integer ambiguous.
+class OrdinalNumber {
+public:
+ static OrdinalNumber beforeFirst() { return OrdinalNumber(-1); }
+ static OrdinalNumber fromZeroBasedInt(int zeroBasedInt) { return OrdinalNumber(zeroBasedInt); }
+ static OrdinalNumber fromOneBasedInt(int oneBasedInt) { return OrdinalNumber(oneBasedInt - 1); }
+
+ OrdinalNumber() : m_zeroBasedValue(0) { }
+
+ int zeroBasedInt() const { return m_zeroBasedValue; }
+ int oneBasedInt() const { return m_zeroBasedValue + 1; }
+
+ bool operator==(OrdinalNumber other) { return m_zeroBasedValue == other.m_zeroBasedValue; }
+ bool operator!=(OrdinalNumber other) { return !((*this) == other); }
+ bool operator>(OrdinalNumber other) { return m_zeroBasedValue > other.m_zeroBasedValue; }
+
+private:
+ OrdinalNumber(int zeroBasedInt) : m_zeroBasedValue(zeroBasedInt) { }
+ int m_zeroBasedValue;
+};
+
+}
+
+using WTF::OrdinalNumber;
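
A minimal sketch of the zero-based/one-based round trip the class makes explicit.

    OrdinalNumber line = OrdinalNumber::fromOneBasedInt(1);       // The first line of a document.
    int index = line.zeroBasedInt();                              // 0, suitable for array indexing.
    int display = line.oneBasedInt();                             // 1, suitable for user-facing output.
    bool beforeStart = (line == OrdinalNumber::beforeFirst());    // false; beforeFirst() is zero-based -1.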
diff --git a/Source/WTF/wtf/text/StringBuffer.h b/Source/WTF/wtf/text/StringBuffer.h
index 22e161101..f293d333d 100644
--- a/Source/WTF/wtf/text/StringBuffer.h
+++ b/Source/WTF/wtf/text/StringBuffer.h
@@ -30,8 +30,8 @@
#define StringBuffer_h
#include <wtf/Assertions.h>
-#include <wtf/unicode/Unicode.h>
#include <limits>
+#include <unicode/utypes.h>
namespace WTF {
diff --git a/Source/WTF/wtf/text/StringBuilder.cpp b/Source/WTF/wtf/text/StringBuilder.cpp
index c483ba146..436015a43 100644
--- a/Source/WTF/wtf/text/StringBuilder.cpp
+++ b/Source/WTF/wtf/text/StringBuilder.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2013, 2016 Apple Inc. All rights reserved.
* Copyright (C) 2012 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -28,6 +28,7 @@
#include "StringBuilder.h"
#include "IntegerToStringConversion.h"
+#include "MathExtras.h"
#include "WTFString.h"
#include <wtf/dtoa.h>
@@ -58,12 +59,7 @@ void StringBuilder::reifyString() const
if (m_length == m_buffer->length())
m_string = m_buffer.get();
else
- m_string = StringImpl::create(m_buffer, 0, m_length);
-
- if (m_buffer->has16BitShadow() && m_valid16BitShadowLength < m_length)
- m_buffer->upconvertCharacters(m_valid16BitShadowLength, m_length);
-
- m_valid16BitShadowLength = m_length;
+ m_string = StringImpl::createSubstringSharingImpl(*m_buffer, 0, m_length);
}
void StringBuilder::resize(unsigned newSize)
@@ -84,6 +80,7 @@ void StringBuilder::resize(unsigned newSize)
allocateBuffer(m_buffer->characters16(), m_buffer->length());
}
m_length = newSize;
+ ASSERT(m_buffer->length() >= m_length);
return;
}
@@ -92,7 +89,7 @@ void StringBuilder::resize(unsigned newSize)
ASSERT(m_length == m_string.length());
ASSERT(newSize < m_string.length());
m_length = newSize;
- m_string = StringImpl::create(m_string.impl(), 0, newSize);
+ m_string = StringImpl::createSubstringSharingImpl(*m_string.impl(), 0, newSize);
}
// Allocate a new 8 bit buffer, copying in currentCharacters (these may come from either m_string
@@ -101,12 +98,13 @@ void StringBuilder::allocateBuffer(const LChar* currentCharacters, unsigned requ
{
ASSERT(m_is8Bit);
// Copy the existing data into a new buffer, set result to point to the end of the existing data.
- RefPtr<StringImpl> buffer = StringImpl::createUninitialized(requiredLength, m_bufferCharacters8);
+ auto buffer = StringImpl::createUninitialized(requiredLength, m_bufferCharacters8);
memcpy(m_bufferCharacters8, currentCharacters, static_cast<size_t>(m_length) * sizeof(LChar)); // This can't overflow.
// Update the builder state.
- m_buffer = buffer.release();
+ m_buffer = WTFMove(buffer);
m_string = String();
+ ASSERT(m_buffer->length() == requiredLength);
}
// Allocate a new 16 bit buffer, copying in currentCharacters (these may come from either m_string
@@ -115,12 +113,13 @@ void StringBuilder::allocateBuffer(const UChar* currentCharacters, unsigned requ
{
ASSERT(!m_is8Bit);
// Copy the existing data into a new buffer, set result to point to the end of the existing data.
- RefPtr<StringImpl> buffer = StringImpl::createUninitialized(requiredLength, m_bufferCharacters16);
+ auto buffer = StringImpl::createUninitialized(requiredLength, m_bufferCharacters16);
memcpy(m_bufferCharacters16, currentCharacters, static_cast<size_t>(m_length) * sizeof(UChar)); // This can't overflow.
// Update the builder state.
- m_buffer = buffer.release();
+ m_buffer = WTFMove(buffer);
m_string = String();
+ ASSERT(m_buffer->length() == requiredLength);
}
// Allocate a new 16 bit buffer, copying in currentCharacters (which is 8 bit and may come
@@ -128,16 +127,18 @@ void StringBuilder::allocateBuffer(const UChar* currentCharacters, unsigned requ
void StringBuilder::allocateBufferUpConvert(const LChar* currentCharacters, unsigned requiredLength)
{
ASSERT(m_is8Bit);
+ ASSERT(requiredLength >= m_length);
// Copy the existing data into a new buffer, set result to point to the end of the existing data.
- RefPtr<StringImpl> buffer = StringImpl::createUninitialized(requiredLength, m_bufferCharacters16);
+ auto buffer = StringImpl::createUninitialized(requiredLength, m_bufferCharacters16);
for (unsigned i = 0; i < m_length; ++i)
m_bufferCharacters16[i] = currentCharacters[i];
m_is8Bit = false;
// Update the builder state.
- m_buffer = buffer.release();
+ m_buffer = WTFMove(buffer);
m_string = String();
+ ASSERT(m_buffer->length() == requiredLength);
}
template <>
@@ -151,9 +152,10 @@ void StringBuilder::reallocateBuffer<LChar>(unsigned requiredLength)
ASSERT(m_buffer->is8Bit());
if (m_buffer->hasOneRef())
- m_buffer = StringImpl::reallocate(m_buffer.release(), requiredLength, m_bufferCharacters8);
+ m_buffer = StringImpl::reallocate(m_buffer.releaseNonNull(), requiredLength, m_bufferCharacters8);
else
allocateBuffer(m_buffer->characters8(), requiredLength);
+ ASSERT(m_buffer->length() == requiredLength);
}
template <>
@@ -166,9 +168,10 @@ void StringBuilder::reallocateBuffer<UChar>(unsigned requiredLength)
if (m_buffer->is8Bit())
allocateBufferUpConvert(m_buffer->characters8(), requiredLength);
else if (m_buffer->hasOneRef())
- m_buffer = StringImpl::reallocate(m_buffer.release(), requiredLength, m_bufferCharacters16);
+ m_buffer = StringImpl::reallocate(m_buffer.releaseNonNull(), requiredLength, m_bufferCharacters16);
else
allocateBuffer(m_buffer->characters16(), requiredLength);
+ ASSERT(m_buffer->length() == requiredLength);
}
void StringBuilder::reserveCapacity(unsigned newCapacity)
@@ -193,6 +196,7 @@ void StringBuilder::reserveCapacity(unsigned newCapacity)
allocateBuffer(m_string.characters16(), newCapacity);
}
}
+ ASSERT(!newCapacity || m_buffer->length() >= newCapacity);
}
// Make 'length' additional capacity be available in m_buffer, update m_string & m_length,
@@ -233,11 +237,12 @@ CharType* StringBuilder::appendUninitializedSlow(unsigned requiredLength)
reallocateBuffer<CharType>(expandedCapacity(capacity(), requiredLength));
} else {
ASSERT(m_string.length() == m_length);
- allocateBuffer(m_length ? m_string.getCharacters<CharType>() : 0, expandedCapacity(capacity(), requiredLength));
+ allocateBuffer(m_length ? m_string.characters<CharType>() : 0, expandedCapacity(capacity(), requiredLength));
}
CharType* result = getBufferCharacters<CharType>() + m_length;
m_length = requiredLength;
+ ASSERT(m_buffer->length() >= m_length);
return result;
}
@@ -271,10 +276,11 @@ void StringBuilder::append(const UChar* characters, unsigned length)
allocateBufferUpConvert(m_string.isNull() ? 0 : m_string.characters8(), expandedCapacity(capacity(), requiredLength));
}
- memcpy(m_bufferCharacters16 + m_length, characters, static_cast<size_t>(length) * sizeof(UChar));
+ memcpy(m_bufferCharacters16 + m_length, characters, static_cast<size_t>(length) * sizeof(UChar));
m_length = requiredLength;
} else
memcpy(appendUninitialized<UChar>(length), characters, static_cast<size_t>(length) * sizeof(UChar));
+ ASSERT(m_buffer->length() >= m_length);
}
void StringBuilder::append(const LChar* characters, unsigned length)
@@ -300,6 +306,20 @@ void StringBuilder::append(const LChar* characters, unsigned length)
}
}
+#if USE(CF)
+
+void StringBuilder::append(CFStringRef string)
+{
+ // Fast path: avoid constructing a temporary String when possible.
+ if (auto* characters = CFStringGetCStringPtr(string, kCFStringEncodingISOLatin1)) {
+ append(reinterpret_cast<const LChar*>(characters), CFStringGetLength(string));
+ return;
+ }
+ append(String(string));
+}
+
+#endif
+
void StringBuilder::appendNumber(int number)
{
numberToStringSigned<StringBuilder>(number, this);
@@ -361,8 +381,103 @@ void StringBuilder::shrinkToFit()
reallocateBuffer<LChar>(m_length);
else
reallocateBuffer<UChar>(m_length);
- m_string = m_buffer.release();
+ m_string = WTFMove(m_buffer);
+ }
+}
+
+template <typename OutputCharacterType, typename InputCharacterType>
+static void appendQuotedJSONStringInternalSlow(OutputCharacterType*& output, const InputCharacterType character)
+{
+ switch (character) {
+ case '\t':
+ *output++ = '\\';
+ *output++ = 't';
+ break;
+ case '\r':
+ *output++ = '\\';
+ *output++ = 'r';
+ break;
+ case '\n':
+ *output++ = '\\';
+ *output++ = 'n';
+ break;
+ case '\f':
+ *output++ = '\\';
+ *output++ = 'f';
+ break;
+ case '\b':
+ *output++ = '\\';
+ *output++ = 'b';
+ break;
+ default:
+ ASSERT(!(character & 0xFF00));
+ *output++ = '\\';
+ *output++ = 'u';
+ *output++ = '0';
+ *output++ = '0';
+ *output++ = upperNibbleToLowercaseASCIIHexDigit(character);
+ *output++ = lowerNibbleToLowercaseASCIIHexDigit(character);
+ break;
+ }
+}
+
+template <typename OutputCharacterType, typename InputCharacterType>
+static void appendQuotedJSONStringInternal(OutputCharacterType*& output, const InputCharacterType* input, unsigned length)
+{
+ for (const InputCharacterType* end = input + length; input != end; ++input) {
+ const InputCharacterType character = *input;
+ if (LIKELY(character != '"' && character != '\\' && character > 0x1F)) {
+ *output++ = character;
+ continue;
+ }
+
+ if (character == '"' || character == '\\') {
+ *output++ = '\\';
+ *output++ = character;
+ continue;
+ }
+
+ appendQuotedJSONStringInternalSlow(output, character);
+ }
+}
+
+void StringBuilder::appendQuotedJSONString(const String& string)
+{
+ // Make sure we have enough buffer space to append this string without having
+ // to worry about reallocating in the middle.
+ // The 2 is for the '"' quotes on each end.
+ // The 6 is for characters that need to be \uNNNN encoded.
+ Checked<unsigned> stringLength = string.length();
+ Checked<unsigned> maximumCapacityRequired = length();
+ maximumCapacityRequired += 2 + stringLength * 6;
+ unsigned allocationSize = maximumCapacityRequired.unsafeGet();
+ // This max() is here to allow us to allocate sizes in the range [2^31, 2^32 - 2] because roundUpToPowerOfTwo(1<<31 + some int smaller than 1<<31) == 0.
+ allocationSize = std::max(allocationSize, roundUpToPowerOfTwo(allocationSize));
+
+ if (is8Bit() && !string.is8Bit())
+ allocateBufferUpConvert(m_bufferCharacters8, allocationSize);
+ else
+ reserveCapacity(allocationSize);
+ ASSERT(m_buffer->length() >= allocationSize);
+
+ if (is8Bit()) {
+ ASSERT(string.is8Bit());
+ LChar* output = m_bufferCharacters8 + m_length;
+ *output++ = '"';
+ appendQuotedJSONStringInternal(output, string.characters8(), string.length());
+ *output++ = '"';
+ m_length = output - m_bufferCharacters8;
+ } else {
+ UChar* output = m_bufferCharacters16 + m_length;
+ *output++ = '"';
+ if (string.is8Bit())
+ appendQuotedJSONStringInternal(output, string.characters8(), string.length());
+ else
+ appendQuotedJSONStringInternal(output, string.characters16(), string.length());
+ *output++ = '"';
+ m_length = output - m_bufferCharacters16;
}
+ ASSERT(m_buffer->length() >= m_length);
}
} // namespace WTF
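
A sketch of the worst-case reservation made by appendQuotedJSONString(): it assumes every character needs the six-character \u00NN form plus the two quotes, even though most characters end up shorter. The sample string is an assumption.

    StringBuilder builder;
    builder.appendQuotedJSONString(String("a\tb"));
    // Reserved up front: length() + 2 + 3 * 6 characters.
    // Actually written: "a\tb" becomes "\"a\\tb\"", just 6 characters, because the tab
    // uses the short \t escape instead of \u0009.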
diff --git a/Source/WTF/wtf/text/StringBuilder.h b/Source/WTF/wtf/text/StringBuilder.h
index 26be90633..d02737a02 100644
--- a/Source/WTF/wtf/text/StringBuilder.h
+++ b/Source/WTF/wtf/text/StringBuilder.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2009-2010, 2012-2013, 2016 Apple Inc. All rights reserved.
* Copyright (C) 2012 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -28,6 +28,8 @@
#define StringBuilder_h
#include <wtf/text/AtomicString.h>
+#include <wtf/text/IntegerToStringConversion.h>
+#include <wtf/text/StringView.h>
#include <wtf/text/WTFString.h>
namespace WTF {
@@ -40,7 +42,6 @@ public:
StringBuilder()
: m_length(0)
, m_is8Bit(true)
- , m_valid16BitShadowLength(0)
, m_bufferCharacters8(0)
{
}
@@ -50,6 +51,11 @@ public:
ALWAYS_INLINE void append(const char* characters, unsigned length) { append(reinterpret_cast<const LChar*>(characters), length); }
+ void append(const AtomicString& atomicString)
+ {
+ append(atomicString.string());
+ }
+
void append(const String& string)
{
if (!string.length())
@@ -89,6 +95,21 @@ public:
append(other.characters16(), other.m_length);
}
+ void append(StringView stringView)
+ {
+ if (stringView.is8Bit())
+ append(stringView.characters8(), stringView.length());
+ else
+ append(stringView.characters16(), stringView.length());
+ }
+
+#if USE(CF)
+ WTF_EXPORT_PRIVATE void append(CFStringRef);
+#endif
+#if USE(CF) && defined(__OBJC__)
+ void append(NSString *string) { append((__bridge CFStringRef)string); }
+#endif
+
void append(const String& string, unsigned offset, unsigned length)
{
if (!string.length())
@@ -151,6 +172,8 @@ public:
append(U16_TRAIL(c));
}
+ WTF_EXPORT_PRIVATE void appendQuotedJSONString(const String&);
+
template<unsigned charactersCount>
ALWAYS_INLINE void appendLiteral(const char (&characters)[charactersCount]) { append(characters, charactersCount - 1); }
@@ -248,32 +271,15 @@ public:
return m_buffer->characters16();
}
- const UChar* characters() const { return deprecatedCharacters(); } // FIXME: Delete this.
- const UChar* deprecatedCharacters() const
- {
- if (!m_length)
- return 0;
- if (!m_string.isNull())
- return m_string.deprecatedCharacters();
- ASSERT(m_buffer);
- if (m_buffer->has16BitShadow() && m_valid16BitShadowLength < m_length)
- m_buffer->upconvertCharacters(m_valid16BitShadowLength, m_length);
-
- m_valid16BitShadowLength = m_length;
-
- return m_buffer->deprecatedCharacters();
- }
-
bool is8Bit() const { return m_is8Bit; }
void clear()
{
m_length = 0;
m_string = String();
- m_buffer = 0;
+ m_buffer = nullptr;
m_bufferCharacters8 = 0;
m_is8Bit = true;
- m_valid16BitShadowLength = 0;
}
void swap(StringBuilder& stringBuilder)
@@ -282,8 +288,8 @@ public:
m_string.swap(stringBuilder.m_string);
m_buffer.swap(stringBuilder.m_buffer);
std::swap(m_is8Bit, stringBuilder.m_is8Bit);
- std::swap(m_valid16BitShadowLength, stringBuilder.m_valid16BitShadowLength);
std::swap(m_bufferCharacters8, stringBuilder.m_bufferCharacters8);
+ ASSERT(!m_buffer || m_buffer->length() >= m_length);
}
private:
@@ -304,7 +310,6 @@ private:
mutable String m_string;
RefPtr<StringImpl> m_buffer;
bool m_is8Bit;
- mutable unsigned m_valid16BitShadowLength;
union {
LChar* m_bufferCharacters8;
UChar* m_bufferCharacters16;
@@ -364,6 +369,12 @@ inline bool operator!=(const StringBuilder& a, const String& b) { return !equal(
inline bool operator==(const String& a, const StringBuilder& b) { return equal(b, a); }
inline bool operator!=(const String& a, const StringBuilder& b) { return !equal(b, a); }
+template<> struct IntegerToStringConversionTrait<StringBuilder> {
+ using ReturnType = void;
+ using AdditionalArgumentType = StringBuilder;
+ static void flush(LChar* characters, unsigned length, StringBuilder* stringBuilder) { stringBuilder->append(characters, length); }
+};
+
} // namespace WTF
using WTF::StringBuilder;
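
A short sketch of how the relocated IntegerToStringConversionTrait specialization is exercised through StringBuilder; the literal suffix is an assumption.

    StringBuilder builder;
    builder.appendNumber(-42);   // numberToStringSigned<StringBuilder> flushes the digits into the builder via the trait above.
    builder.appendLiteral("px");
    // builder.toString() == "-42px"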
diff --git a/Source/WTF/wtf/text/StringCommon.h b/Source/WTF/wtf/text/StringCommon.h
new file mode 100644
index 000000000..d35d8905d
--- /dev/null
+++ b/Source/WTF/wtf/text/StringCommon.h
@@ -0,0 +1,656 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef StringCommon_h
+#define StringCommon_h
+
+#include <unicode/uchar.h>
+#include <wtf/ASCIICType.h>
+
+namespace WTF {
+
+template<typename CharacterTypeA, typename CharacterTypeB> bool equalIgnoringASCIICase(const CharacterTypeA*, const CharacterTypeB*, unsigned length);
+template<typename CharacterTypeA, typename CharacterTypeB> bool equalIgnoringASCIICase(const CharacterTypeA*, unsigned lengthA, const CharacterTypeB*, unsigned lengthB);
+
+template<typename StringClassA, typename StringClassB> bool equalIgnoringASCIICaseCommon(const StringClassA&, const StringClassB&);
+
+template<typename CharacterType> bool equalLettersIgnoringASCIICase(const CharacterType*, const char* lowercaseLetters, unsigned length);
+template<typename CharacterType, unsigned lowercaseLettersLength> bool equalLettersIgnoringASCIICase(const CharacterType*, unsigned charactersLength, const char (&lowercaseLetters)[lowercaseLettersLength]);
+
+template<typename StringClass, unsigned length> bool equalLettersIgnoringASCIICaseCommon(const StringClass&, const char (&lowercaseLetters)[length]);
+
+template<typename T>
+inline T loadUnaligned(const char* s)
+{
+#if COMPILER(CLANG)
+ T tmp;
+ memcpy(&tmp, s, sizeof(T));
+ return tmp;
+#else
+ // This may result in undefined behavior due to unaligned access.
+ return *reinterpret_cast<const T*>(s);
+#endif
+}
+
+// Do comparisons 8 or 4 bytes-at-a-time on architectures where it's safe.
+#if (CPU(X86_64) || CPU(ARM64)) && !ASAN_ENABLED
+ALWAYS_INLINE bool equal(const LChar* aLChar, const LChar* bLChar, unsigned length)
+{
+ unsigned dwordLength = length >> 3;
+
+ const char* a = reinterpret_cast<const char*>(aLChar);
+ const char* b = reinterpret_cast<const char*>(bLChar);
+
+ if (dwordLength) {
+ for (unsigned i = 0; i != dwordLength; ++i) {
+ if (loadUnaligned<uint64_t>(a) != loadUnaligned<uint64_t>(b))
+ return false;
+
+ a += sizeof(uint64_t);
+ b += sizeof(uint64_t);
+ }
+ }
+
+ if (length & 4) {
+ if (loadUnaligned<uint32_t>(a) != loadUnaligned<uint32_t>(b))
+ return false;
+
+ a += sizeof(uint32_t);
+ b += sizeof(uint32_t);
+ }
+
+ if (length & 2) {
+ if (loadUnaligned<uint16_t>(a) != loadUnaligned<uint16_t>(b))
+ return false;
+
+ a += sizeof(uint16_t);
+ b += sizeof(uint16_t);
+ }
+
+ if (length & 1 && (*reinterpret_cast<const LChar*>(a) != *reinterpret_cast<const LChar*>(b)))
+ return false;
+
+ return true;
+}
+
+ALWAYS_INLINE bool equal(const UChar* aUChar, const UChar* bUChar, unsigned length)
+{
+ unsigned dwordLength = length >> 2;
+
+ const char* a = reinterpret_cast<const char*>(aUChar);
+ const char* b = reinterpret_cast<const char*>(bUChar);
+
+ if (dwordLength) {
+ for (unsigned i = 0; i != dwordLength; ++i) {
+ if (loadUnaligned<uint64_t>(a) != loadUnaligned<uint64_t>(b))
+ return false;
+
+ a += sizeof(uint64_t);
+ b += sizeof(uint64_t);
+ }
+ }
+
+ if (length & 2) {
+ if (loadUnaligned<uint32_t>(a) != loadUnaligned<uint32_t>(b))
+ return false;
+
+ a += sizeof(uint32_t);
+ b += sizeof(uint32_t);
+ }
+
+ if (length & 1 && (*reinterpret_cast<const UChar*>(a) != *reinterpret_cast<const UChar*>(b)))
+ return false;
+
+ return true;
+}
+#elif CPU(X86) && !ASAN_ENABLED
+ALWAYS_INLINE bool equal(const LChar* aLChar, const LChar* bLChar, unsigned length)
+{
+ const char* a = reinterpret_cast<const char*>(aLChar);
+ const char* b = reinterpret_cast<const char*>(bLChar);
+
+ unsigned wordLength = length >> 2;
+ for (unsigned i = 0; i != wordLength; ++i) {
+ if (loadUnaligned<uint32_t>(a) != loadUnaligned<uint32_t>(b))
+ return false;
+ a += sizeof(uint32_t);
+ b += sizeof(uint32_t);
+ }
+
+ length &= 3;
+
+ if (length) {
+ const LChar* aRemainder = reinterpret_cast<const LChar*>(a);
+ const LChar* bRemainder = reinterpret_cast<const LChar*>(b);
+
+ for (unsigned i = 0; i < length; ++i) {
+ if (aRemainder[i] != bRemainder[i])
+ return false;
+ }
+ }
+
+ return true;
+}
+
+ALWAYS_INLINE bool equal(const UChar* aUChar, const UChar* bUChar, unsigned length)
+{
+ const char* a = reinterpret_cast<const char*>(aUChar);
+ const char* b = reinterpret_cast<const char*>(bUChar);
+
+ unsigned wordLength = length >> 1;
+ for (unsigned i = 0; i != wordLength; ++i) {
+ if (loadUnaligned<uint32_t>(a) != loadUnaligned<uint32_t>(b))
+ return false;
+ a += sizeof(uint32_t);
+ b += sizeof(uint32_t);
+ }
+
+ if (length & 1 && *reinterpret_cast<const UChar*>(a) != *reinterpret_cast<const UChar*>(b))
+ return false;
+
+ return true;
+}
+#elif PLATFORM(IOS) && WTF_ARM_ARCH_AT_LEAST(7) && !ASAN_ENABLED
+ALWAYS_INLINE bool equal(const LChar* a, const LChar* b, unsigned length)
+{
+ bool isEqual = false;
+ uint32_t aValue;
+ uint32_t bValue;
+ asm("subs %[length], #4\n"
+ "blo 2f\n"
+
+ "0:\n" // Label 0 = Start of loop over 32 bits.
+ "ldr %[aValue], [%[a]], #4\n"
+ "ldr %[bValue], [%[b]], #4\n"
+ "cmp %[aValue], %[bValue]\n"
+ "bne 66f\n"
+ "subs %[length], #4\n"
+ "bhs 0b\n"
+
+ // At this point, length can be:
+ // -0: 00000000000000000000000000000000 (0 bytes left)
+ // -1: 11111111111111111111111111111111 (3 bytes left)
+ // -2: 11111111111111111111111111111110 (2 bytes left)
+ // -3: 11111111111111111111111111111101 (1 byte left)
+ // -4: 11111111111111111111111111111100 (length was 0)
+ // The pointers are at the correct position.
+ "2:\n" // Label 2 = End of loop over 32 bits, check for pair of characters.
+ "tst %[length], #2\n"
+ "beq 1f\n"
+ "ldrh %[aValue], [%[a]], #2\n"
+ "ldrh %[bValue], [%[b]], #2\n"
+ "cmp %[aValue], %[bValue]\n"
+ "bne 66f\n"
+
+ "1:\n" // Label 1 = Check for a single character left.
+ "tst %[length], #1\n"
+ "beq 42f\n"
+ "ldrb %[aValue], [%[a]]\n"
+ "ldrb %[bValue], [%[b]]\n"
+ "cmp %[aValue], %[bValue]\n"
+ "bne 66f\n"
+
+ "42:\n" // Label 42 = Success.
+ "mov %[isEqual], #1\n"
+ "66:\n" // Label 66 = End without changing isEqual to 1.
+ : [length]"+r"(length), [isEqual]"+r"(isEqual), [a]"+r"(a), [b]"+r"(b), [aValue]"+r"(aValue), [bValue]"+r"(bValue)
+ :
+ :
+ );
+ return isEqual;
+}
+
+ALWAYS_INLINE bool equal(const UChar* a, const UChar* b, unsigned length)
+{
+ bool isEqual = false;
+ uint32_t aValue;
+ uint32_t bValue;
+ asm("subs %[length], #2\n"
+ "blo 1f\n"
+
+ "0:\n" // Label 0 = Start of loop over 32 bits.
+ "ldr %[aValue], [%[a]], #4\n"
+ "ldr %[bValue], [%[b]], #4\n"
+ "cmp %[aValue], %[bValue]\n"
+ "bne 66f\n"
+ "subs %[length], #2\n"
+ "bhs 0b\n"
+
+ // At this point, length can be:
+ // -0: 00000000000000000000000000000000 (0 bytes left)
+ // -1: 11111111111111111111111111111111 (1 character left, 2 bytes)
+ // -2: 11111111111111111111111111111110 (length was zero)
+ // The pointers are at the correct position.
+ "1:\n" // Label 1 = Check for a single character left.
+ "tst %[length], #1\n"
+ "beq 42f\n"
+ "ldrh %[aValue], [%[a]]\n"
+ "ldrh %[bValue], [%[b]]\n"
+ "cmp %[aValue], %[bValue]\n"
+ "bne 66f\n"
+
+ "42:\n" // Label 42 = Success.
+ "mov %[isEqual], #1\n"
+ "66:\n" // Label 66 = End without changing isEqual to 1.
+ : [length]"+r"(length), [isEqual]"+r"(isEqual), [a]"+r"(a), [b]"+r"(b), [aValue]"+r"(aValue), [bValue]"+r"(bValue)
+ :
+ :
+ );
+ return isEqual;
+}
+#elif !ASAN_ENABLED
+ALWAYS_INLINE bool equal(const LChar* a, const LChar* b, unsigned length) { return !memcmp(a, b, length); }
+ALWAYS_INLINE bool equal(const UChar* a, const UChar* b, unsigned length) { return !memcmp(a, b, length * sizeof(UChar)); }
+#else
+ALWAYS_INLINE bool equal(const LChar* a, const LChar* b, unsigned length)
+{
+ for (unsigned i = 0; i < length; ++i) {
+ if (a[i] != b[i])
+ return false;
+ }
+ return true;
+}
+ALWAYS_INLINE bool equal(const UChar* a, const UChar* b, unsigned length)
+{
+ for (unsigned i = 0; i < length; ++i) {
+ if (a[i] != b[i])
+ return false;
+ }
+ return true;
+}
+#endif
+
+ALWAYS_INLINE bool equal(const LChar* a, const UChar* b, unsigned length)
+{
+ for (unsigned i = 0; i < length; ++i) {
+ if (a[i] != b[i])
+ return false;
+ }
+ return true;
+}
+
+ALWAYS_INLINE bool equal(const UChar* a, const LChar* b, unsigned length) { return equal(b, a, length); }
+
+template<typename StringClassA, typename StringClassB>
+ALWAYS_INLINE bool equalCommon(const StringClassA& a, const StringClassB& b)
+{
+ unsigned length = a.length();
+ if (length != b.length())
+ return false;
+
+ if (a.is8Bit()) {
+ if (b.is8Bit())
+ return equal(a.characters8(), b.characters8(), length);
+
+ return equal(a.characters8(), b.characters16(), length);
+ }
+
+ if (b.is8Bit())
+ return equal(a.characters16(), b.characters8(), length);
+
+ return equal(a.characters16(), b.characters16(), length);
+}
+
+template<typename StringClassA, typename StringClassB>
+ALWAYS_INLINE bool equalCommon(const StringClassA* a, const StringClassB* b)
+{
+ if (a == b)
+ return true;
+ if (!a || !b)
+ return false;
+ return equal(*a, *b);
+}
+
+template<typename StringClass, unsigned length> bool equal(const StringClass& a, const UChar (&codeUnits)[length])
+{
+ if (a.length() != length)
+ return false;
+
+ if (a.is8Bit())
+ return equal(a.characters8(), codeUnits, length);
+
+ return equal(a.characters16(), codeUnits, length);
+}
+
+template<typename CharacterTypeA, typename CharacterTypeB>
+inline bool equalIgnoringASCIICase(const CharacterTypeA* a, const CharacterTypeB* b, unsigned length)
+{
+ for (unsigned i = 0; i < length; ++i) {
+ if (toASCIILower(a[i]) != toASCIILower(b[i]))
+ return false;
+ }
+ return true;
+}
+
+template<typename CharacterTypeA, typename CharacterTypeB> inline bool equalIgnoringASCIICase(const CharacterTypeA* a, unsigned lengthA, const CharacterTypeB* b, unsigned lengthB)
+{
+ return lengthA == lengthB && equalIgnoringASCIICase(a, b, lengthA);
+}
+
+template<typename StringClassA, typename StringClassB>
+bool equalIgnoringASCIICaseCommon(const StringClassA& a, const StringClassB& b)
+{
+ unsigned length = a.length();
+ if (length != b.length())
+ return false;
+
+ if (a.is8Bit()) {
+ if (b.is8Bit())
+ return equalIgnoringASCIICase(a.characters8(), b.characters8(), length);
+
+ return equalIgnoringASCIICase(a.characters8(), b.characters16(), length);
+ }
+
+ if (b.is8Bit())
+ return equalIgnoringASCIICase(a.characters16(), b.characters8(), length);
+
+ return equalIgnoringASCIICase(a.characters16(), b.characters16(), length);
+}
+
+template<typename StringClassA> bool equalIgnoringASCIICaseCommon(const StringClassA& a, const char* b)
+{
+ unsigned length = a.length();
+ if (length != strlen(b))
+ return false;
+
+ if (a.is8Bit())
+ return equalIgnoringASCIICase(a.characters8(), b, length);
+
+ return equalIgnoringASCIICase(a.characters16(), b, length);
+}
+
+template<typename StringClassA, typename StringClassB>
+bool startsWith(const StringClassA& reference, const StringClassB& prefix)
+{
+ unsigned prefixLength = prefix.length();
+ if (prefixLength > reference.length())
+ return false;
+
+ if (reference.is8Bit()) {
+ if (prefix.is8Bit())
+ return equal(reference.characters8(), prefix.characters8(), prefixLength);
+ return equal(reference.characters8(), prefix.characters16(), prefixLength);
+ }
+ if (prefix.is8Bit())
+ return equal(reference.characters16(), prefix.characters8(), prefixLength);
+ return equal(reference.characters16(), prefix.characters16(), prefixLength);
+}
+
+template<typename StringClassA, typename StringClassB>
+bool startsWithIgnoringASCIICase(const StringClassA& reference, const StringClassB& prefix)
+{
+ unsigned prefixLength = prefix.length();
+ if (prefixLength > reference.length())
+ return false;
+
+ if (reference.is8Bit()) {
+ if (prefix.is8Bit())
+ return equalIgnoringASCIICase(reference.characters8(), prefix.characters8(), prefixLength);
+ return equalIgnoringASCIICase(reference.characters8(), prefix.characters16(), prefixLength);
+ }
+ if (prefix.is8Bit())
+ return equalIgnoringASCIICase(reference.characters16(), prefix.characters8(), prefixLength);
+ return equalIgnoringASCIICase(reference.characters16(), prefix.characters16(), prefixLength);
+}
+
+template<typename StringClassA, typename StringClassB>
+bool endsWith(const StringClassA& reference, const StringClassB& suffix)
+{
+ unsigned suffixLength = suffix.length();
+ unsigned referenceLength = reference.length();
+ if (suffixLength > referenceLength)
+ return false;
+
+ unsigned startOffset = referenceLength - suffixLength;
+
+ if (reference.is8Bit()) {
+ if (suffix.is8Bit())
+ return equal(reference.characters8() + startOffset, suffix.characters8(), suffixLength);
+ return equal(reference.characters8() + startOffset, suffix.characters16(), suffixLength);
+ }
+ if (suffix.is8Bit())
+ return equal(reference.characters16() + startOffset, suffix.characters8(), suffixLength);
+ return equal(reference.characters16() + startOffset, suffix.characters16(), suffixLength);
+}
+
+template<typename StringClassA, typename StringClassB>
+bool endsWithIgnoringASCIICase(const StringClassA& reference, const StringClassB& suffix)
+{
+ unsigned suffixLength = suffix.length();
+ unsigned referenceLength = reference.length();
+ if (suffixLength > referenceLength)
+ return false;
+
+ unsigned startOffset = referenceLength - suffixLength;
+
+ if (reference.is8Bit()) {
+ if (suffix.is8Bit())
+ return equalIgnoringASCIICase(reference.characters8() + startOffset, suffix.characters8(), suffixLength);
+ return equalIgnoringASCIICase(reference.characters8() + startOffset, suffix.characters16(), suffixLength);
+ }
+ if (suffix.is8Bit())
+ return equalIgnoringASCIICase(reference.characters16() + startOffset, suffix.characters8(), suffixLength);
+ return equalIgnoringASCIICase(reference.characters16() + startOffset, suffix.characters16(), suffixLength);
+}
+
+template <typename SearchCharacterType, typename MatchCharacterType>
+size_t findIgnoringASCIICase(const SearchCharacterType* source, const MatchCharacterType* matchCharacters, unsigned startOffset, unsigned searchLength, unsigned matchLength)
+{
+ ASSERT(searchLength >= matchLength);
+
+ const SearchCharacterType* startSearchedCharacters = source + startOffset;
+
+ // delta is the number of additional times to test; delta == 0 means test only once.
+ unsigned delta = searchLength - matchLength;
+
+ for (unsigned i = 0; i <= delta; ++i) {
+ if (equalIgnoringASCIICase(startSearchedCharacters + i, matchCharacters, matchLength))
+ return startOffset + i;
+ }
+ return notFound;
+}
+
+template<typename StringClassA, typename StringClassB>
+size_t findIgnoringASCIICase(const StringClassA& source, const StringClassB& stringToFind, unsigned startOffset)
+{
+ unsigned sourceStringLength = source.length();
+ unsigned matchLength = stringToFind.length();
+ if (!matchLength)
+ return std::min(startOffset, sourceStringLength);
+
+ // Check startOffset & matchLength are in range.
+ if (startOffset > sourceStringLength)
+ return notFound;
+ unsigned searchLength = sourceStringLength - startOffset;
+ if (matchLength > searchLength)
+ return notFound;
+
+ if (source.is8Bit()) {
+ if (stringToFind.is8Bit())
+ return findIgnoringASCIICase(source.characters8(), stringToFind.characters8(), startOffset, searchLength, matchLength);
+ return findIgnoringASCIICase(source.characters8(), stringToFind.characters16(), startOffset, searchLength, matchLength);
+ }
+
+ if (stringToFind.is8Bit())
+ return findIgnoringASCIICase(source.characters16(), stringToFind.characters8(), startOffset, searchLength, matchLength);
+
+ return findIgnoringASCIICase(source.characters16(), stringToFind.characters16(), startOffset, searchLength, matchLength);
+}
+
+template <typename SearchCharacterType, typename MatchCharacterType>
+ALWAYS_INLINE static size_t findInner(const SearchCharacterType* searchCharacters, const MatchCharacterType* matchCharacters, unsigned index, unsigned searchLength, unsigned matchLength)
+{
+ // Optimization: keep a running hash of the strings,
+ // only call equal() if the hashes match.
+
+ // delta is the number of additional times to test; delta == 0 means test only once.
+ unsigned delta = searchLength - matchLength;
+
+ unsigned searchHash = 0;
+ unsigned matchHash = 0;
+
+ for (unsigned i = 0; i < matchLength; ++i) {
+ searchHash += searchCharacters[i];
+ matchHash += matchCharacters[i];
+ }
+
+ unsigned i = 0;
+ // Keep looping until we find a match.
+ while (searchHash != matchHash || !equal(searchCharacters + i, matchCharacters, matchLength)) {
+ if (i == delta)
+ return notFound;
+ searchHash += searchCharacters[i + matchLength];
+ searchHash -= searchCharacters[i];
+ ++i;
+ }
+ return index + i;
+}
+
+template<typename CharacterType>
+inline size_t find(const CharacterType* characters, unsigned length, CharacterType matchCharacter, unsigned index = 0)
+{
+ while (index < length) {
+ if (characters[index] == matchCharacter)
+ return index;
+ ++index;
+ }
+ return notFound;
+}
+
+ALWAYS_INLINE size_t find(const UChar* characters, unsigned length, LChar matchCharacter, unsigned index = 0)
+{
+ return find(characters, length, static_cast<UChar>(matchCharacter), index);
+}
+
+inline size_t find(const LChar* characters, unsigned length, UChar matchCharacter, unsigned index = 0)
+{
+ if (matchCharacter & ~0xFF)
+ return notFound;
+ return find(characters, length, static_cast<LChar>(matchCharacter), index);
+}
+
+template<typename StringClass>
+size_t findCommon(const StringClass& haystack, const StringClass& needle, unsigned start)
+{
+ unsigned needleLength = needle.length();
+
+ if (needleLength == 1) {
+ if (haystack.is8Bit())
+ return WTF::find(haystack.characters8(), haystack.length(), needle[0], start);
+ return WTF::find(haystack.characters16(), haystack.length(), needle[0], start);
+ }
+
+ if (!needleLength)
+ return std::min(start, haystack.length());
+
+ if (start > haystack.length())
+ return notFound;
+ unsigned searchLength = haystack.length() - start;
+ if (needleLength > searchLength)
+ return notFound;
+
+ if (haystack.is8Bit()) {
+ if (needle.is8Bit())
+ return findInner(haystack.characters8() + start, needle.characters8(), start, searchLength, needleLength);
+ return findInner(haystack.characters8() + start, needle.characters16(), start, searchLength, needleLength);
+ }
+
+ if (needle.is8Bit())
+ return findInner(haystack.characters16() + start, needle.characters8(), start, searchLength, needleLength);
+
+ return findInner(haystack.characters16() + start, needle.characters16(), start, searchLength, needleLength);
+}
+
+// This is marked inline since it's mostly used in non-inline functions for each string type.
+// When used directly in code it's probably OK to be inline; maybe the loop will be unrolled.
+template<typename CharacterType> inline bool equalLettersIgnoringASCIICase(const CharacterType* characters, const char* lowercaseLetters, unsigned length)
+{
+ for (unsigned i = 0; i < length; ++i) {
+ if (!isASCIIAlphaCaselessEqual(characters[i], lowercaseLetters[i]))
+ return false;
+ }
+ return true;
+}
+
+template<typename CharacterType, unsigned lowercaseLettersLength> inline bool equalLettersIgnoringASCIICase(const CharacterType* characters, unsigned charactersLength, const char (&lowercaseLetters)[lowercaseLettersLength])
+{
+ ASSERT(strlen(lowercaseLetters) == lowercaseLettersLength - 1);
+ unsigned lowercaseLettersStringLength = lowercaseLettersLength - 1;
+ return charactersLength == lowercaseLettersStringLength && equalLettersIgnoringASCIICase(characters, lowercaseLetters, lowercaseLettersStringLength);
+}
+
+template<typename StringClass> bool inline hasPrefixWithLettersIgnoringASCIICaseCommon(const StringClass& string, const char* lowercaseLetters, unsigned length)
+{
+#if !ASSERT_DISABLED
+ ASSERT(*lowercaseLetters);
+ for (const char* letter = lowercaseLetters; *letter; ++letter)
+ ASSERT(toASCIILowerUnchecked(*letter) == *letter);
+#endif
+ ASSERT(string.length() >= length);
+
+ if (string.is8Bit())
+ return equalLettersIgnoringASCIICase(string.characters8(), lowercaseLetters, length);
+ return equalLettersIgnoringASCIICase(string.characters16(), lowercaseLetters, length);
+}
+
+// This is intentionally not marked inline because it's used often and is not speed-critical enough to want it inlined everywhere.
+template<typename StringClass> bool equalLettersIgnoringASCIICaseCommonWithoutLength(const StringClass& string, const char* lowercaseLetters)
+{
+ unsigned length = string.length();
+ if (length != strlen(lowercaseLetters))
+ return false;
+ return hasPrefixWithLettersIgnoringASCIICaseCommon(string, lowercaseLetters, length);
+}
+
+template<typename StringClass> bool startsWithLettersIgnoringASCIICaseCommonWithoutLength(const StringClass& string, const char* lowercaseLetters)
+{
+ size_t prefixLength = strlen(lowercaseLetters);
+ if (!prefixLength)
+ return true;
+ if (string.length() < prefixLength)
+ return false;
+ return hasPrefixWithLettersIgnoringASCIICaseCommon(string, lowercaseLetters, prefixLength);
+}
+
+template<typename StringClass, unsigned length> inline bool equalLettersIgnoringASCIICaseCommon(const StringClass& string, const char (&lowercaseLetters)[length])
+{
+ // Don't actually use the length; we are choosing code size over speed.
+ ASSERT(strlen(lowercaseLetters) == length - 1);
+ const char* pointer = lowercaseLetters;
+ return equalLettersIgnoringASCIICaseCommonWithoutLength(string, pointer);
+}
+
+template<typename StringClass, unsigned length> inline bool startsWithLettersIgnoringASCIICaseCommon(const StringClass& string, const char (&lowercaseLetters)[length])
+{
+ const char* pointer = lowercaseLetters;
+ return startsWithLettersIgnoringASCIICaseCommonWithoutLength(string, pointer);
+}
+
+}
+
+using WTF::equalIgnoringASCIICase;
+using WTF::equalLettersIgnoringASCIICase;
+
+#endif // StringCommon_h
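
// Illustrative sketch, not part of the patch: driving the helpers defined in the
// StringCommon.h hunk above from WTF::String, which satisfies the StringClass
// requirements they assume (length(), is8Bit(), characters8(), characters16()).
// The function names here are examples only.
#include <wtf/text/StringCommon.h>
#include <wtf/text/WTFString.h>

static size_t findNeedle(const WTF::String& haystack, const WTF::String& needle)
{
    // Dispatches to the 8-bit/16-bit findInner() variants; returns WTF::notFound
    // when the needle is absent or the start offset is past the end.
    return WTF::findCommon(haystack, needle, 0);
}

static bool isInlineKeyword(const WTF::String& token)
{
    // The array-reference overload compares lengths first, then matches each
    // character with isASCIIAlphaCaselessEqual(), so the literal must be lowercase.
    return WTF::equalLettersIgnoringASCIICaseCommon(token, "inline");
}
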
diff --git a/Source/WTF/wtf/text/StringConcatenate.h b/Source/WTF/wtf/text/StringConcatenate.h
index baeccc1d2..affb7e195 100644
--- a/Source/WTF/wtf/text/StringConcatenate.h
+++ b/Source/WTF/wtf/text/StringConcatenate.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2010-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,10 +28,14 @@
#include <string.h>
-#ifndef WTFString_h
+#ifndef AtomicString_h
#include <wtf/text/AtomicString.h>
#endif
+#ifndef StringView_h
+#include <wtf/text/StringView.h>
+#endif
+
// This macro is helpful for testing how many intermediate Strings are created while evaluating an
// expression containing operator+.
#ifndef WTF_STRINGTYPEADAPTER_COPIED_WTF_STRING
@@ -41,382 +45,221 @@
namespace WTF {
template<typename StringType>
-class StringTypeAdapter {
-};
+class StringTypeAdapter;
template<>
class StringTypeAdapter<char> {
public:
- StringTypeAdapter<char>(char buffer)
- : m_buffer(buffer)
+ StringTypeAdapter<char>(char character)
+ : m_character(character)
{
}
unsigned length() { return 1; }
-
bool is8Bit() { return true; }
- void writeTo(LChar* destination)
+ void writeTo(LChar* destination) const
{
- *destination = m_buffer;
+ *destination = m_character;
}
- void writeTo(UChar* destination) { *destination = m_buffer; }
-
-private:
- unsigned char m_buffer;
-};
-
-template<>
-class StringTypeAdapter<LChar> {
-public:
- StringTypeAdapter<LChar>(LChar buffer)
- : m_buffer(buffer)
- {
- }
-
- unsigned length() { return 1; }
-
- bool is8Bit() { return true; }
-
- void writeTo(LChar* destination)
+ void writeTo(UChar* destination) const
{
- *destination = m_buffer;
+ *destination = m_character;
}
- void writeTo(UChar* destination) { *destination = m_buffer; }
+ String toString() const { return String(&m_character, 1); }
private:
- LChar m_buffer;
+ char m_character;
};
template<>
class StringTypeAdapter<UChar> {
public:
- StringTypeAdapter<UChar>(UChar buffer)
- : m_buffer(buffer)
+ StringTypeAdapter<UChar>(UChar character)
+ : m_character(character)
{
}
- unsigned length() { return 1; }
-
- bool is8Bit() { return m_buffer <= 0xff; }
+ unsigned length() const { return 1; }
+ bool is8Bit() const { return m_character <= 0xff; }
- void writeTo(LChar* destination)
+ void writeTo(LChar* destination) const
{
ASSERT(is8Bit());
- *destination = static_cast<LChar>(m_buffer);
+ *destination = static_cast<LChar>(m_character);
}
- void writeTo(UChar* destination) { *destination = m_buffer; }
-
-private:
- UChar m_buffer;
-};
-
-template<>
-class StringTypeAdapter<char*> {
-public:
- StringTypeAdapter<char*>(char* buffer)
- : m_buffer(buffer)
- , m_length(strlen(buffer))
- {
- }
-
- unsigned length() { return m_length; }
-
- bool is8Bit() { return true; }
-
- void writeTo(LChar* destination)
+ void writeTo(UChar* destination) const
{
- for (unsigned i = 0; i < m_length; ++i)
- destination[i] = static_cast<LChar>(m_buffer[i]);
+ *destination = m_character;
}
- void writeTo(UChar* destination)
- {
- for (unsigned i = 0; i < m_length; ++i) {
- unsigned char c = m_buffer[i];
- destination[i] = c;
- }
- }
+ String toString() const { return String(&m_character, 1); }
private:
- const char* m_buffer;
- unsigned m_length;
+ UChar m_character;
};
template<>
-class StringTypeAdapter<LChar*> {
+class StringTypeAdapter<const LChar*> {
public:
- StringTypeAdapter<LChar*>(LChar* buffer)
- : m_buffer(buffer)
- , m_length(strlen(reinterpret_cast<char*>(buffer)))
+ StringTypeAdapter(const LChar* characters)
+ : m_characters(characters)
+ , m_length(strlen(reinterpret_cast<const char*>(characters)))
{
}
- unsigned length() { return m_length; }
-
- bool is8Bit() { return true; }
+ unsigned length() const { return m_length; }
+ bool is8Bit() const { return true; }
- void writeTo(LChar* destination)
+ void writeTo(LChar* destination) const
{
- memcpy(destination, m_buffer, m_length * sizeof(LChar));
+ StringView(m_characters, m_length).getCharactersWithUpconvert(destination);
}
- void writeTo(UChar* destination)
+ void writeTo(UChar* destination) const
{
- StringImpl::copyChars(destination, m_buffer, m_length);
+ StringView(m_characters, m_length).getCharactersWithUpconvert(destination);
}
+ String toString() const { return String(m_characters, m_length); }
+
private:
- const LChar* m_buffer;
+ const LChar* m_characters;
unsigned m_length;
};
template<>
class StringTypeAdapter<const UChar*> {
public:
- StringTypeAdapter<const UChar*>(const UChar* buffer)
- : m_buffer(buffer)
+ StringTypeAdapter(const UChar* characters)
+ : m_characters(characters)
{
- size_t len = 0;
- while (m_buffer[len] != UChar(0))
- ++len;
+ unsigned length = 0;
+ while (m_characters[length])
+ ++length;
- if (len > std::numeric_limits<unsigned>::max())
+ if (length > std::numeric_limits<unsigned>::max()) // FIXME this is silly https://bugs.webkit.org/show_bug.cgi?id=165790
CRASH();
- m_length = len;
+ m_length = length;
}
- unsigned length() { return m_length; }
+ unsigned length() const { return m_length; }
+ bool is8Bit() const { return false; }
- bool is8Bit() { return false; }
-
- NO_RETURN_DUE_TO_CRASH void writeTo(LChar*)
+ NO_RETURN_DUE_TO_CRASH void writeTo(LChar*) const
{
- CRASH();
+ CRASH(); // FIXME make this a compile-time failure https://bugs.webkit.org/show_bug.cgi?id=165791
}
- void writeTo(UChar* destination)
+ void writeTo(UChar* destination) const
{
- memcpy(destination, m_buffer, m_length * sizeof(UChar));
+ memcpy(destination, m_characters, m_length * sizeof(UChar));
}
+ String toString() const { return String(m_characters, m_length); }
+
private:
- const UChar* m_buffer;
+ const UChar* m_characters;
unsigned m_length;
};
template<>
-class StringTypeAdapter<const char*> {
+class StringTypeAdapter<const char*> : public StringTypeAdapter<const LChar*> {
public:
- StringTypeAdapter<const char*>(const char* buffer)
- : m_buffer(buffer)
- , m_length(strlen(buffer))
- {
- }
-
- unsigned length() { return m_length; }
-
- bool is8Bit() { return true; }
-
- void writeTo(LChar* destination)
- {
- memcpy(destination, m_buffer, static_cast<size_t>(m_length) * sizeof(LChar));
- }
-
- void writeTo(UChar* destination)
+ StringTypeAdapter(const char* characters)
+ : StringTypeAdapter<const LChar*>(reinterpret_cast<const LChar*>(characters))
{
- for (unsigned i = 0; i < m_length; ++i) {
- unsigned char c = m_buffer[i];
- destination[i] = c;
- }
}
-
-private:
- const char* m_buffer;
- unsigned m_length;
};
template<>
-class StringTypeAdapter<const LChar*> {
+class StringTypeAdapter<char*> : public StringTypeAdapter<const char*> {
public:
- StringTypeAdapter<const LChar*>(const LChar* buffer)
- : m_buffer(buffer)
- , m_length(strlen(reinterpret_cast<const char*>(buffer)))
- {
- }
-
- unsigned length() { return m_length; }
-
- bool is8Bit() { return true; }
-
- void writeTo(LChar* destination)
+ StringTypeAdapter(const char* characters)
+ : StringTypeAdapter<const char*>(characters)
{
- memcpy(destination, m_buffer, static_cast<size_t>(m_length) * sizeof(LChar));
}
-
- void writeTo(UChar* destination)
- {
- StringImpl::copyChars(destination, m_buffer, m_length);
- }
-
-private:
- const LChar* m_buffer;
- unsigned m_length;
};
template<>
-class StringTypeAdapter<ASCIILiteral> {
+class StringTypeAdapter<ASCIILiteral> : public StringTypeAdapter<const char*> {
public:
- StringTypeAdapter<ASCIILiteral>(ASCIILiteral buffer)
- : m_buffer(reinterpret_cast<const LChar*>(static_cast<const char*>(buffer)))
- , m_length(strlen(buffer))
- {
- }
-
- size_t length() { return m_length; }
-
- bool is8Bit() { return true; }
-
- void writeTo(LChar* destination)
- {
- memcpy(destination, m_buffer, static_cast<size_t>(m_length));
- }
-
- void writeTo(UChar* destination)
+ StringTypeAdapter(ASCIILiteral characters)
+ : StringTypeAdapter<const char*>(characters)
{
- StringImpl::copyChars(destination, m_buffer, m_length);
}
-
-private:
- const LChar* m_buffer;
- unsigned m_length;
};
template<>
class StringTypeAdapter<Vector<char>> {
public:
- StringTypeAdapter<Vector<char>>(const Vector<char>& buffer)
- : m_buffer(buffer)
+ StringTypeAdapter(const Vector<char>& vector)
+ : m_vector(vector)
{
}
- size_t length() { return m_buffer.size(); }
-
- bool is8Bit() { return true; }
-
- void writeTo(LChar* destination)
- {
- for (size_t i = 0; i < m_buffer.size(); ++i)
- destination[i] = static_cast<unsigned char>(m_buffer[i]);
- }
+ size_t length() const { return m_vector.size(); }
+ bool is8Bit() const { return true; }
- void writeTo(UChar* destination)
- {
- for (size_t i = 0; i < m_buffer.size(); ++i)
- destination[i] = static_cast<unsigned char>(m_buffer[i]);
- }
-
-private:
- const Vector<char>& m_buffer;
-};
-
-template<>
-class StringTypeAdapter<Vector<LChar>> {
-public:
- StringTypeAdapter<Vector<LChar>>(const Vector<LChar>& buffer)
- : m_buffer(buffer)
+ void writeTo(LChar* destination) const
{
+ StringView(reinterpret_cast<const LChar*>(m_vector.data()), m_vector.size()).getCharactersWithUpconvert(destination);
}
- size_t length() { return m_buffer.size(); }
-
- bool is8Bit() { return true; }
-
- void writeTo(LChar* destination)
+ void writeTo(UChar* destination) const
{
- for (size_t i = 0; i < m_buffer.size(); ++i)
- destination[i] = m_buffer[i];
+ StringView(reinterpret_cast<const LChar*>(m_vector.data()), m_vector.size()).getCharactersWithUpconvert(destination);
}
- void writeTo(UChar* destination)
- {
- for (size_t i = 0; i < m_buffer.size(); ++i)
- destination[i] = m_buffer[i];
- }
+ String toString() const { return String(m_vector.data(), m_vector.size()); }
private:
- const Vector<LChar>& m_buffer;
+ const Vector<char>& m_vector;
};
template<>
class StringTypeAdapter<String> {
public:
StringTypeAdapter<String>(const String& string)
- : m_buffer(string)
+ : m_string(string)
{
}
- unsigned length() { return m_buffer.length(); }
+ unsigned length() const { return m_string.length(); }
+ bool is8Bit() const { return m_string.isNull() || m_string.is8Bit(); }
- bool is8Bit() { return m_buffer.isNull() || m_buffer.is8Bit(); }
-
- void writeTo(LChar* destination)
+ void writeTo(LChar* destination) const
{
- unsigned length = m_buffer.length();
-
- ASSERT(is8Bit());
- const LChar* data = m_buffer.characters8();
- for (unsigned i = 0; i < length; ++i)
- destination[i] = data[i];
-
+ StringView(m_string).getCharactersWithUpconvert(destination);
WTF_STRINGTYPEADAPTER_COPIED_WTF_STRING();
}
- void writeTo(UChar* destination)
+ void writeTo(UChar* destination) const
{
- unsigned length = m_buffer.length();
-
- if (is8Bit()) {
- const LChar* data = m_buffer.characters8();
- for (unsigned i = 0; i < length; ++i)
- destination[i] = data[i];
- } else {
- const UChar* data = m_buffer.characters16();
- for (unsigned i = 0; i < length; ++i)
- destination[i] = data[i];
- }
-
+ StringView(m_string).getCharactersWithUpconvert(destination);
WTF_STRINGTYPEADAPTER_COPIED_WTF_STRING();
}
+ String toString() const { return m_string; }
+
private:
- const String& m_buffer;
+ const String& m_string;
};
template<>
-class StringTypeAdapter<AtomicString> {
+class StringTypeAdapter<AtomicString> : public StringTypeAdapter<String> {
public:
- StringTypeAdapter<AtomicString>(const AtomicString& string)
- : m_adapter(string.string())
+ StringTypeAdapter(const AtomicString& string)
+ : StringTypeAdapter<String>(string.string())
{
}
-
- unsigned length() { return m_adapter.length(); }
-
- bool is8Bit() { return m_adapter.is8Bit(); }
-
- void writeTo(LChar* destination) { m_adapter.writeTo(destination); }
- void writeTo(UChar* destination) { m_adapter.writeTo(destination); }
-
-private:
- StringTypeAdapter<String> m_adapter;
};
-inline void sumWithOverflow(unsigned& total, unsigned addend, bool& overflow)
+inline void sumWithOverflow(bool& overflow, unsigned& total, unsigned addend)
{
unsigned oldTotal = total;
total = oldTotal + addend;
@@ -424,569 +267,97 @@ inline void sumWithOverflow(unsigned& total, unsigned addend, bool& overflow)
overflow = true;
}
-template<typename StringType1, typename StringType2>
-PassRefPtr<StringImpl> tryMakeString(StringType1 string1, StringType2 string2)
-{
- StringTypeAdapter<StringType1> adapter1(string1);
- StringTypeAdapter<StringType2> adapter2(string2);
-
- bool overflow = false;
- unsigned length = adapter1.length();
- sumWithOverflow(length, adapter2.length(), overflow);
- if (overflow)
- return 0;
-
- if (adapter1.is8Bit() && adapter2.is8Bit()) {
- LChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- LChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
-
- return resultImpl.release();
- }
-
- UChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- UChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
-
- return resultImpl.release();
-}
-
-template<typename StringType1, typename StringType2, typename StringType3>
-PassRefPtr<StringImpl> tryMakeString(StringType1 string1, StringType2 string2, StringType3 string3)
+template<typename... Unsigned>
+inline void sumWithOverflow(bool& overflow, unsigned& total, unsigned addend, Unsigned ...addends)
{
- StringTypeAdapter<StringType1> adapter1(string1);
- StringTypeAdapter<StringType2> adapter2(string2);
- StringTypeAdapter<StringType3> adapter3(string3);
-
- bool overflow = false;
- unsigned length = adapter1.length();
- sumWithOverflow(length, adapter2.length(), overflow);
- sumWithOverflow(length, adapter3.length(), overflow);
- if (overflow)
- return 0;
-
- if (adapter1.is8Bit() && adapter2.is8Bit() && adapter3.is8Bit()) {
- LChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- LChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
-
- return resultImpl.release();
- }
-
- UChar* buffer = 0;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- UChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
-
- return resultImpl.release();
+ unsigned oldTotal = total;
+ total = oldTotal + addend;
+ if (total < oldTotal)
+ overflow = true;
+ sumWithOverflow(overflow, total, addends...);
}
-template<typename StringType1, typename StringType2, typename StringType3, typename StringType4>
-PassRefPtr<StringImpl> tryMakeString(StringType1 string1, StringType2 string2, StringType3 string3, StringType4 string4)
+template<typename Adapter>
+inline bool are8Bit(Adapter adapter)
{
- StringTypeAdapter<StringType1> adapter1(string1);
- StringTypeAdapter<StringType2> adapter2(string2);
- StringTypeAdapter<StringType3> adapter3(string3);
- StringTypeAdapter<StringType4> adapter4(string4);
-
- bool overflow = false;
- unsigned length = adapter1.length();
- sumWithOverflow(length, adapter2.length(), overflow);
- sumWithOverflow(length, adapter3.length(), overflow);
- sumWithOverflow(length, adapter4.length(), overflow);
- if (overflow)
- return 0;
-
- if (adapter1.is8Bit() && adapter2.is8Bit() && adapter3.is8Bit() && adapter4.is8Bit()) {
- LChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- LChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
- result += adapter3.length();
- adapter4.writeTo(result);
-
- return resultImpl.release();
- }
-
- UChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- UChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
- result += adapter3.length();
- adapter4.writeTo(result);
-
- return resultImpl.release();
+ return adapter.is8Bit();
}
-template<typename StringType1, typename StringType2, typename StringType3, typename StringType4, typename StringType5>
-PassRefPtr<StringImpl> tryMakeString(StringType1 string1, StringType2 string2, StringType3 string3, StringType4 string4, StringType5 string5)
+template<typename Adapter, typename... Adapters>
+inline bool are8Bit(Adapter adapter, Adapters ...adapters)
{
- StringTypeAdapter<StringType1> adapter1(string1);
- StringTypeAdapter<StringType2> adapter2(string2);
- StringTypeAdapter<StringType3> adapter3(string3);
- StringTypeAdapter<StringType4> adapter4(string4);
- StringTypeAdapter<StringType5> adapter5(string5);
-
- bool overflow = false;
- unsigned length = adapter1.length();
- sumWithOverflow(length, adapter2.length(), overflow);
- sumWithOverflow(length, adapter3.length(), overflow);
- sumWithOverflow(length, adapter4.length(), overflow);
- sumWithOverflow(length, adapter5.length(), overflow);
- if (overflow)
- return 0;
-
- if (adapter1.is8Bit() && adapter2.is8Bit() && adapter3.is8Bit() && adapter4.is8Bit() && adapter5.is8Bit()) {
- LChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- LChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
- result += adapter3.length();
- adapter4.writeTo(result);
- result += adapter4.length();
- adapter5.writeTo(result);
-
- return resultImpl.release();
- }
-
- UChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- UChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
- result += adapter3.length();
- adapter4.writeTo(result);
- result += adapter4.length();
- adapter5.writeTo(result);
-
- return resultImpl.release();
+ return adapter.is8Bit() && are8Bit(adapters...);
}
-template<typename StringType1, typename StringType2, typename StringType3, typename StringType4, typename StringType5, typename StringType6>
-PassRefPtr<StringImpl> tryMakeString(StringType1 string1, StringType2 string2, StringType3 string3, StringType4 string4, StringType5 string5, StringType6 string6)
+template<typename ResultType, typename Adapter>
+inline void makeStringAccumulator(ResultType* result, Adapter adapter)
{
- StringTypeAdapter<StringType1> adapter1(string1);
- StringTypeAdapter<StringType2> adapter2(string2);
- StringTypeAdapter<StringType3> adapter3(string3);
- StringTypeAdapter<StringType4> adapter4(string4);
- StringTypeAdapter<StringType5> adapter5(string5);
- StringTypeAdapter<StringType6> adapter6(string6);
-
- bool overflow = false;
- unsigned length = adapter1.length();
- sumWithOverflow(length, adapter2.length(), overflow);
- sumWithOverflow(length, adapter3.length(), overflow);
- sumWithOverflow(length, adapter4.length(), overflow);
- sumWithOverflow(length, adapter5.length(), overflow);
- sumWithOverflow(length, adapter6.length(), overflow);
- if (overflow)
- return 0;
-
- if (adapter1.is8Bit() && adapter2.is8Bit() && adapter3.is8Bit() && adapter4.is8Bit() && adapter5.is8Bit() && adapter6.is8Bit()) {
- LChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- LChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
- result += adapter3.length();
- adapter4.writeTo(result);
- result += adapter4.length();
- adapter5.writeTo(result);
- result += adapter5.length();
- adapter6.writeTo(result);
-
- return resultImpl.release();
- }
-
- UChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- UChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
- result += adapter3.length();
- adapter4.writeTo(result);
- result += adapter4.length();
- adapter5.writeTo(result);
- result += adapter5.length();
- adapter6.writeTo(result);
-
- return resultImpl.release();
+ adapter.writeTo(result);
}
-template<typename StringType1, typename StringType2, typename StringType3, typename StringType4, typename StringType5, typename StringType6, typename StringType7>
-PassRefPtr<StringImpl> tryMakeString(StringType1 string1, StringType2 string2, StringType3 string3, StringType4 string4, StringType5 string5, StringType6 string6, StringType7 string7)
+template<typename ResultType, typename Adapter, typename... Adapters>
+inline void makeStringAccumulator(ResultType* result, Adapter adapter, Adapters ...adapters)
{
- StringTypeAdapter<StringType1> adapter1(string1);
- StringTypeAdapter<StringType2> adapter2(string2);
- StringTypeAdapter<StringType3> adapter3(string3);
- StringTypeAdapter<StringType4> adapter4(string4);
- StringTypeAdapter<StringType5> adapter5(string5);
- StringTypeAdapter<StringType6> adapter6(string6);
- StringTypeAdapter<StringType7> adapter7(string7);
-
- bool overflow = false;
- unsigned length = adapter1.length();
- sumWithOverflow(length, adapter2.length(), overflow);
- sumWithOverflow(length, adapter3.length(), overflow);
- sumWithOverflow(length, adapter4.length(), overflow);
- sumWithOverflow(length, adapter5.length(), overflow);
- sumWithOverflow(length, adapter6.length(), overflow);
- sumWithOverflow(length, adapter7.length(), overflow);
- if (overflow)
- return 0;
-
- if (adapter1.is8Bit() && adapter2.is8Bit() && adapter3.is8Bit() && adapter4.is8Bit() && adapter5.is8Bit() && adapter6.is8Bit() && adapter7.is8Bit()) {
- LChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- LChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
- result += adapter3.length();
- adapter4.writeTo(result);
- result += adapter4.length();
- adapter5.writeTo(result);
- result += adapter5.length();
- adapter6.writeTo(result);
- result += adapter6.length();
- adapter7.writeTo(result);
-
- return resultImpl.release();
- }
-
- UChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- UChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
- result += adapter3.length();
- adapter4.writeTo(result);
- result += adapter4.length();
- adapter5.writeTo(result);
- result += adapter5.length();
- adapter6.writeTo(result);
- result += adapter6.length();
- adapter7.writeTo(result);
-
- return resultImpl.release();
+ adapter.writeTo(result);
+ makeStringAccumulator(result + adapter.length(), adapters...);
}
-template<typename StringType1, typename StringType2, typename StringType3, typename StringType4, typename StringType5, typename StringType6, typename StringType7, typename StringType8>
-PassRefPtr<StringImpl> tryMakeString(StringType1 string1, StringType2 string2, StringType3 string3, StringType4 string4, StringType5 string5, StringType6 string6, StringType7 string7, StringType8 string8)
+template<typename StringTypeAdapter, typename... StringTypeAdapters>
+String tryMakeStringFromAdapters(StringTypeAdapter adapter, StringTypeAdapters ...adapters)
{
- StringTypeAdapter<StringType1> adapter1(string1);
- StringTypeAdapter<StringType2> adapter2(string2);
- StringTypeAdapter<StringType3> adapter3(string3);
- StringTypeAdapter<StringType4> adapter4(string4);
- StringTypeAdapter<StringType5> adapter5(string5);
- StringTypeAdapter<StringType6> adapter6(string6);
- StringTypeAdapter<StringType7> adapter7(string7);
- StringTypeAdapter<StringType8> adapter8(string8);
-
bool overflow = false;
- unsigned length = adapter1.length();
- sumWithOverflow(length, adapter2.length(), overflow);
- sumWithOverflow(length, adapter3.length(), overflow);
- sumWithOverflow(length, adapter4.length(), overflow);
- sumWithOverflow(length, adapter5.length(), overflow);
- sumWithOverflow(length, adapter6.length(), overflow);
- sumWithOverflow(length, adapter7.length(), overflow);
- sumWithOverflow(length, adapter8.length(), overflow);
+ unsigned length = adapter.length();
+ sumWithOverflow(overflow, length, adapters.length()...);
if (overflow)
- return 0;
+ return String();
- if (adapter1.is8Bit() && adapter2.is8Bit() && adapter3.is8Bit() && adapter4.is8Bit() && adapter5.is8Bit() && adapter6.is8Bit() && adapter7.is8Bit() && adapter8.is8Bit()) {
+ if (are8Bit(adapter, adapters...)) {
LChar* buffer;
RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
if (!resultImpl)
- return 0;
-
- LChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
- result += adapter3.length();
- adapter4.writeTo(result);
- result += adapter4.length();
- adapter5.writeTo(result);
- result += adapter5.length();
- adapter6.writeTo(result);
- result += adapter6.length();
- adapter7.writeTo(result);
- result += adapter7.length();
- adapter8.writeTo(result);
-
- return resultImpl.release();
- }
-
- UChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- UChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
- result += adapter3.length();
- adapter4.writeTo(result);
- result += adapter4.length();
- adapter5.writeTo(result);
- result += adapter5.length();
- adapter6.writeTo(result);
- result += adapter6.length();
- adapter7.writeTo(result);
- result += adapter7.length();
- adapter8.writeTo(result);
-
- return resultImpl.release();
-}
-
-template<typename StringType1, typename StringType2, typename StringType3, typename StringType4, typename StringType5, typename StringType6, typename StringType7, typename StringType8, typename StringType9>
-PassRefPtr<StringImpl> tryMakeString(StringType1 string1, StringType2 string2, StringType3 string3, StringType4 string4, StringType5 string5, StringType6 string6, StringType7 string7, StringType8 string8, StringType9 string9)
-{
- StringTypeAdapter<StringType1> adapter1(string1);
- StringTypeAdapter<StringType2> adapter2(string2);
- StringTypeAdapter<StringType3> adapter3(string3);
- StringTypeAdapter<StringType4> adapter4(string4);
- StringTypeAdapter<StringType5> adapter5(string5);
- StringTypeAdapter<StringType6> adapter6(string6);
- StringTypeAdapter<StringType7> adapter7(string7);
- StringTypeAdapter<StringType8> adapter8(string8);
- StringTypeAdapter<StringType9> adapter9(string9);
+ return String();
- bool overflow = false;
- unsigned length = adapter1.length();
- sumWithOverflow(length, adapter2.length(), overflow);
- sumWithOverflow(length, adapter3.length(), overflow);
- sumWithOverflow(length, adapter4.length(), overflow);
- sumWithOverflow(length, adapter5.length(), overflow);
- sumWithOverflow(length, adapter6.length(), overflow);
- sumWithOverflow(length, adapter7.length(), overflow);
- sumWithOverflow(length, adapter8.length(), overflow);
- sumWithOverflow(length, adapter9.length(), overflow);
- if (overflow)
- return 0;
+ makeStringAccumulator(buffer, adapter, adapters...);
- if (adapter1.is8Bit() && adapter2.is8Bit() && adapter3.is8Bit() && adapter4.is8Bit() && adapter5.is8Bit() && adapter6.is8Bit() && adapter7.is8Bit() && adapter8.is8Bit() && adapter9.is8Bit()) {
- LChar* buffer;
- RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
- if (!resultImpl)
- return 0;
-
- LChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
- result += adapter3.length();
- adapter4.writeTo(result);
- result += adapter4.length();
- adapter5.writeTo(result);
- result += adapter5.length();
- adapter6.writeTo(result);
- result += adapter6.length();
- adapter7.writeTo(result);
- result += adapter7.length();
- adapter8.writeTo(result);
- result += adapter8.length();
- adapter9.writeTo(result);
-
- return resultImpl.release();
+ return WTFMove(resultImpl);
}
UChar* buffer;
RefPtr<StringImpl> resultImpl = StringImpl::tryCreateUninitialized(length, buffer);
if (!resultImpl)
- return 0;
-
- UChar* result = buffer;
- adapter1.writeTo(result);
- result += adapter1.length();
- adapter2.writeTo(result);
- result += adapter2.length();
- adapter3.writeTo(result);
- result += adapter3.length();
- adapter4.writeTo(result);
- result += adapter4.length();
- adapter5.writeTo(result);
- result += adapter5.length();
- adapter6.writeTo(result);
- result += adapter6.length();
- adapter7.writeTo(result);
- result += adapter7.length();
- adapter8.writeTo(result);
- result += adapter8.length();
- adapter9.writeTo(result);
-
- return resultImpl.release();
-}
-
-
-// Convenience only.
-template<typename StringType1>
-String makeString(StringType1 string1)
-{
- return String(string1);
-}
-
-template<typename StringType1, typename StringType2>
-String makeString(StringType1 string1, StringType2 string2)
-{
- RefPtr<StringImpl> resultImpl = tryMakeString(string1, string2);
- if (!resultImpl)
- CRASH();
- return resultImpl.release();
-}
-
-template<typename StringType1, typename StringType2, typename StringType3>
-String makeString(StringType1 string1, StringType2 string2, StringType3 string3)
-{
- RefPtr<StringImpl> resultImpl = tryMakeString(string1, string2, string3);
- if (!resultImpl)
- CRASH();
- return resultImpl.release();
-}
-
-template<typename StringType1, typename StringType2, typename StringType3, typename StringType4>
-String makeString(StringType1 string1, StringType2 string2, StringType3 string3, StringType4 string4)
-{
- RefPtr<StringImpl> resultImpl = tryMakeString(string1, string2, string3, string4);
- if (!resultImpl)
- CRASH();
- return resultImpl.release();
-}
+ return String();
-template<typename StringType1, typename StringType2, typename StringType3, typename StringType4, typename StringType5>
-String makeString(StringType1 string1, StringType2 string2, StringType3 string3, StringType4 string4, StringType5 string5)
-{
- RefPtr<StringImpl> resultImpl = tryMakeString(string1, string2, string3, string4, string5);
- if (!resultImpl)
- CRASH();
- return resultImpl.release();
-}
+ makeStringAccumulator(buffer, adapter, adapters...);
-template<typename StringType1, typename StringType2, typename StringType3, typename StringType4, typename StringType5, typename StringType6>
-String makeString(StringType1 string1, StringType2 string2, StringType3 string3, StringType4 string4, StringType5 string5, StringType6 string6)
-{
- RefPtr<StringImpl> resultImpl = tryMakeString(string1, string2, string3, string4, string5, string6);
- if (!resultImpl)
- CRASH();
- return resultImpl.release();
+ return WTFMove(resultImpl);
}
-template<typename StringType1, typename StringType2, typename StringType3, typename StringType4, typename StringType5, typename StringType6, typename StringType7>
-String makeString(StringType1 string1, StringType2 string2, StringType3 string3, StringType4 string4, StringType5 string5, StringType6 string6, StringType7 string7)
+template<typename... StringTypes>
+String tryMakeString(StringTypes ...strings)
{
- RefPtr<StringImpl> resultImpl = tryMakeString(string1, string2, string3, string4, string5, string6, string7);
- if (!resultImpl)
- CRASH();
- return resultImpl.release();
+ return tryMakeStringFromAdapters(StringTypeAdapter<StringTypes>(strings)...);
}
-template<typename StringType1, typename StringType2, typename StringType3, typename StringType4, typename StringType5, typename StringType6, typename StringType7, typename StringType8>
-String makeString(StringType1 string1, StringType2 string2, StringType3 string3, StringType4 string4, StringType5 string5, StringType6 string6, StringType7 string7, StringType8 string8)
+// Convenience only.
+template<typename StringType>
+String makeString(StringType string)
{
- RefPtr<StringImpl> resultImpl = tryMakeString(string1, string2, string3, string4, string5, string6, string7, string8);
- if (!resultImpl)
- CRASH();
- return resultImpl.release();
+ return String(string);
}
-template<typename StringType1, typename StringType2, typename StringType3, typename StringType4, typename StringType5, typename StringType6, typename StringType7, typename StringType8, typename StringType9>
-String makeString(StringType1 string1, StringType2 string2, StringType3 string3, StringType4 string4, StringType5 string5, StringType6 string6, StringType7 string7, StringType8 string8, StringType9 string9)
+template<typename... StringTypes>
+String makeString(StringTypes... strings)
{
- RefPtr<StringImpl> resultImpl = tryMakeString(string1, string2, string3, string4, string5, string6, string7, string8, string9);
- if (!resultImpl)
+ String result = tryMakeString(strings...);
+ if (!result)
CRASH();
- return resultImpl.release();
+ return result;
}
} // namespace WTF
using WTF::makeString;
+using WTF::tryMakeString;
#include <wtf/text/StringOperators.h>
#endif
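
// Illustrative sketch, not part of the patch: the variadic tryMakeString()/makeString()
// introduced above accept any argument mix for which a StringTypeAdapter specialization
// exists (literals, characters, String, AtomicString, Vector<char>, ...). The helper
// names below are examples only.
#include <wtf/text/StringConcatenate.h>
#include <wtf/text/WTFString.h>

static WTF::String buildGreeting(const WTF::String& name)
{
    // One adapter per argument; the lengths are summed with overflow checking and
    // every piece is written once into a single uninitialized StringImpl.
    return WTF::makeString("Hello, ", name, '!');
}

static bool concatenateChecked(const WTF::String& a, const WTF::String& b, WTF::String& result)
{
    // tryMakeString() returns a null String on length overflow or allocation
    // failure instead of calling CRASH(), so the caller can recover.
    result = WTF::tryMakeString(a, ", ", b);
    return !result.isNull();
}
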
diff --git a/Source/WTF/wtf/text/StringConcatenateNumbers.h b/Source/WTF/wtf/text/StringConcatenateNumbers.h
new file mode 100644
index 000000000..293e74504
--- /dev/null
+++ b/Source/WTF/wtf/text/StringConcatenateNumbers.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/dtoa.h>
+#include <wtf/text/IntegerToStringConversion.h>
+#include <wtf/text/StringConcatenate.h>
+
+namespace WTF {
+
+template<>
+class StringTypeAdapter<int> {
+public:
+ StringTypeAdapter<int>(int number)
+ : m_number(number)
+ {
+ }
+
+ unsigned length() const { return lengthOfNumberAsStringSigned(m_number); }
+ bool is8Bit() const { return true; }
+
+ void writeTo(LChar* destination) const { writeNumberToBufferSigned(m_number, destination); }
+ void writeTo(UChar* destination) const { writeNumberToBufferSigned(m_number, destination); }
+
+ String toString() const { return String::number(m_number); }
+
+private:
+ int m_number;
+};
+
+template<>
+class StringTypeAdapter<unsigned> {
+public:
+ StringTypeAdapter<unsigned>(unsigned number)
+ : m_number(number)
+ {
+ }
+
+ unsigned length() const { return lengthOfNumberAsStringUnsigned(m_number); }
+ bool is8Bit() const { return true; }
+
+ void writeTo(LChar* destination) const { writeNumberToBufferUnsigned(m_number, destination); }
+ void writeTo(UChar* destination) const { writeNumberToBufferUnsigned(m_number, destination); }
+
+ String toString() const { return String::number(m_number); }
+
+private:
+ unsigned m_number;
+};
+
+template<>
+class StringTypeAdapter<double> {
+public:
+ StringTypeAdapter<double>(double number)
+ {
+ numberToString(number, m_buffer);
+ m_length = strlen(m_buffer);
+ }
+
+ unsigned length() const { return m_length; }
+ bool is8Bit() const { return true; }
+
+ void writeTo(LChar* destination) const
+ {
+ for (unsigned i = 0; i < m_length; ++i)
+ destination[i] = m_buffer[i];
+ }
+
+ void writeTo(UChar* destination) const
+ {
+ for (unsigned i = 0; i < m_length; ++i)
+ destination[i] = m_buffer[i];
+ }
+
+ String toString() const { return { m_buffer, m_length }; }
+
+private:
+ NumberToStringBuffer m_buffer;
+ unsigned m_length;
+};
+
+template<>
+class StringTypeAdapter<float> : public StringTypeAdapter<double> {
+public:
+ StringTypeAdapter<float>(float number)
+ : StringTypeAdapter<double>(number)
+ {
+ }
+};
+
+class FormattedNumber {
+public:
+ static FormattedNumber fixedPrecision(double number, unsigned significantFigures = 6, bool truncateTrailingZeros = false)
+ {
+ FormattedNumber numberFormatter;
+ numberToFixedPrecisionString(number, significantFigures, numberFormatter.m_buffer, truncateTrailingZeros);
+ numberFormatter.m_length = strlen(numberFormatter.m_buffer);
+ return numberFormatter;
+ }
+
+ static FormattedNumber fixedWidth(double number, unsigned decimalPlaces)
+ {
+ FormattedNumber numberFormatter;
+ numberToFixedWidthString(number, decimalPlaces, numberFormatter.m_buffer);
+ numberFormatter.m_length = strlen(numberFormatter.m_buffer);
+ return numberFormatter;
+ }
+
+ unsigned length() const { return m_length; }
+ const LChar* buffer() const { return reinterpret_cast<const LChar*>(m_buffer); }
+
+private:
+ NumberToStringBuffer m_buffer;
+ unsigned m_length;
+};
+
+template<>
+class StringTypeAdapter<FormattedNumber> {
+public:
+ StringTypeAdapter<FormattedNumber>(const FormattedNumber& numberFormatter)
+ : m_numberFormatter(numberFormatter)
+ {
+ }
+
+ unsigned length() const { return m_numberFormatter.length(); }
+ bool is8Bit() const { return true; }
+
+ void writeTo(LChar* destination) const
+ {
+ auto buffer = m_numberFormatter.buffer();
+ auto length = m_numberFormatter.length();
+ for (unsigned i = 0; i < length; ++i)
+ destination[i] = buffer[i];
+ }
+
+ void writeTo(UChar* destination) const
+ {
+ auto buffer = m_numberFormatter.buffer();
+ auto length = m_numberFormatter.length();
+ for (unsigned i = 0; i < length; ++i)
+ destination[i] = buffer[i];
+ }
+
+ String toString() const { return { m_numberFormatter.buffer(), m_numberFormatter.length() }; }
+
+private:
+ const FormattedNumber& m_numberFormatter;
+};
+
+}
+
+using WTF::FormattedNumber;
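
// Illustrative sketch, not part of the patch: with this header included, numeric
// arguments can be mixed directly into makeString(), and FormattedNumber selects
// the double formatting. The helper name and the choice of two decimal places are
// examples only.
#include <wtf/text/StringConcatenateNumbers.h>
#include <wtf/text/WTFString.h>

static WTF::String describeProgress(unsigned done, unsigned total, double fraction)
{
    // The int/unsigned/double adapters above write the digits in place;
    // fixedWidth(x, 2) formats with exactly two digits after the decimal point.
    return WTF::makeString(done, " of ", total, " (",
        WTF::FormattedNumber::fixedWidth(fraction * 100, 2), "%)");
}
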
diff --git a/Source/WTF/wtf/text/StringHash.h b/Source/WTF/wtf/text/StringHash.h
index 88bdd9369..139b5169e 100644
--- a/Source/WTF/wtf/text/StringHash.h
+++ b/Source/WTF/wtf/text/StringHash.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006, 2007, 2008, 2012, 2013 Apple Inc. All rights reserved
+ * Copyright (C) 2006-2008, 2012-2013, 2016 Apple Inc. All rights reserved
* Copyright (C) Research In Motion Limited 2009. All rights reserved.
*
* This library is free software; you can redistribute it and/or
@@ -24,7 +24,7 @@
#include <wtf/text/AtomicString.h>
#include <wtf/HashTraits.h>
-#include <wtf/StringHasher.h>
+#include <wtf/Hasher.h>
namespace WTF {
@@ -33,7 +33,15 @@ namespace WTF {
return value.isNull();
}
- // The hash() functions on StringHash and CaseFoldingHash do not support
+ inline void HashTraits<String>::customDeleteBucket(String& value)
+ {
+ // See unique_ptr's customDeleteBucket() for an explanation.
+ ASSERT(!isDeletedValue(value));
+ String valueToBeDestroyed = WTFMove(value);
+ constructDeletedValue(value);
+ }
+
+ // The hash() functions on StringHash and ASCIICaseInsensitiveHash do not support
// null strings. get(), contains(), and add() on HashMap<String,..., StringHash>
// cause a null-pointer dereference when passed null strings.
@@ -45,7 +53,7 @@ namespace WTF {
static unsigned hash(StringImpl* key) { return key->hash(); }
static inline bool equal(const StringImpl* a, const StringImpl* b)
{
- return equalNonNull(a, b);
+ return WTF::equal(*a, *b);
}
static unsigned hash(const RefPtr<StringImpl>& key) { return key->hash(); }
@@ -71,14 +79,11 @@ namespace WTF {
static const bool safeToCompareToEmptyOrDeleted = false;
};
- class CaseFoldingHash {
+ class ASCIICaseInsensitiveHash {
public:
template<typename T> static inline UChar foldCase(T character)
{
- if (std::is_same<T, LChar>::value)
- return StringImpl::latin1CaseFoldTable[character];
-
- return u_foldCase(character, U_FOLD_CASE_DEFAULT);
+ return toASCIILower(character);
}
static unsigned hash(const UChar* data, unsigned length)
@@ -105,17 +110,23 @@ namespace WTF {
static inline unsigned hash(const char* data, unsigned length)
{
- return CaseFoldingHash::hash(reinterpret_cast<const LChar*>(data), length);
+ return hash(reinterpret_cast<const LChar*>(data), length);
}
+ static inline bool equal(const StringImpl& a, const StringImpl& b)
+ {
+ return equalIgnoringASCIICase(a, b);
+ }
static inline bool equal(const StringImpl* a, const StringImpl* b)
{
- return equalIgnoringCaseNonNull(a, b);
+ ASSERT(a);
+ ASSERT(b);
+ return equal(*a, *b);
}
static unsigned hash(const RefPtr<StringImpl>& key)
{
- return hash(*key);
+ return hash(key.get());
}
static bool equal(const RefPtr<StringImpl>& a, const RefPtr<StringImpl>& b)
@@ -167,8 +178,8 @@ namespace WTF {
}
+using WTF::ASCIICaseInsensitiveHash;
using WTF::AlreadyHashed;
-using WTF::CaseFoldingHash;
using WTF::StringHash;
#endif
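
// Illustrative sketch, not part of the patch: the renamed ASCIICaseInsensitiveHash
// slots into the hash containers where CaseFoldingHash was used before, but it only
// folds ASCII letters, so non-ASCII case differences no longer compare equal. The
// header-lookup example below is hypothetical.
#include <wtf/HashMap.h>
#include <wtf/text/StringHash.h>
#include <wtf/text/WTFString.h>

static WTF::String contentType(const WTF::HashMap<WTF::String, WTF::String, WTF::ASCIICaseInsensitiveHash>& headers)
{
    // "Content-Type", "content-type" and "CONTENT-TYPE" hash to the same bucket;
    // null String keys are not supported, per the comment in StringHash.h.
    return headers.get("Content-Type");
}
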
diff --git a/Source/WTF/wtf/text/StringImpl.cpp b/Source/WTF/wtf/text/StringImpl.cpp
index 34794258c..ee66daf25 100644
--- a/Source/WTF/wtf/text/StringImpl.cpp
+++ b/Source/WTF/wtf/text/StringImpl.cpp
@@ -2,7 +2,7 @@
* Copyright (C) 1999 Lars Knoll (knoll@kde.org)
* (C) 1999 Antti Koivisto (koivisto@kde.org)
* (C) 2001 Dirk Mueller ( mueller@kde.org )
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2009, 2013-2016 Apple Inc. All rights reserved.
* Copyright (C) 2006 Andrew Wellington (proton@wiretapped.net)
*
* This library is free software; you can redistribute it and/or
@@ -30,12 +30,14 @@
#include "StringHash.h"
#include <wtf/ProcessID.h>
#include <wtf/StdLibExtras.h>
-#include <wtf/WTFThreadData.h>
#include <wtf/text/CString.h>
+#include <wtf/text/StringView.h>
+#include <wtf/text/SymbolImpl.h>
+#include <wtf/text/SymbolRegistry.h>
#include <wtf/unicode/CharacterNames.h>
#include <wtf/unicode/UTF8.h>
-#ifdef STRING_STATS
+#if STRING_STATS
#include <unistd.h>
#include <wtf/DataLog.h>
#endif
@@ -44,27 +46,21 @@ namespace WTF {
using namespace Unicode;
-COMPILE_ASSERT(sizeof(StringImpl) == 2 * sizeof(int) + 3 * sizeof(void*), StringImpl_should_stay_small);
+static_assert(sizeof(StringImpl) == 2 * sizeof(int) + 2 * sizeof(void*), "StringImpl should stay small");
-#ifdef STRING_STATS
+#if STRING_STATS
StringStats StringImpl::m_stringStats;
-unsigned StringStats::s_stringRemovesTillPrintStats = StringStats::s_printStringStatsFrequency;
+std::atomic<unsigned> StringStats::s_stringRemovesTillPrintStats(s_printStringStatsFrequency);
-void StringStats::removeString(StringImpl* string)
+void StringStats::removeString(StringImpl& string)
{
- unsigned length = string->length();
- bool isSubString = string->isSubString();
+ unsigned length = string.length();
+ bool isSubString = string.isSubString();
--m_totalNumberStrings;
- if (string->has16BitShadow()) {
- --m_numberUpconvertedStrings;
- if (!isSubString)
- m_totalUpconvertedData -= length;
- }
-
- if (string->is8Bit()) {
+ if (string.is8Bit()) {
--m_number8BitStrings;
if (!isSubString)
m_total8BitData -= length;
@@ -87,46 +83,46 @@ void StringStats::printStats()
unsigned long long totalNumberCharacters = m_total8BitData + m_total16BitData;
double percent8Bit = m_totalNumberStrings ? ((double)m_number8BitStrings * 100) / (double)m_totalNumberStrings : 0.0;
double average8bitLength = m_number8BitStrings ? (double)m_total8BitData / (double)m_number8BitStrings : 0.0;
- dataLogF("%8u (%5.2f%%) 8 bit %12llu chars %12llu bytes avg length %6.1f\n", m_number8BitStrings, percent8Bit, m_total8BitData, m_total8BitData, average8bitLength);
+ dataLogF("%8u (%5.2f%%) 8 bit %12llu chars %12llu bytes avg length %6.1f\n", m_number8BitStrings.load(), percent8Bit, m_total8BitData.load(), m_total8BitData.load(), average8bitLength);
double percent16Bit = m_totalNumberStrings ? ((double)m_number16BitStrings * 100) / (double)m_totalNumberStrings : 0.0;
double average16bitLength = m_number16BitStrings ? (double)m_total16BitData / (double)m_number16BitStrings : 0.0;
- dataLogF("%8u (%5.2f%%) 16 bit %12llu chars %12llu bytes avg length %6.1f\n", m_number16BitStrings, percent16Bit, m_total16BitData, m_total16BitData * 2, average16bitLength);
-
- double percentUpconverted = m_totalNumberStrings ? ((double)m_numberUpconvertedStrings * 100) / (double)m_number8BitStrings : 0.0;
- double averageUpconvertedLength = m_numberUpconvertedStrings ? (double)m_totalUpconvertedData / (double)m_numberUpconvertedStrings : 0.0;
- dataLogF("%8u (%5.2f%%) upconverted %12llu chars %12llu bytes avg length %6.1f\n", m_numberUpconvertedStrings, percentUpconverted, m_totalUpconvertedData, m_totalUpconvertedData * 2, averageUpconvertedLength);
+ dataLogF("%8u (%5.2f%%) 16 bit %12llu chars %12llu bytes avg length %6.1f\n", m_number16BitStrings.load(), percent16Bit, m_total16BitData.load(), m_total16BitData * 2, average16bitLength);
double averageLength = m_totalNumberStrings ? (double)totalNumberCharacters / (double)m_totalNumberStrings : 0.0;
- unsigned long long totalDataBytes = m_total8BitData + (m_total16BitData + m_totalUpconvertedData) * 2;
- dataLogF("%8u Total %12llu chars %12llu bytes avg length %6.1f\n", m_totalNumberStrings, totalNumberCharacters, totalDataBytes, averageLength);
- unsigned long long totalSavedBytes = m_total8BitData - m_totalUpconvertedData;
+ unsigned long long totalDataBytes = m_total8BitData + m_total16BitData * 2;
+ dataLogF("%8u Total %12llu chars %12llu bytes avg length %6.1f\n", m_totalNumberStrings.load(), totalNumberCharacters, totalDataBytes, averageLength);
+ unsigned long long totalSavedBytes = m_total8BitData;
double percentSavings = totalSavedBytes ? ((double)totalSavedBytes * 100) / (double)(totalDataBytes + totalSavedBytes) : 0.0;
dataLogF(" Total savings %12llu bytes (%5.2f%%)\n", totalSavedBytes, percentSavings);
+
+ dataLogF("%8u StringImpl::ref calls\n", m_refCalls.load());
+ dataLogF("%8u StringImpl::deref calls\n", m_derefCalls.load());
}
#endif
+StringImpl::StaticStringImpl StringImpl::s_atomicEmptyString("", StringImpl::StringAtomic);
StringImpl::~StringImpl()
{
ASSERT(!isStatic());
- STRING_STATS_REMOVE_STRING(this);
+ StringView::invalidate(*this);
- if (isAtomic())
- AtomicString::remove(this);
- if (isIdentifier()) {
- if (!wtfThreadData().currentIdentifierTable()->remove(this))
- CRASH();
- }
+ STRING_STATS_REMOVE_STRING(*this);
- BufferOwnership ownership = bufferOwnership();
+ if (isAtomic() && length() && !isSymbol())
+ AtomicStringImpl::remove(static_cast<AtomicStringImpl*>(this));
- if (has16BitShadow()) {
- ASSERT(m_copyData16);
- fastFree(m_copyData16);
+ if (isSymbol()) {
+ auto& symbol = static_cast<SymbolImpl&>(*this);
+ auto* symbolRegistry = symbol.symbolRegistry();
+ if (symbolRegistry)
+ symbolRegistry->remove(symbol);
}
+ BufferOwnership ownership = bufferOwnership();
+
if (ownership == BufferInternal)
return;
if (ownership == BufferOwned) {
@@ -137,8 +133,8 @@ StringImpl::~StringImpl()
}
ASSERT(ownership == BufferSubstring);
- ASSERT(m_substringBuffer);
- m_substringBuffer->deref();
+ ASSERT(substringBuffer());
+ substringBuffer()->deref();
}
void StringImpl::destroy(StringImpl* stringImpl)
@@ -147,19 +143,19 @@ void StringImpl::destroy(StringImpl* stringImpl)
fastFree(stringImpl);
}
-PassRef<StringImpl> StringImpl::createFromLiteral(const char* characters, unsigned length)
+Ref<StringImpl> StringImpl::createFromLiteral(const char* characters, unsigned length)
{
ASSERT_WITH_MESSAGE(length, "Use StringImpl::empty() to create an empty string");
ASSERT(charactersAreAllASCII<LChar>(reinterpret_cast<const LChar*>(characters), length));
return adoptRef(*new StringImpl(reinterpret_cast<const LChar*>(characters), length, ConstructWithoutCopying));
}
-PassRef<StringImpl> StringImpl::createFromLiteral(const char* characters)
+Ref<StringImpl> StringImpl::createFromLiteral(const char* characters)
{
return createFromLiteral(characters, strlen(characters));
}
-PassRef<StringImpl> StringImpl::createWithoutCopying(const UChar* characters, unsigned length)
+Ref<StringImpl> StringImpl::createWithoutCopying(const UChar* characters, unsigned length)
{
if (!length)
return *empty();
@@ -167,7 +163,7 @@ PassRef<StringImpl> StringImpl::createWithoutCopying(const UChar* characters, un
return adoptRef(*new StringImpl(characters, length, ConstructWithoutCopying));
}
-PassRef<StringImpl> StringImpl::createWithoutCopying(const LChar* characters, unsigned length)
+Ref<StringImpl> StringImpl::createWithoutCopying(const LChar* characters, unsigned length)
{
if (!length)
return *empty();
@@ -176,7 +172,7 @@ PassRef<StringImpl> StringImpl::createWithoutCopying(const LChar* characters, un
}
template <typename CharType>
-inline PassRef<StringImpl> StringImpl::createUninitializedInternal(unsigned length, CharType*& data)
+inline Ref<StringImpl> StringImpl::createUninitializedInternal(unsigned length, CharType*& data)
{
if (!length) {
data = 0;
@@ -186,7 +182,7 @@ inline PassRef<StringImpl> StringImpl::createUninitializedInternal(unsigned leng
}
template <typename CharType>
-inline PassRef<StringImpl> StringImpl::createUninitializedInternalNonEmpty(unsigned length, CharType*& data)
+inline Ref<StringImpl> StringImpl::createUninitializedInternalNonEmpty(unsigned length, CharType*& data)
{
ASSERT(length);
@@ -195,26 +191,25 @@ inline PassRef<StringImpl> StringImpl::createUninitializedInternalNonEmpty(unsig
// heap allocation from this call.
if (length > ((std::numeric_limits<unsigned>::max() - sizeof(StringImpl)) / sizeof(CharType)))
CRASH();
- size_t size = sizeof(StringImpl) + length * sizeof(CharType);
- StringImpl* string = static_cast<StringImpl*>(fastMalloc(size));
+ StringImpl* string = static_cast<StringImpl*>(fastMalloc(allocationSize<CharType>(length)));
- data = reinterpret_cast<CharType*>(string + 1);
+ data = string->tailPointer<CharType>();
return constructInternal<CharType>(string, length);
}
-PassRef<StringImpl> StringImpl::createUninitialized(unsigned length, LChar*& data)
+Ref<StringImpl> StringImpl::createUninitialized(unsigned length, LChar*& data)
{
return createUninitializedInternal(length, data);
}
-PassRef<StringImpl> StringImpl::createUninitialized(unsigned length, UChar*& data)
+Ref<StringImpl> StringImpl::createUninitialized(unsigned length, UChar*& data)
{
return createUninitializedInternal(length, data);
}
template <typename CharType>
-inline PassRef<StringImpl> StringImpl::reallocateInternal(PassRefPtr<StringImpl> originalString, unsigned length, CharType*& data)
-{
+inline Ref<StringImpl> StringImpl::reallocateInternal(Ref<StringImpl>&& originalString, unsigned length, CharType*& data)
+{
ASSERT(originalString->hasOneRef());
ASSERT(originalString->bufferOwnership() == BufferInternal);
@@ -226,28 +221,28 @@ inline PassRef<StringImpl> StringImpl::reallocateInternal(PassRefPtr<StringImpl>
// Same as createUninitialized() except here we use fastRealloc.
if (length > ((std::numeric_limits<unsigned>::max() - sizeof(StringImpl)) / sizeof(CharType)))
CRASH();
- size_t size = sizeof(StringImpl) + length * sizeof(CharType);
+
originalString->~StringImpl();
- StringImpl* string = static_cast<StringImpl*>(fastRealloc(originalString.leakRef(), size));
+ auto* string = static_cast<StringImpl*>(fastRealloc(&originalString.leakRef(), allocationSize<CharType>(length)));
- data = reinterpret_cast<CharType*>(string + 1);
+ data = string->tailPointer<CharType>();
return constructInternal<CharType>(string, length);
}
-PassRef<StringImpl> StringImpl::reallocate(PassRefPtr<StringImpl> originalString, unsigned length, LChar*& data)
+Ref<StringImpl> StringImpl::reallocate(Ref<StringImpl>&& originalString, unsigned length, LChar*& data)
{
ASSERT(originalString->is8Bit());
- return reallocateInternal(originalString, length, data);
+ return reallocateInternal(WTFMove(originalString), length, data);
}
-PassRef<StringImpl> StringImpl::reallocate(PassRefPtr<StringImpl> originalString, unsigned length, UChar*& data)
+Ref<StringImpl> StringImpl::reallocate(Ref<StringImpl>&& originalString, unsigned length, UChar*& data)
{
ASSERT(!originalString->is8Bit());
- return reallocateInternal(originalString, length, data);
+ return reallocateInternal(WTFMove(originalString), length, data);
}
template <typename CharType>
-inline PassRef<StringImpl> StringImpl::createInternal(const CharType* characters, unsigned length)
+inline Ref<StringImpl> StringImpl::createInternal(const CharType* characters, unsigned length)
{
if (!characters || !length)
return *empty();
@@ -258,23 +253,23 @@ inline PassRef<StringImpl> StringImpl::createInternal(const CharType* characters
return string;
}
-PassRef<StringImpl> StringImpl::create(const UChar* characters, unsigned length)
+Ref<StringImpl> StringImpl::create(const UChar* characters, unsigned length)
{
return createInternal(characters, length);
}
-PassRef<StringImpl> StringImpl::create(const LChar* characters, unsigned length)
+Ref<StringImpl> StringImpl::create(const LChar* characters, unsigned length)
{
return createInternal(characters, length);
}
-PassRef<StringImpl> StringImpl::create8BitIfPossible(const UChar* characters, unsigned length)
+Ref<StringImpl> StringImpl::create8BitIfPossible(const UChar* characters, unsigned length)
{
if (!characters || !length)
return *empty();
LChar* data;
- RefPtr<StringImpl> string = createUninitializedInternalNonEmpty(length, data);
+ auto string = createUninitializedInternalNonEmpty(length, data);
for (size_t i = 0; i < length; ++i) {
if (characters[i] & 0xff00)
@@ -282,15 +277,15 @@ PassRef<StringImpl> StringImpl::create8BitIfPossible(const UChar* characters, un
data[i] = static_cast<LChar>(characters[i]);
}
- return string.releaseNonNull();
+ return string;
}
-PassRef<StringImpl> StringImpl::create8BitIfPossible(const UChar* string)
+Ref<StringImpl> StringImpl::create8BitIfPossible(const UChar* string)
{
return StringImpl::create8BitIfPossible(string, lengthOfNullTerminatedString(string));
}
-PassRef<StringImpl> StringImpl::create(const LChar* string)
+Ref<StringImpl> StringImpl::create(const LChar* string)
{
if (!string)
return *empty();
@@ -300,41 +295,6 @@ PassRef<StringImpl> StringImpl::create(const LChar* string)
return create(string, length);
}
-const UChar* StringImpl::getData16SlowCase() const
-{
- if (has16BitShadow())
- return m_copyData16;
-
- if (bufferOwnership() == BufferSubstring) {
- // If this is a substring, return a pointer into the parent string.
- // TODO: Consider severing this string from the parent string
- unsigned offset = m_data8 - m_substringBuffer->characters8();
- return m_substringBuffer->deprecatedCharacters() + offset;
- }
-
- STRING_STATS_ADD_UPCONVERTED_STRING(m_length);
-
- unsigned len = length();
-
- m_copyData16 = static_cast<UChar*>(fastMalloc(len * sizeof(UChar)));
-
- m_hashAndFlags |= s_hashFlagHas16BitShadow;
-
- upconvertCharacters(0, len);
-
- return m_copyData16;
-}
-
-void StringImpl::upconvertCharacters(unsigned start, unsigned end) const
-{
- ASSERT(is8Bit());
- ASSERT(has16BitShadow());
-
- for (size_t i = start; i < end; ++i)
- m_copyData16[i] = m_data8[i];
-}
-
-
bool StringImpl::containsOnlyWhitespace()
{
// FIXME: The definition of whitespace here includes a number of characters
@@ -358,7 +318,7 @@ bool StringImpl::containsOnlyWhitespace()
return true;
}
-PassRef<StringImpl> StringImpl::substring(unsigned start, unsigned length)
+Ref<StringImpl> StringImpl::substring(unsigned start, unsigned length)
{
if (start >= m_length)
return *empty();
@@ -385,42 +345,23 @@ UChar32 StringImpl::characterStartingAt(unsigned i)
return 0;
}
-PassRef<StringImpl> StringImpl::lower()
+Ref<StringImpl> StringImpl::convertToLowercaseWithoutLocale()
{
- // Note: This is a hot function in the Dromaeo benchmark, specifically the
- // no-op code path up through the first 'return' statement.
+ // Note: At one time this was a hot function in the Dromaeo benchmark, specifically the
+ // no-op code path that may return ourself if we find no upper case letters and no invalid
+ // ASCII letters.
// First scan the string for uppercase and non-ASCII characters:
if (is8Bit()) {
- unsigned failingIndex;
for (unsigned i = 0; i < m_length; ++i) {
LChar character = m_data8[i];
- if (UNLIKELY((character & ~0x7F) || isASCIIUpper(character))) {
- failingIndex = i;
- goto SlowPath8bitLower;
- }
- }
- return *this;
-
-SlowPath8bitLower:
- LChar* data8;
- auto newImpl = createUninitializedInternalNonEmpty(m_length, data8);
-
- for (unsigned i = 0; i < failingIndex; ++i)
- data8[i] = m_data8[i];
-
- for (unsigned i = failingIndex; i < m_length; ++i) {
- LChar character = m_data8[i];
- if (!(character & ~0x7F))
- data8[i] = toASCIILower(character);
- else {
- ASSERT(u_tolower(character) <= 0xFF);
- data8[i] = static_cast<LChar>(u_tolower(character));
- }
+ if (UNLIKELY((character & ~0x7F) || isASCIIUpper(character)))
+ return convertToLowercaseWithoutLocaleStartingAtFailingIndex8Bit(i);
}
- return newImpl;
+ return *this;
}
+
bool noUpper = true;
unsigned ored = 0;
@@ -451,26 +392,51 @@ SlowPath8bitLower:
// Do a slower implementation for cases that include non-ASCII characters.
UChar* data16;
- RefPtr<StringImpl> newImpl = createUninitializedInternalNonEmpty(m_length, data16);
+ auto newImpl = createUninitializedInternalNonEmpty(m_length, data16);
UErrorCode status = U_ZERO_ERROR;
int32_t realLength = u_strToLower(data16, length, m_data16, m_length, "", &status);
if (U_SUCCESS(status) && realLength == length)
- return newImpl.releaseNonNull();
+ return newImpl;
newImpl = createUninitialized(realLength, data16);
status = U_ZERO_ERROR;
u_strToLower(data16, realLength, m_data16, m_length, "", &status);
if (U_FAILURE(status))
return *this;
- return newImpl.releaseNonNull();
+ return newImpl;
+}
+
+Ref<StringImpl> StringImpl::convertToLowercaseWithoutLocaleStartingAtFailingIndex8Bit(unsigned failingIndex)
+{
+ ASSERT(is8Bit());
+ LChar* data8;
+ auto newImpl = createUninitializedInternalNonEmpty(m_length, data8);
+
+ for (unsigned i = 0; i < failingIndex; ++i) {
+ ASSERT(!(m_data8[i] & ~0x7F) && !isASCIIUpper(m_data8[i]));
+ data8[i] = m_data8[i];
+ }
+
+ for (unsigned i = failingIndex; i < m_length; ++i) {
+ LChar character = m_data8[i];
+ if (!(character & ~0x7F))
+ data8[i] = toASCIILower(character);
+ else {
+ ASSERT(u_tolower(character) <= 0xFF);
+ data8[i] = static_cast<LChar>(u_tolower(character));
+ }
+ }
+
+ return newImpl;
}
-PassRef<StringImpl> StringImpl::upper()
+Ref<StringImpl> StringImpl::convertToUppercaseWithoutLocale()
{
- // This function could be optimized for no-op cases the way lower() is,
- // but in empirical testing, few actual calls to upper() are no-ops, so
- // it wouldn't be worth the extra time for pre-scanning.
+ // This function could be optimized for no-op cases the way
+ // convertToLowercaseWithoutLocale() is, but in empirical testing,
+ // few actual calls to upper() are no-ops, so it wouldn't be worth
+ // the extra time for pre-scanning.
if (m_length > static_cast<unsigned>(std::numeric_limits<int32_t>::max()))
CRASH();
@@ -478,30 +444,23 @@ PassRef<StringImpl> StringImpl::upper()
if (is8Bit()) {
LChar* data8;
- RefPtr<StringImpl> newImpl = createUninitialized(m_length, data8);
+ auto newImpl = createUninitialized(m_length, data8);
// Do a faster loop for the case where all the characters are ASCII.
unsigned ored = 0;
for (int i = 0; i < length; ++i) {
LChar c = m_data8[i];
ored |= c;
-#if CPU(X86) && defined(_MSC_VER) && _MSC_VER >=1700
- // Workaround for an MSVC 2012 x86 optimizer bug. Remove once the bug is fixed.
- // See https://connect.microsoft.com/VisualStudio/feedback/details/780362/optimization-bug-of-range-comparison
- // for more details.
- data8[i] = c >= 'a' && c <= 'z' ? c & ~0x20 : c;
-#else
data8[i] = toASCIIUpper(c);
-#endif
}
if (!(ored & ~0x7F))
- return newImpl.releaseNonNull();
+ return newImpl;
// Do a slower implementation for cases that include non-ASCII Latin-1 characters.
int numberSharpSCharacters = 0;
// There are two special cases.
- // 1. latin-1 characters when converted to upper case are 16 bit characters.
+ // 1. Some Latin-1 characters when converted to upper case are 16 bit characters.
// 2. Lower case sharp-S converts to "SS" (two characters)
for (int32_t i = 0; i < length; ++i) {
LChar c = m_data8[i];
@@ -509,7 +468,7 @@ PassRef<StringImpl> StringImpl::upper()
++numberSharpSCharacters;
ASSERT(u_toupper(c) <= 0xFFFF);
UChar upper = u_toupper(c);
- if (UNLIKELY(upper > 0xff)) {
+ if (UNLIKELY(upper > 0xFF)) {
// Since this upper-cased character does not fit in an 8-bit string, we need to take the 16-bit path.
goto upconvert;
}
@@ -517,7 +476,7 @@ PassRef<StringImpl> StringImpl::upper()
}
if (!numberSharpSCharacters)
- return newImpl.releaseNonNull();
+ return newImpl;
// We have numberSSCharacters sharp-s characters, but none of the other special characters.
newImpl = createUninitialized(m_length + numberSharpSCharacters, data8);
@@ -535,14 +494,15 @@ PassRef<StringImpl> StringImpl::upper()
}
}
- return newImpl.releaseNonNull();
+ return newImpl;
}
upconvert:
- const UChar* source16 = deprecatedCharacters();
+ auto upconvertedCharacters = StringView(*this).upconvertedCharacters();
+ const UChar* source16 = upconvertedCharacters;
UChar* data16;
- RefPtr<StringImpl> newImpl = createUninitialized(m_length, data16);
+ auto newImpl = createUninitialized(m_length, data16);
// Do a faster loop for the case where all the characters are ASCII.
unsigned ored = 0;
@@ -552,19 +512,19 @@ upconvert:
data16[i] = toASCIIUpper(c);
}
if (!(ored & ~0x7F))
- return newImpl.releaseNonNull();
+ return newImpl;
// Do a slower implementation for cases that include non-ASCII characters.
UErrorCode status = U_ZERO_ERROR;
int32_t realLength = u_strToUpper(data16, length, source16, m_length, "", &status);
if (U_SUCCESS(status) && realLength == length)
- return newImpl.releaseNonNull();
+ return newImpl;
newImpl = createUninitialized(realLength, data16);
status = U_ZERO_ERROR;
u_strToUpper(data16, realLength, source16, m_length, "", &status);
if (U_FAILURE(status))
return *this;
- return newImpl.releaseNonNull();
+ return newImpl;
}
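// A minimal standalone sketch of special case 1 and 2 noted above: Latin-1
// uppercasing can grow the string because U+00DF (sharp s) expands to "SS",
// which is why the destination is sized as length + numberSharpSCharacters.
// latin1Uppercase is an illustrative name; unlike the real ICU-backed code it
// only handles ASCII letters and sharp s.
#include <string>

static std::string latin1Uppercase(const std::string& latin1)
{
    size_t sharpSCount = 0;
    for (unsigned char c : latin1) {
        if (c == 0xDF) // U+00DF LATIN SMALL LETTER SHARP S
            ++sharpSCount;
    }

    std::string result;
    result.reserve(latin1.size() + sharpSCount); // One extra character per sharp s.
    for (unsigned char c : latin1) {
        if (c == 0xDF)
            result += "SS";
        else if (c >= 'a' && c <= 'z')
            result += static_cast<char>(c - ('a' - 'A'));
        else
            result += static_cast<char>(c);
    }
    return result;
}

// For example, the Latin-1 bytes for "straße" (0x73 0x74 0x72 0x61 0xDF 0x65)
// uppercase to "STRASSE", one character longer than the input.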
static inline bool needsTurkishCasingRules(const AtomicString& localeIdentifier)
@@ -577,14 +537,14 @@ static inline bool needsTurkishCasingRules(const AtomicString& localeIdentifier)
&& (localeIdentifier.length() == 2 || localeIdentifier[2] == '-');
}
-PassRef<StringImpl> StringImpl::lower(const AtomicString& localeIdentifier)
+Ref<StringImpl> StringImpl::convertToLowercaseWithLocale(const AtomicString& localeIdentifier)
{
// Use the more-optimized code path most of the time.
// Assuming here that the only locale-specific lowercasing is the Turkish casing rules.
// FIXME: Could possibly optimize further by looking for the specific sequences
// that have locale-specific lowercasing. There are only three of them.
if (!needsTurkishCasingRules(localeIdentifier))
- return lower();
+ return convertToLowercaseWithoutLocale();
// FIXME: Could share more code with the main StringImpl::lower by factoring out
// this last part into a shared function that takes a locale string, since this is
@@ -597,28 +557,29 @@ PassRef<StringImpl> StringImpl::lower(const AtomicString& localeIdentifier)
// Below, we pass in the hardcoded locale "tr". Passing that is more efficient than
// allocating memory just to turn localeIdentifier into a C string, and we assume
// there is no difference between the uppercasing for "tr" and "az" locales.
- const UChar* source16 = deprecatedCharacters();
+ auto upconvertedCharacters = StringView(*this).upconvertedCharacters();
+ const UChar* source16 = upconvertedCharacters;
UChar* data16;
- RefPtr<StringImpl> newString = createUninitialized(length, data16);
+ auto newString = createUninitialized(length, data16);
UErrorCode status = U_ZERO_ERROR;
int realLength = u_strToLower(data16, length, source16, length, "tr", &status);
if (U_SUCCESS(status) && realLength == length)
- return newString.releaseNonNull();
+ return newString;
newString = createUninitialized(realLength, data16);
status = U_ZERO_ERROR;
u_strToLower(data16, realLength, source16, length, "tr", &status);
if (U_FAILURE(status))
return *this;
- return newString.releaseNonNull();
+ return newString;
}
-PassRef<StringImpl> StringImpl::upper(const AtomicString& localeIdentifier)
+Ref<StringImpl> StringImpl::convertToUppercaseWithLocale(const AtomicString& localeIdentifier)
{
// Use the more-optimized code path most of the time.
// Assuming here that the only locale-specific lowercasing is the Turkish casing rules,
// and that the only affected character is lowercase "i".
if (!needsTurkishCasingRules(localeIdentifier) || find('i') == notFound)
- return upper();
+ return convertToUppercaseWithoutLocale();
if (m_length > static_cast<unsigned>(std::numeric_limits<int32_t>::max()))
CRASH();
@@ -627,95 +588,145 @@ PassRef<StringImpl> StringImpl::upper(const AtomicString& localeIdentifier)
// Below, we pass in the hardcoded locale "tr". Passing that is more efficient than
// allocating memory just to turn localeIdentifier into a C string, and we assume
// there is no difference between the uppercasing for "tr" and "az" locales.
- const UChar* source16 = deprecatedCharacters();
+ auto upconvertedCharacters = StringView(*this).upconvertedCharacters();
+ const UChar* source16 = upconvertedCharacters;
UChar* data16;
- RefPtr<StringImpl> newString = createUninitialized(length, data16);
+ auto newString = createUninitialized(length, data16);
UErrorCode status = U_ZERO_ERROR;
int realLength = u_strToUpper(data16, length, source16, length, "tr", &status);
if (U_SUCCESS(status) && realLength == length)
- return newString.releaseNonNull();
+ return newString;
newString = createUninitialized(realLength, data16);
status = U_ZERO_ERROR;
u_strToUpper(data16, realLength, source16, length, "tr", &status);
if (U_FAILURE(status))
return *this;
- return newString.releaseNonNull();
-}
-
-PassRef<StringImpl> StringImpl::fill(UChar character)
-{
- if (!(character & ~0x7F)) {
- LChar* data;
- auto newImpl = createUninitialized(m_length, data);
- for (unsigned i = 0; i < m_length; ++i)
- data[i] = character;
- return newImpl;
- }
- UChar* data;
- auto newImpl = createUninitialized(m_length, data);
- for (unsigned i = 0; i < m_length; ++i)
- data[i] = character;
- return newImpl;
+ return newString;
}
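// For example, under the Turkish casing rules handled above, lowercase "i"
// uppercases to U+0130 (LATIN CAPITAL LETTER I WITH DOT ABOVE) rather than a
// plain "I", which is why the fast path is only taken when find('i') reports
// notFound.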
-PassRef<StringImpl> StringImpl::foldCase()
+Ref<StringImpl> StringImpl::foldCase()
{
- if (m_length > static_cast<unsigned>(std::numeric_limits<int32_t>::max()))
- CRASH();
- int32_t length = m_length;
-
if (is8Bit()) {
- // Do a faster loop for the case where all the characters are ASCII.
- LChar* data;
- auto newImpl = createUninitialized(m_length, data);
- LChar ored = 0;
-
- for (int32_t i = 0; i < length; ++i) {
- LChar c = m_data8[i];
- data[i] = toASCIILower(c);
- ored |= c;
+ unsigned failingIndex;
+ for (unsigned i = 0; i < m_length; ++i) {
+ auto character = m_data8[i];
+ if (UNLIKELY(!isASCII(character) || isASCIIUpper(character))) {
+ failingIndex = i;
+ goto SlowPath;
+ }
}
+ // String was all ASCII and no uppercase, so just return as-is.
+ return *this;
- if (!(ored & ~0x7F))
- return newImpl;
-
- // Do a slower implementation for cases that include non-ASCII Latin-1 characters.
- // FIXME: Shouldn't this use u_foldCase instead of u_tolower?
- for (int32_t i = 0; i < length; ++i) {
- ASSERT(u_tolower(m_data8[i]) <= 0xFF);
- data[i] = static_cast<LChar>(u_tolower(m_data8[i]));
+SlowPath:
+ bool need16BitCharacters = false;
+ for (unsigned i = failingIndex; i < m_length; ++i) {
+ auto character = m_data8[i];
+ if (character == 0xB5 || character == 0xDF) {
+ need16BitCharacters = true;
+ break;
+ }
}
- return newImpl;
+ if (!need16BitCharacters) {
+ LChar* data8;
+ auto folded = createUninitializedInternalNonEmpty(m_length, data8);
+ for (unsigned i = 0; i < failingIndex; ++i)
+ data8[i] = m_data8[i];
+ for (unsigned i = failingIndex; i < m_length; ++i) {
+ auto character = m_data8[i];
+ if (isASCII(character))
+ data8[i] = toASCIILower(character);
+ else {
+ ASSERT(u_foldCase(character, U_FOLD_CASE_DEFAULT) <= 0xFF);
+ data8[i] = static_cast<LChar>(u_foldCase(character, U_FOLD_CASE_DEFAULT));
+ }
+ }
+ return folded;
+ }
+ } else {
+ // FIXME: Unclear why we use goto in the 8-bit case, and a different approach in the 16-bit case.
+ bool noUpper = true;
+ unsigned ored = 0;
+ for (unsigned i = 0; i < m_length; ++i) {
+ UChar character = m_data16[i];
+ if (UNLIKELY(isASCIIUpper(character)))
+ noUpper = false;
+ ored |= character;
+ }
+ if (!(ored & ~0x7F)) {
+ if (noUpper) {
+ // String was all ASCII and no uppercase, so just return as-is.
+ return *this;
+ }
+ UChar* data16;
+ auto folded = createUninitializedInternalNonEmpty(m_length, data16);
+ for (unsigned i = 0; i < m_length; ++i)
+ data16[i] = toASCIILower(m_data16[i]);
+ return folded;
+ }
}
- // Do a faster loop for the case where all the characters are ASCII.
- UChar* data;
- RefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
- UChar ored = 0;
- for (int32_t i = 0; i < length; ++i) {
- UChar c = m_data16[i];
- ored |= c;
- data[i] = toASCIILower(c);
- }
- if (!(ored & ~0x7F))
- return newImpl.releaseNonNull();
+ if (m_length > static_cast<unsigned>(std::numeric_limits<int32_t>::max()))
+ CRASH();
- // Do a slower implementation for cases that include non-ASCII characters.
+ auto upconvertedCharacters = StringView(*this).upconvertedCharacters();
+
+ UChar* data;
+ auto folded = createUninitializedInternalNonEmpty(m_length, data);
+ int32_t length = m_length;
UErrorCode status = U_ZERO_ERROR;
- int32_t realLength = u_strFoldCase(data, length, m_data16, m_length, U_FOLD_CASE_DEFAULT, &status);
+ int32_t realLength = u_strFoldCase(data, length, upconvertedCharacters, length, U_FOLD_CASE_DEFAULT, &status);
if (U_SUCCESS(status) && realLength == length)
- return newImpl.releaseNonNull();
- newImpl = createUninitialized(realLength, data);
+ return folded;
+ ASSERT(realLength > length);
+ folded = createUninitializedInternalNonEmpty(realLength, data);
status = U_ZERO_ERROR;
- u_strFoldCase(data, realLength, m_data16, m_length, U_FOLD_CASE_DEFAULT, &status);
+ u_strFoldCase(data, realLength, upconvertedCharacters, length, U_FOLD_CASE_DEFAULT, &status);
if (U_FAILURE(status))
return *this;
- return newImpl.releaseNonNull();
+ return folded;
+}
+
+template<StringImpl::CaseConvertType type, typename CharacterType>
+ALWAYS_INLINE Ref<StringImpl> StringImpl::convertASCIICase(StringImpl& impl, const CharacterType* data, unsigned length)
+{
+ unsigned failingIndex;
+ for (unsigned i = 0; i < length; ++i) {
+ CharacterType character = data[i];
+ if (type == CaseConvertType::Lower ? UNLIKELY(isASCIIUpper(character)) : LIKELY(isASCIILower(character))) {
+ failingIndex = i;
+ goto SlowPath;
+ }
+ }
+ return impl;
+
+SlowPath:
+ CharacterType* newData;
+ auto newImpl = createUninitializedInternalNonEmpty(length, newData);
+ for (unsigned i = 0; i < failingIndex; ++i)
+ newData[i] = data[i];
+ for (unsigned i = failingIndex; i < length; ++i)
+ newData[i] = type == CaseConvertType::Lower ? toASCIILower(data[i]) : toASCIIUpper(data[i]);
+ return newImpl;
+}
+
+Ref<StringImpl> StringImpl::convertToASCIILowercase()
+{
+ if (is8Bit())
+ return convertASCIICase<CaseConvertType::Lower>(*this, m_data8, m_length);
+ return convertASCIICase<CaseConvertType::Lower>(*this, m_data16, m_length);
+}
+
+Ref<StringImpl> StringImpl::convertToASCIIUppercase()
+{
+ if (is8Bit())
+ return convertASCIICase<CaseConvertType::Upper>(*this, m_data8, m_length);
+ return convertASCIICase<CaseConvertType::Upper>(*this, m_data16, m_length);
}
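// A minimal standalone sketch of the pattern convertASCIICase() follows above:
// scan for the first character that needs changing, return the input untouched
// on the fast path, and only allocate and convert from that failing index on.
// asciiLowercase and the use of std::string are illustrative, not WTF API.
#include <string>

static std::string asciiLowercase(const std::string& input)
{
    size_t failingIndex = input.size();
    for (size_t i = 0; i < input.size(); ++i) {
        char c = input[i];
        if (c >= 'A' && c <= 'Z') {
            failingIndex = i;
            break;
        }
    }
    if (failingIndex == input.size())
        return input; // Fast path: nothing to convert (the real code returns *this without allocating).

    std::string result = input; // Characters before failingIndex are already correct.
    for (size_t i = failingIndex; i < result.size(); ++i) {
        char c = result[i];
        if (c >= 'A' && c <= 'Z')
            result[i] = static_cast<char>(c + ('a' - 'A'));
    }
    return result;
}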
template <class UCharPredicate>
-inline PassRef<StringImpl> StringImpl::stripMatchedCharacters(UCharPredicate predicate)
+inline Ref<StringImpl> StringImpl::stripMatchedCharacters(UCharPredicate predicate)
{
if (!m_length)
return *this;
@@ -763,18 +774,18 @@ public:
}
};
-PassRef<StringImpl> StringImpl::stripWhiteSpace()
+Ref<StringImpl> StringImpl::stripWhiteSpace()
{
return stripMatchedCharacters(SpaceOrNewlinePredicate());
}
-PassRef<StringImpl> StringImpl::stripWhiteSpace(IsWhiteSpaceFunctionPtr isWhiteSpace)
+Ref<StringImpl> StringImpl::stripWhiteSpace(IsWhiteSpaceFunctionPtr isWhiteSpace)
{
return stripMatchedCharacters(UCharPredicate(isWhiteSpace));
}
template <typename CharType>
-ALWAYS_INLINE PassRef<StringImpl> StringImpl::removeCharacters(const CharType* characters, CharacterMatchFunctionPtr findMatch)
+ALWAYS_INLINE Ref<StringImpl> StringImpl::removeCharacters(const CharType* characters, CharacterMatchFunctionPtr findMatch)
{
const CharType* from = characters;
const CharType* fromend = from + m_length;
@@ -803,10 +814,10 @@ ALWAYS_INLINE PassRef<StringImpl> StringImpl::removeCharacters(const CharType* c
data.shrink(outc);
- return adopt(data);
+ return adopt(WTFMove(data));
}
-PassRef<StringImpl> StringImpl::removeCharacters(CharacterMatchFunctionPtr findMatch)
+Ref<StringImpl> StringImpl::removeCharacters(CharacterMatchFunctionPtr findMatch)
{
if (is8Bit())
return removeCharacters(characters8(), findMatch);
@@ -814,11 +825,11 @@ PassRef<StringImpl> StringImpl::removeCharacters(CharacterMatchFunctionPtr findM
}
template <typename CharType, class UCharPredicate>
-inline PassRef<StringImpl> StringImpl::simplifyMatchedCharactersToSpace(UCharPredicate predicate)
+inline Ref<StringImpl> StringImpl::simplifyMatchedCharactersToSpace(UCharPredicate predicate)
{
StringBuffer<CharType> data(m_length);
- const CharType* from = getCharacters<CharType>();
+ const CharType* from = characters<CharType>();
const CharType* fromend = from + m_length;
int outc = 0;
bool changedToSpace = false;
@@ -847,17 +858,17 @@ inline PassRef<StringImpl> StringImpl::simplifyMatchedCharactersToSpace(UCharPre
data.shrink(outc);
- return adopt(data);
+ return adopt(WTFMove(data));
}
-PassRef<StringImpl> StringImpl::simplifyWhiteSpace()
+Ref<StringImpl> StringImpl::simplifyWhiteSpace()
{
if (is8Bit())
return StringImpl::simplifyMatchedCharactersToSpace<LChar>(SpaceOrNewlinePredicate());
return StringImpl::simplifyMatchedCharactersToSpace<UChar>(SpaceOrNewlinePredicate());
}
-PassRef<StringImpl> StringImpl::simplifyWhiteSpace(IsWhiteSpaceFunctionPtr isWhiteSpace)
+Ref<StringImpl> StringImpl::simplifyWhiteSpace(IsWhiteSpaceFunctionPtr isWhiteSpace)
{
if (is8Bit())
return StringImpl::simplifyMatchedCharactersToSpace<LChar>(UCharPredicate(isWhiteSpace));
@@ -948,24 +959,54 @@ float StringImpl::toFloat(bool* ok)
return charactersToFloat(characters16(), m_length, ok);
}
-bool equalIgnoringCase(const LChar* a, const LChar* b, unsigned length)
+// Table is based on ftp://ftp.unicode.org/Public/UNIDATA/CaseFolding.txt
+static const UChar latin1CaseFoldTable[256] = {
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
+ 0x0040, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
+ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f,
+ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f,
+ 0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7, 0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
+ 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x03bc, 0x00b6, 0x00b7, 0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
+ 0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7, 0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
+ 0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00d7, 0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00df,
+ 0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7, 0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
+ 0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7, 0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff,
+};
+
+static inline bool equalCompatibilityCaseless(const LChar* a, const LChar* b, unsigned length)
{
while (length--) {
- if (StringImpl::latin1CaseFoldTable[*a++] != StringImpl::latin1CaseFoldTable[*b++])
+ if (latin1CaseFoldTable[*a++] != latin1CaseFoldTable[*b++])
return false;
}
return true;
}
-bool equalIgnoringCase(const UChar* a, const LChar* b, unsigned length)
+static inline bool equalCompatibilityCaseless(const UChar* a, const LChar* b, unsigned length)
{
while (length--) {
- if (u_foldCase(*a++, U_FOLD_CASE_DEFAULT) != StringImpl::latin1CaseFoldTable[*b++])
+ if (u_foldCase(*a++, U_FOLD_CASE_DEFAULT) != latin1CaseFoldTable[*b++])
return false;
}
return true;
}
+static inline bool equalCompatibilityCaseless(const LChar* a, const UChar* b, unsigned length)
+{
+ return equalCompatibilityCaseless(b, a, length);
+}
+
+static inline bool equalCompatibilityCaseless(const UChar* a, const UChar* b, unsigned length)
+{
+ return !u_memcasecmp(a, b, length, U_FOLD_CASE_DEFAULT);
+}
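// A minimal standalone sketch of the Latin-1 caseless comparison above: each
// byte is mapped through a fold function before comparing. This simplified
// fold covers only ASCII and the Latin-1 uppercase letters; the real table
// also folds 0xB5 (micro sign) to U+03BC, which is why it stores UChar values.
// foldLatin1 and equalCaselessLatin1 are illustrative names, not WTF API.
#include <cstddef>

static unsigned short foldLatin1(unsigned char c)
{
    if (c >= 'A' && c <= 'Z')
        return c + 0x20; // ASCII uppercase to lowercase.
    if (c >= 0xC0 && c <= 0xDE && c != 0xD7)
        return c + 0x20; // Latin-1 uppercase letters, skipping the multiplication sign.
    return c;
}

static bool equalCaselessLatin1(const unsigned char* a, const unsigned char* b, size_t length)
{
    while (length--) {
        if (foldLatin1(*a++) != foldLatin1(*b++))
            return false;
    }
    return true;
}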
+
size_t StringImpl::find(CharacterMatchFunctionPtr matchFunction, unsigned start)
{
if (is8Bit())
@@ -986,8 +1027,11 @@ size_t StringImpl::find(const LChar* matchString, unsigned index)
return std::min(index, length());
// Optimization 1: fast case for strings of length 1.
- if (matchLength == 1)
+ if (matchLength == 1) {
+ if (is8Bit())
+ return WTF::find(characters8(), length(), matchString[0], index);
return WTF::find(characters16(), length(), *matchString, index);
+ }
// Check index & matchLength are in range.
if (index > length())
@@ -998,10 +1042,32 @@ size_t StringImpl::find(const LChar* matchString, unsigned index)
// delta is the number of additional times to test; delta == 0 means test only once.
unsigned delta = searchLength - matchLength;
- const UChar* searchCharacters = deprecatedCharacters() + index;
-
// Optimization 2: keep a running hash of the strings,
// only call equal if the hashes match.
+
+ if (is8Bit()) {
+ const LChar* searchCharacters = characters8() + index;
+
+ unsigned searchHash = 0;
+ unsigned matchHash = 0;
+ for (unsigned i = 0; i < matchLength; ++i) {
+ searchHash += searchCharacters[i];
+ matchHash += matchString[i];
+ }
+
+ unsigned i = 0;
+ while (searchHash != matchHash || !equal(searchCharacters + i, matchString, matchLength)) {
+ if (i == delta)
+ return notFound;
+ searchHash += searchCharacters[i + matchLength];
+ searchHash -= searchCharacters[i];
+ ++i;
+ }
+ return index + i;
+ }
+
+ const UChar* searchCharacters = characters16() + index;
+
unsigned searchHash = 0;
unsigned matchHash = 0;
for (unsigned i = 0; i < matchLength; ++i) {
@@ -1010,7 +1076,6 @@ size_t StringImpl::find(const LChar* matchString, unsigned index)
}
unsigned i = 0;
- // keep looping until we match
while (searchHash != matchHash || !equal(searchCharacters + i, matchString, matchLength)) {
if (i == delta)
return notFound;
@@ -1042,45 +1107,27 @@ size_t StringImpl::findIgnoringCase(const LChar* matchString, unsigned index)
// delta is the number of additional times to test; delta == 0 means test only once.
unsigned delta = searchLength - matchLength;
- const UChar* searchCharacters = deprecatedCharacters() + index;
+ if (is8Bit()) {
+ const LChar* searchCharacters = characters8() + index;
- unsigned i = 0;
- // keep looping until we match
- while (!equalIgnoringCase(searchCharacters + i, matchString, matchLength)) {
- if (i == delta)
- return notFound;
- ++i;
+ unsigned i = 0;
+ while (!equalCompatibilityCaseless(searchCharacters + i, matchString, matchLength)) {
+ if (i == delta)
+ return notFound;
+ ++i;
+ }
+ return index + i;
}
- return index + i;
-}
-
-template <typename SearchCharacterType, typename MatchCharacterType>
-ALWAYS_INLINE static size_t findInner(const SearchCharacterType* searchCharacters, const MatchCharacterType* matchCharacters, unsigned index, unsigned searchLength, unsigned matchLength)
-{
- // Optimization: keep a running hash of the strings,
- // only call equal() if the hashes match.
- // delta is the number of additional times to test; delta == 0 means test only once.
- unsigned delta = searchLength - matchLength;
-
- unsigned searchHash = 0;
- unsigned matchHash = 0;
-
- for (unsigned i = 0; i < matchLength; ++i) {
- searchHash += searchCharacters[i];
- matchHash += matchCharacters[i];
- }
+ const UChar* searchCharacters = characters16() + index;
unsigned i = 0;
- // keep looping until we match
- while (searchHash != matchHash || !equal(searchCharacters + i, matchCharacters, matchLength)) {
+ while (!equalCompatibilityCaseless(searchCharacters + i, matchString, matchLength)) {
if (i == delta)
return notFound;
- searchHash += searchCharacters[i + matchLength];
- searchHash -= searchCharacters[i];
++i;
}
- return index + i;
+ return index + i;
}
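// A minimal standalone sketch of the summed rolling-hash search used by the
// find() overloads in this file: the needle and the current window each keep a
// running sum of character values, and the full comparison runs only when the
// sums match. rollingHashFind and the use of std::string are illustrative only.
#include <algorithm>
#include <cstddef>
#include <string>

static size_t rollingHashFind(const std::string& haystack, const std::string& needle, size_t start = 0)
{
    const size_t notFound = static_cast<size_t>(-1);
    if (needle.empty())
        return std::min(start, haystack.size());
    if (start > haystack.size() || needle.size() > haystack.size() - start)
        return notFound;

    size_t delta = haystack.size() - start - needle.size();
    unsigned searchHash = 0;
    unsigned matchHash = 0;
    for (size_t i = 0; i < needle.size(); ++i) {
        searchHash += static_cast<unsigned char>(haystack[start + i]);
        matchHash += static_cast<unsigned char>(needle[i]);
    }

    size_t i = 0;
    // Slide the window one character at a time, updating the sum in O(1).
    while (searchHash != matchHash || haystack.compare(start + i, needle.size(), needle) != 0) {
        if (i == delta)
            return notFound;
        searchHash += static_cast<unsigned char>(haystack[start + i + needle.size()]);
        searchHash -= static_cast<unsigned char>(haystack[start + i]);
        ++i;
    }
    return start + i;
}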
size_t StringImpl::find(StringImpl* matchString)
@@ -1128,35 +1175,7 @@ size_t StringImpl::find(StringImpl* matchString, unsigned index)
if (UNLIKELY(!matchString))
return notFound;
- unsigned matchLength = matchString->length();
-
- // Optimization 1: fast case for strings of length 1.
- if (matchLength == 1) {
- if (is8Bit())
- return WTF::find(characters8(), length(), (*matchString)[0], index);
- return WTF::find(characters16(), length(), (*matchString)[0], index);
- }
-
- if (UNLIKELY(!matchLength))
- return std::min(index, length());
-
- // Check index & matchLength are in range.
- if (index > length())
- return notFound;
- unsigned searchLength = length() - index;
- if (matchLength > searchLength)
- return notFound;
-
- if (is8Bit()) {
- if (matchString->is8Bit())
- return findInner(characters8() + index, matchString->characters8(), index, searchLength, matchLength);
- return findInner(characters8() + index, matchString->characters16(), index, searchLength, matchLength);
- }
-
- if (matchString->is8Bit())
- return findInner(characters16() + index, matchString->characters8(), index, searchLength, matchLength);
-
- return findInner(characters16() + index, matchString->characters16(), index, searchLength, matchLength);
+ return findCommon(*this, *matchString, index);
}
template <typename SearchCharacterType, typename MatchCharacterType>
@@ -1167,7 +1186,7 @@ ALWAYS_INLINE static size_t findIgnoringCaseInner(const SearchCharacterType* sea
unsigned i = 0;
// keep looping until we match
- while (!equalIgnoringCase(searchCharacters + i, matchCharacters, matchLength)) {
+ while (!equalCompatibilityCaseless(searchCharacters + i, matchCharacters, matchLength)) {
if (i == delta)
return notFound;
++i;
@@ -1203,11 +1222,28 @@ size_t StringImpl::findIgnoringCase(StringImpl* matchString, unsigned index)
return findIgnoringCaseInner(characters16() + index, matchString->characters16(), index, searchLength, matchLength);
}
-size_t StringImpl::findNextLineStart(unsigned index)
+size_t StringImpl::findIgnoringASCIICase(const StringImpl& matchString) const
{
- if (is8Bit())
- return WTF::findNextLineStart(characters8(), m_length, index);
- return WTF::findNextLineStart(characters16(), m_length, index);
+ return ::WTF::findIgnoringASCIICase(*this, matchString, 0);
+}
+
+size_t StringImpl::findIgnoringASCIICase(const StringImpl& matchString, unsigned startOffset) const
+{
+ return ::WTF::findIgnoringASCIICase(*this, matchString, startOffset);
+}
+
+size_t StringImpl::findIgnoringASCIICase(const StringImpl* matchString) const
+{
+ if (!matchString)
+ return notFound;
+ return ::WTF::findIgnoringASCIICase(*this, *matchString, 0);
+}
+
+size_t StringImpl::findIgnoringASCIICase(const StringImpl* matchString, unsigned startOffset) const
+{
+ if (!matchString)
+ return notFound;
+ return ::WTF::findIgnoringASCIICase(*this, *matchString, startOffset);
}
size_t StringImpl::reverseFind(UChar c, unsigned index)
@@ -1284,7 +1320,7 @@ ALWAYS_INLINE static size_t reverseFindIgnoringCaseInner(const SearchCharacterTy
unsigned delta = std::min(index, length - matchLength);
// keep looping until we match
- while (!equalIgnoringCase(searchCharacters + delta, matchCharacters, matchLength)) {
+ while (!equalCompatibilityCaseless(searchCharacters + delta, matchCharacters, matchLength)) {
if (!delta)
return notFound;
--delta;
@@ -1330,26 +1366,52 @@ ALWAYS_INLINE static bool equalInner(const StringImpl* stringImpl, unsigned star
return equal(stringImpl->characters16() + startOffset, reinterpret_cast<const LChar*>(matchString), matchLength);
}
if (stringImpl->is8Bit())
- return equalIgnoringCase(stringImpl->characters8() + startOffset, reinterpret_cast<const LChar*>(matchString), matchLength);
- return equalIgnoringCase(stringImpl->characters16() + startOffset, reinterpret_cast<const LChar*>(matchString), matchLength);
+ return equalCompatibilityCaseless(stringImpl->characters8() + startOffset, reinterpret_cast<const LChar*>(matchString), matchLength);
+ return equalCompatibilityCaseless(stringImpl->characters16() + startOffset, reinterpret_cast<const LChar*>(matchString), matchLength);
+}
+
+ALWAYS_INLINE static bool equalInner(const StringImpl& stringImpl, unsigned startOffset, const StringImpl& matchString)
+{
+ if (startOffset > stringImpl.length())
+ return false;
+ if (matchString.length() > stringImpl.length())
+ return false;
+ if (matchString.length() + startOffset > stringImpl.length())
+ return false;
+
+ if (stringImpl.is8Bit()) {
+ if (matchString.is8Bit())
+ return equal(stringImpl.characters8() + startOffset, matchString.characters8(), matchString.length());
+ return equal(stringImpl.characters8() + startOffset, matchString.characters16(), matchString.length());
+ }
+ if (matchString.is8Bit())
+ return equal(stringImpl.characters16() + startOffset, matchString.characters8(), matchString.length());
+ return equal(stringImpl.characters16() + startOffset, matchString.characters16(), matchString.length());
}
bool StringImpl::startsWith(const StringImpl* str) const
{
if (!str)
return false;
+ return ::WTF::startsWith(*this, *str);
+}
- if (str->length() > length())
+bool StringImpl::startsWith(const StringImpl& str) const
+{
+ return ::WTF::startsWith(*this, str);
+}
+
+bool StringImpl::startsWithIgnoringASCIICase(const StringImpl* prefix) const
+{
+ if (!prefix)
return false;
- if (is8Bit()) {
- if (str->is8Bit())
- return equal(characters8(), str->characters8(), str->length());
- return equal(characters8(), str->characters16(), str->length());
- }
- if (str->is8Bit())
- return equal(characters16(), str->characters8(), str->length());
- return equal(characters16(), str->characters16(), str->length());
+ return ::WTF::startsWithIgnoringASCIICase(*this, *prefix);
+}
+
+bool StringImpl::startsWithIgnoringASCIICase(const StringImpl& prefix) const
+{
+ return ::WTF::startsWithIgnoringASCIICase(*this, prefix);
}
bool StringImpl::startsWith(UChar character) const
@@ -1365,6 +1427,24 @@ bool StringImpl::startsWith(const char* matchString, unsigned matchLength, bool
return equalInner(this, 0, matchString, matchLength, caseSensitive);
}
+bool StringImpl::hasInfixStartingAt(const StringImpl& matchString, unsigned startOffset) const
+{
+ return equalInner(*this, startOffset, matchString);
+}
+
+bool StringImpl::endsWith(StringImpl* suffix)
+{
+ if (!suffix)
+ return false;
+
+ return ::WTF::endsWith(*this, *suffix);
+}
+
+bool StringImpl::endsWith(StringImpl& suffix)
+{
+ return ::WTF::endsWith(*this, suffix);
+}
+
bool StringImpl::endsWith(StringImpl* matchString, bool caseSensitive)
{
ASSERT(matchString);
@@ -1375,6 +1455,19 @@ bool StringImpl::endsWith(StringImpl* matchString, bool caseSensitive)
return false;
}
+bool StringImpl::endsWithIgnoringASCIICase(const StringImpl* suffix) const
+{
+ if (!suffix)
+ return false;
+
+ return ::WTF::endsWithIgnoringASCIICase(*this, *suffix);
+}
+
+bool StringImpl::endsWithIgnoringASCIICase(const StringImpl& suffix) const
+{
+ return ::WTF::endsWithIgnoringASCIICase(*this, suffix);
+}
+
bool StringImpl::endsWith(UChar character) const
{
return m_length && (*this)[m_length - 1] == character;
@@ -1389,7 +1482,14 @@ bool StringImpl::endsWith(const char* matchString, unsigned matchLength, bool ca
return equalInner(this, startOffset, matchString, matchLength, caseSensitive);
}
-PassRef<StringImpl> StringImpl::replace(UChar oldC, UChar newC)
+bool StringImpl::hasInfixEndingAt(const StringImpl& matchString, unsigned endOffset) const
+{
+ if (endOffset < matchString.length())
+ return false;
+ return equalInner(*this, endOffset - matchString.length(), matchString);
+}
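// A minimal sketch of what the two hasInfix helpers above check, expressed
// with std::string for clarity; infixStartsAt and infixEndsAt are illustrative
// names, not WTF API.
#include <string>

static bool infixStartsAt(const std::string& s, const std::string& infix, size_t startOffset)
{
    return startOffset <= s.size()
        && infix.size() <= s.size() - startOffset
        && s.compare(startOffset, infix.size(), infix) == 0;
}

static bool infixEndsAt(const std::string& s, const std::string& infix, size_t endOffset)
{
    return endOffset >= infix.size() && infixStartsAt(s, infix, endOffset - infix.size());
}

// For example, infixStartsAt("WebKit", "bKi", 2) and infixEndsAt("WebKit", "bKi", 5)
// are both true: the match occupies offsets [2, 5).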
+
+Ref<StringImpl> StringImpl::replace(UChar oldC, UChar newC)
{
if (oldC == newC)
return *this;
@@ -1450,7 +1550,7 @@ PassRef<StringImpl> StringImpl::replace(UChar oldC, UChar newC)
return newImpl;
}
-PassRef<StringImpl> StringImpl::replace(unsigned position, unsigned lengthToReplace, StringImpl* str)
+Ref<StringImpl> StringImpl::replace(unsigned position, unsigned lengthToReplace, StringImpl* str)
{
position = std::min(position, length());
lengthToReplace = std::min(lengthToReplace, length() - position);
@@ -1495,7 +1595,7 @@ PassRef<StringImpl> StringImpl::replace(unsigned position, unsigned lengthToRepl
return newImpl;
}
-PassRef<StringImpl> StringImpl::replace(UChar pattern, StringImpl* replacement)
+Ref<StringImpl> StringImpl::replace(UChar pattern, StringImpl* replacement)
{
if (!replacement)
return *this;
@@ -1506,7 +1606,7 @@ PassRef<StringImpl> StringImpl::replace(UChar pattern, StringImpl* replacement)
return replace(pattern, replacement->m_data16, replacement->length());
}
-PassRef<StringImpl> StringImpl::replace(UChar pattern, const LChar* replacement, unsigned repStrLength)
+Ref<StringImpl> StringImpl::replace(UChar pattern, const LChar* replacement, unsigned repStrLength)
{
ASSERT(replacement);
@@ -1583,7 +1683,7 @@ PassRef<StringImpl> StringImpl::replace(UChar pattern, const LChar* replacement,
return newImpl;
}
-PassRef<StringImpl> StringImpl::replace(UChar pattern, const UChar* replacement, unsigned repStrLength)
+Ref<StringImpl> StringImpl::replace(UChar pattern, const UChar* replacement, unsigned repStrLength)
{
ASSERT(replacement);
@@ -1663,7 +1763,7 @@ PassRef<StringImpl> StringImpl::replace(UChar pattern, const UChar* replacement,
return newImpl;
}
-PassRef<StringImpl> StringImpl::replace(StringImpl* pattern, StringImpl* replacement)
+Ref<StringImpl> StringImpl::replace(StringImpl* pattern, StringImpl* replacement)
{
if (!pattern || !replacement)
return *this;
@@ -1770,34 +1870,9 @@ PassRef<StringImpl> StringImpl::replace(StringImpl* pattern, StringImpl* replace
return newImpl;
}
-static inline bool stringImplContentEqual(const StringImpl* a, const StringImpl* b)
-{
- unsigned aLength = a->length();
- unsigned bLength = b->length();
- if (aLength != bLength)
- return false;
-
- if (a->is8Bit()) {
- if (b->is8Bit())
- return equal(a->characters8(), b->characters8(), aLength);
-
- return equal(a->characters8(), b->characters16(), aLength);
- }
-
- if (b->is8Bit())
- return equal(a->characters16(), b->characters8(), aLength);
-
- return equal(a->characters16(), b->characters16(), aLength);
-}
-
bool equal(const StringImpl* a, const StringImpl* b)
{
- if (a == b)
- return true;
- if (!a || !b)
- return false;
-
- return stringImplContentEqual(a, b);
+ return equalCommon(a, b);
}
template <typename CharType>
@@ -1860,109 +1935,34 @@ bool equal(const StringImpl* a, const LChar* b)
return !b[length];
}
-bool equalNonNull(const StringImpl* a, const StringImpl* b)
+bool equal(const StringImpl& a, const StringImpl& b)
{
- ASSERT(a && b);
- if (a == b)
- return true;
-
- return stringImplContentEqual(a, b);
+ return equalCommon(a, b);
}
-bool equalIgnoringCase(const StringImpl* a, const StringImpl* b)
+bool equalIgnoringNullity(StringImpl* a, StringImpl* b)
{
- if (a == b)
+ if (!a && b && !b->length())
return true;
- if (!a || !b)
- return false;
-
- return CaseFoldingHash::equal(a, b);
-}
-
-bool equalIgnoringCase(const StringImpl* a, const LChar* b)
-{
- if (!a)
- return !b;
- if (!b)
- return !a;
-
- unsigned length = a->length();
-
- // Do a faster loop for the case where all the characters are ASCII.
- UChar ored = 0;
- bool equal = true;
- if (a->is8Bit()) {
- const LChar* as = a->characters8();
- for (unsigned i = 0; i != length; ++i) {
- LChar bc = b[i];
- if (!bc)
- return false;
- UChar ac = as[i];
- ored |= ac;
- equal = equal && (toASCIILower(ac) == toASCIILower(bc));
- }
-
- // Do a slower implementation for cases that include non-ASCII characters.
- if (ored & ~0x7F) {
- equal = true;
- for (unsigned i = 0; i != length; ++i)
- equal = equal && u_foldCase(as[i], U_FOLD_CASE_DEFAULT) == u_foldCase(b[i], U_FOLD_CASE_DEFAULT);
- }
-
- return equal && !b[length];
- }
-
- const UChar* as = a->characters16();
- for (unsigned i = 0; i != length; ++i) {
- LChar bc = b[i];
- if (!bc)
- return false;
- UChar ac = as[i];
- ored |= ac;
- equal = equal && (toASCIILower(ac) == toASCIILower(bc));
- }
-
- // Do a slower implementation for cases that include non-ASCII characters.
- if (ored & ~0x7F) {
- equal = true;
- for (unsigned i = 0; i != length; ++i) {
- equal = equal && u_foldCase(as[i], U_FOLD_CASE_DEFAULT) == u_foldCase(b[i], U_FOLD_CASE_DEFAULT);
- }
- }
-
- return equal && !b[length];
+ if (!b && a && !a->length())
+ return true;
+ return equal(a, b);
}
-bool equalIgnoringCaseNonNull(const StringImpl* a, const StringImpl* b)
+bool equalIgnoringASCIICase(const StringImpl* a, const StringImpl* b)
{
- ASSERT(a && b);
if (a == b)
return true;
-
- unsigned length = a->length();
- if (length != b->length())
+ if (!a || !b)
return false;
-
- if (a->is8Bit()) {
- if (b->is8Bit())
- return equalIgnoringCase(a->characters8(), b->characters8(), length);
-
- return equalIgnoringCase(b->characters16(), a->characters8(), length);
- }
-
- if (b->is8Bit())
- return equalIgnoringCase(a->characters16(), b->characters8(), length);
-
- return equalIgnoringCase(a->characters16(), b->characters16(), length);
+ return equalIgnoringASCIICaseCommon(*a, *b);
}
-bool equalIgnoringNullity(StringImpl* a, StringImpl* b)
+bool equalIgnoringASCIICaseNonNull(const StringImpl* a, const StringImpl* b)
{
- if (!a && b && !b->length())
- return true;
- if (!b && a && !a->length())
- return true;
- return equal(a, b);
+ ASSERT(a);
+ ASSERT(b);
+ return equalIgnoringASCIICase(*a, *b);
}
UCharDirection StringImpl::defaultWritingDirection(bool* hasStrongDirectionality)
@@ -1985,7 +1985,7 @@ UCharDirection StringImpl::defaultWritingDirection(bool* hasStrongDirectionality
return U_LEFT_TO_RIGHT;
}
-PassRef<StringImpl> StringImpl::adopt(StringBuffer<LChar>& buffer)
+Ref<StringImpl> StringImpl::adopt(StringBuffer<LChar>&& buffer)
{
unsigned length = buffer.length();
if (!length)
@@ -1993,7 +1993,7 @@ PassRef<StringImpl> StringImpl::adopt(StringBuffer<LChar>& buffer)
return adoptRef(*new StringImpl(buffer.release(), length));
}
-PassRef<StringImpl> StringImpl::adopt(StringBuffer<UChar>& buffer)
+Ref<StringImpl> StringImpl::adopt(StringBuffer<UChar>&& buffer)
{
unsigned length = buffer.length();
if (!length)
@@ -2005,11 +2005,7 @@ size_t StringImpl::sizeInBytes() const
{
// FIXME: support substrings
size_t size = length();
- if (is8Bit()) {
- if (has16BitShadow()) {
- size += 2 * size;
- }
- } else
+ if (!is8Bit())
size *= 2;
return size + sizeof(*this);
}
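// For example, given the accounting above, a five-character 8-bit string
// reports 5 + sizeof(StringImpl) bytes and a five-character 16-bit string
// reports 10 + sizeof(StringImpl); per the FIXME, substring sharing is not yet
// reflected in this number.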
@@ -2023,8 +2019,7 @@ static inline void putUTF8Triple(char*& buffer, UChar ch)
*buffer++ = static_cast<char>((ch & 0x3F) | 0x80);
}
-bool StringImpl::utf8Impl(
- const UChar* characters, unsigned length, char*& buffer, size_t bufferSize, ConversionMode mode)
+bool StringImpl::utf8Impl(const UChar* characters, unsigned length, char*& buffer, size_t bufferSize, ConversionMode mode)
{
if (mode == StrictConversionReplacingUnpairedSurrogatesWithFFFD) {
const UChar* charactersEnd = characters + length;
@@ -2075,8 +2070,21 @@ bool StringImpl::utf8Impl(
return true;
}
-CString StringImpl::utf8ForCharacters(
- const UChar* characters, unsigned length, ConversionMode mode)
+CString StringImpl::utf8ForCharacters(const LChar* characters, unsigned length)
+{
+ if (!length)
+ return CString("", 0);
+ if (length > std::numeric_limits<unsigned>::max() / 3)
+ return CString();
+ Vector<char, 1024> bufferVector(length * 3);
+ char* buffer = bufferVector.data();
+ const LChar* source = characters;
+ ConversionResult result = convertLatin1ToUTF8(&source, source + length, &buffer, buffer + bufferVector.size());
+ ASSERT_UNUSED(result, result != targetExhausted); // (length * 3) should be sufficient for any conversion
+ return CString(bufferVector.data(), buffer - bufferVector.data());
+}
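// Worth noting for the bound used above: a Latin-1 code point encodes to at
// most two UTF-8 bytes, so the (length * 3) buffer, sized to match the UChar
// overload below where a single code unit can need three bytes, is always
// sufficient here.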
+
+CString StringImpl::utf8ForCharacters(const UChar* characters, unsigned length, ConversionMode mode)
{
if (!length)
return CString("", 0);
@@ -2131,25 +2139,21 @@ CString StringImpl::utf8(ConversionMode mode) const
return utf8ForRange(0, length(), mode);
}
-// Table is based on ftp://ftp.unicode.org/Public/UNIDATA/CaseFolding.txt
-const UChar StringImpl::latin1CaseFoldTable[256] = {
- 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
- 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
- 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
- 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
- 0x0040, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
- 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
- 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
- 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
- 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f,
- 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f,
- 0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7, 0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
- 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x03bc, 0x00b6, 0x00b7, 0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
- 0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7, 0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
- 0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00d7, 0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00df,
- 0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7, 0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
- 0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7, 0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff,
-};
-
+bool equalIgnoringNullity(const UChar* a, size_t aLength, StringImpl* b)
+{
+ if (!b)
+ return !aLength;
+ if (aLength != b->length())
+ return false;
+ if (b->is8Bit()) {
+ const LChar* bCharacters = b->characters8();
+ for (unsigned i = 0; i < aLength; ++i) {
+ if (a[i] != bCharacters[i])
+ return false;
+ }
+ return true;
+ }
+ return !memcmp(a, b->characters16(), b->length() * sizeof(UChar));
+}
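// For example, with the overload above a null StringImpl* compares equal to a
// zero-length UChar buffer but unequal to any non-empty one, and a length
// mismatch returns false before any character comparison is attempted.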
} // namespace WTF
diff --git a/Source/WTF/wtf/text/StringImpl.h b/Source/WTF/wtf/text/StringImpl.h
index 770acf000..b2c45e8fa 100644
--- a/Source/WTF/wtf/text/StringImpl.h
+++ b/Source/WTF/wtf/text/StringImpl.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1999 Lars Knoll (knoll@kde.org)
- * Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2005-2010, 2013-2016 Apple Inc. All rights reserved.
* Copyright (C) 2009 Google Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
@@ -24,16 +24,16 @@
#define StringImpl_h
#include <limits.h>
+#include <unicode/uchar.h>
+#include <unicode/ustring.h>
#include <wtf/ASCIICType.h>
-#include <wtf/CompilationThread.h>
-#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Forward.h>
+#include <wtf/Hasher.h>
#include <wtf/MathExtras.h>
#include <wtf/StdLibExtras.h>
-#include <wtf/StringHasher.h>
#include <wtf/Vector.h>
#include <wtf/text/ConversionMode.h>
-#include <wtf/unicode/Unicode.h>
+#include <wtf/text/StringCommon.h>
#if USE(CF)
typedef const struct __CFString * CFStringRef;
@@ -43,27 +43,28 @@ typedef const struct __CFString * CFStringRef;
@class NSString;
#endif
-// FIXME: This is a temporary layering violation while we move string code to WTF.
-// Landing the file moves in one patch, will follow on with patches to change the namespaces.
namespace JSC {
-struct IdentifierASCIIStringTranslator;
namespace LLInt { class Data; }
class LLIntOffsetsExtractor;
-template <typename T> struct IdentifierCharBufferTranslator;
-struct IdentifierLCharFromUCharTranslator;
}
namespace WTF {
+class SymbolImpl;
+class SymbolRegistry;
+
struct CStringTranslator;
-template<typename CharacterType> struct HashAndCharactersTranslator;
+struct CharBufferFromLiteralDataTranslator;
struct HashAndUTF8CharactersTranslator;
struct LCharBufferTranslator;
-struct CharBufferFromLiteralDataTranslator;
+struct StringHash;
struct SubstringTranslator;
struct UCharBufferTranslator;
+
template<typename> class RetainPtr;
+template<typename> struct HashAndCharactersTranslator;
+
enum TextCaseSensitivity {
TextCaseSensitive,
TextCaseInsensitive
@@ -72,10 +73,10 @@ enum TextCaseSensitivity {
typedef bool (*CharacterMatchFunctionPtr)(UChar);
typedef bool (*IsWhiteSpaceFunctionPtr)(UChar);
-// Define STRING_STATS to turn on run time statistics of string sizes and memory usage
-#undef STRING_STATS
+// Define STRING_STATS to 1 to turn on run time statistics of string sizes and memory usage
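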
+#define STRING_STATS 0
-#ifdef STRING_STATS
+#if STRING_STATS
struct StringStats {
inline void add8BitString(unsigned length, bool isSubString = false)
{
@@ -93,33 +94,29 @@ struct StringStats {
m_total16BitData += length;
}
- inline void addUpconvertedString(unsigned length)
- {
- ++m_numberUpconvertedStrings;
- m_totalUpconvertedData += length;
- }
-
- void removeString(StringImpl*);
+ void removeString(StringImpl&);
void printStats();
static const unsigned s_printStringStatsFrequency = 5000;
- static unsigned s_stringRemovesTillPrintStats;
-
- unsigned m_totalNumberStrings;
- unsigned m_number8BitStrings;
- unsigned m_number16BitStrings;
- unsigned m_numberUpconvertedStrings;
- unsigned long long m_total8BitData;
- unsigned long long m_total16BitData;
- unsigned long long m_totalUpconvertedData;
+ static std::atomic<unsigned> s_stringRemovesTillPrintStats;
+
+ std::atomic<unsigned> m_refCalls;
+ std::atomic<unsigned> m_derefCalls;
+
+ std::atomic<unsigned> m_totalNumberStrings;
+ std::atomic<unsigned> m_number8BitStrings;
+ std::atomic<unsigned> m_number16BitStrings;
+ std::atomic<unsigned long long> m_total8BitData;
+ std::atomic<unsigned long long> m_total16BitData;
};
#define STRING_STATS_ADD_8BIT_STRING(length) StringImpl::stringStats().add8BitString(length)
#define STRING_STATS_ADD_8BIT_STRING2(length, isSubString) StringImpl::stringStats().add8BitString(length, isSubString)
#define STRING_STATS_ADD_16BIT_STRING(length) StringImpl::stringStats().add16BitString(length)
#define STRING_STATS_ADD_16BIT_STRING2(length, isSubString) StringImpl::stringStats().add16BitString(length, isSubString)
-#define STRING_STATS_ADD_UPCONVERTED_STRING(length) StringImpl::stringStats().addUpconvertedString(length)
#define STRING_STATS_REMOVE_STRING(string) StringImpl::stringStats().removeString(string)
+#define STRING_STATS_REF_STRING(string) ++StringImpl::stringStats().m_refCalls;
+#define STRING_STATS_DEREF_STRING(string) ++StringImpl::stringStats().m_derefCalls;
#else
#define STRING_STATS_ADD_8BIT_STRING(length) ((void)0)
#define STRING_STATS_ADD_8BIT_STRING2(length, isSubString) ((void)0)
@@ -127,14 +124,12 @@ struct StringStats {
#define STRING_STATS_ADD_16BIT_STRING2(length, isSubString) ((void)0)
#define STRING_STATS_ADD_UPCONVERTED_STRING(length) ((void)0)
#define STRING_STATS_REMOVE_STRING(string) ((void)0)
+#define STRING_STATS_REF_STRING(string) ((void)0)
+#define STRING_STATS_DEREF_STRING(string) ((void)0)
#endif
class StringImpl {
WTF_MAKE_NONCOPYABLE(StringImpl); WTF_MAKE_FAST_ALLOCATED;
- friend struct JSC::IdentifierASCIIStringTranslator;
- friend struct JSC::IdentifierCharBufferTranslator<LChar>;
- friend struct JSC::IdentifierCharBufferTranslator<UChar>;
- friend struct JSC::IdentifierLCharFromUCharTranslator;
friend struct WTF::CStringTranslator;
template<typename CharacterType> friend struct WTF::HashAndCharactersTranslator;
friend struct WTF::HashAndUTF8CharactersTranslator;
@@ -142,9 +137,9 @@ class StringImpl {
friend struct WTF::LCharBufferTranslator;
friend struct WTF::SubstringTranslator;
friend struct WTF::UCharBufferTranslator;
- friend class AtomicStringImpl;
friend class JSC::LLInt::Data;
friend class JSC::LLIntOffsetsExtractor;
+ friend class SymbolImpl;
private:
enum BufferOwnership {
@@ -153,42 +148,26 @@ private:
BufferSubstring,
};
- // Used to construct static strings, which have an special refCount that can never hit zero.
- // This means that the static string will never be destroyed, which is important because
- // static strings will be shared across threads & ref-counted in a non-threadsafe manner.
- enum ConstructStaticStringTag { ConstructStaticString };
- StringImpl(const UChar* characters, unsigned length, ConstructStaticStringTag)
- : m_refCount(s_refCountFlagIsStaticString)
- , m_length(length)
- , m_data16(characters)
- , m_buffer(0)
- , m_hashAndFlags(s_hashFlagIsIdentifier | BufferOwned)
- {
- // Ensure that the hash is computed so that AtomicStringHash can call existingHash()
- // with impunity. The empty string is special because it is never entered into
- // AtomicString's HashKey, but still needs to compare correctly.
- STRING_STATS_ADD_16BIT_STRING(m_length);
-
- hash();
- }
-
- // Used to construct static strings, which have an special refCount that can never hit zero.
- // This means that the static string will never be destroyed, which is important because
- // static strings will be shared across threads & ref-counted in a non-threadsafe manner.
- StringImpl(const LChar* characters, unsigned length, ConstructStaticStringTag)
- : m_refCount(s_refCountFlagIsStaticString)
- , m_length(length)
- , m_data8(characters)
- , m_buffer(0)
- , m_hashAndFlags(s_hashFlag8BitBuffer | s_hashFlagIsIdentifier | BufferOwned)
- {
- // Ensure that the hash is computed so that AtomicStringHash can call existingHash()
- // with impunity. The empty string is special because it is never entered into
- // AtomicString's HashKey, but still needs to compare correctly.
- STRING_STATS_ADD_8BIT_STRING(m_length);
-
- hash();
- }
+ // The bottom 6 bits in the hash are flags.
+public:
+ static constexpr const unsigned s_flagCount = 6;
+private:
+ static constexpr const unsigned s_flagMask = (1u << s_flagCount) - 1;
+ static_assert(s_flagCount <= StringHasher::flagCount, "StringHasher reserves enough bits for StringImpl flags");
+ static constexpr const unsigned s_flagStringKindCount = 4;
+
+ static constexpr const unsigned s_hashFlagStringKindIsAtomic = 1u << (s_flagStringKindCount);
+ static constexpr const unsigned s_hashFlagStringKindIsSymbol = 1u << (s_flagStringKindCount + 1);
+ static constexpr const unsigned s_hashMaskStringKind = s_hashFlagStringKindIsAtomic | s_hashFlagStringKindIsSymbol;
+ static constexpr const unsigned s_hashFlag8BitBuffer = 1u << 3;
+ static constexpr const unsigned s_hashFlagDidReportCost = 1u << 2;
+ static constexpr const unsigned s_hashMaskBufferOwnership = (1u << 0) | (1u << 1);
+
+ enum StringKind {
+ StringNormal = 0u, // non-symbol, non-atomic
+ StringAtomic = s_hashFlagStringKindIsAtomic, // non-symbol, atomic
+ StringSymbol = s_hashFlagStringKindIsSymbol, // symbol, non-atomic
+ };
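+    // Reading the constants above together (an illustrative summary, assuming
+    // the bit assignments shown here are complete): the low s_flagCount (6) bits
+    // of m_hashAndFlags are flags, with bits 0-1 holding the buffer ownership,
+    // bit 2 the did-report-cost flag, bit 3 the 8-bit-buffer flag, and bits 4-5
+    // the string kind; the remaining high bits hold the string's hash, so the
+    // hash is recovered by shifting right by s_flagCount.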
// FIXME: there has to be a less hacky way to do this.
enum Force8Bit { Force8BitConstructor };
@@ -196,9 +175,8 @@ private:
StringImpl(unsigned length, Force8Bit)
: m_refCount(s_refCountIncrement)
, m_length(length)
- , m_data8(reinterpret_cast<const LChar*>(this + 1))
- , m_buffer(0)
- , m_hashAndFlags(s_hashFlag8BitBuffer | BufferInternal)
+ , m_data8(tailPointer<LChar>())
+ , m_hashAndFlags(s_hashFlag8BitBuffer | StringNormal | BufferInternal)
{
ASSERT(m_data8);
ASSERT(m_length);
@@ -210,9 +188,8 @@ private:
StringImpl(unsigned length)
: m_refCount(s_refCountIncrement)
, m_length(length)
- , m_data16(reinterpret_cast<const UChar*>(this + 1))
- , m_buffer(0)
- , m_hashAndFlags(BufferInternal)
+ , m_data16(tailPointer<UChar>())
+ , m_hashAndFlags(StringNormal | BufferInternal)
{
ASSERT(m_data16);
ASSERT(m_length);
@@ -225,8 +202,7 @@ private:
: m_refCount(s_refCountIncrement)
, m_length(length)
, m_data8(characters.leakPtr())
- , m_buffer(0)
- , m_hashAndFlags(s_hashFlag8BitBuffer | BufferOwned)
+ , m_hashAndFlags(s_hashFlag8BitBuffer | StringNormal | BufferOwned)
{
ASSERT(m_data8);
ASSERT(m_length);
@@ -239,26 +215,24 @@ private:
: m_refCount(s_refCountIncrement)
, m_length(length)
, m_data16(characters)
- , m_buffer(0)
- , m_hashAndFlags(BufferInternal)
+ , m_hashAndFlags(StringNormal | BufferInternal)
{
ASSERT(m_data16);
ASSERT(m_length);
- STRING_STATS_ADD_16BIT_STRING(0);
+ STRING_STATS_ADD_16BIT_STRING(m_length);
}
StringImpl(const LChar* characters, unsigned length, ConstructWithoutCopyingTag)
: m_refCount(s_refCountIncrement)
, m_length(length)
, m_data8(characters)
- , m_buffer(0)
- , m_hashAndFlags(s_hashFlag8BitBuffer | BufferInternal)
+ , m_hashAndFlags(s_hashFlag8BitBuffer | StringNormal | BufferInternal)
{
ASSERT(m_data8);
ASSERT(m_length);
- STRING_STATS_ADD_8BIT_STRING(0);
+ STRING_STATS_ADD_8BIT_STRING(m_length);
}
// Create a StringImpl adopting ownership of the provided buffer (BufferOwned)
@@ -266,8 +240,7 @@ private:
: m_refCount(s_refCountIncrement)
, m_length(length)
, m_data16(characters.leakPtr())
- , m_buffer(0)
- , m_hashAndFlags(BufferOwned)
+ , m_hashAndFlags(StringNormal | BufferOwned)
{
ASSERT(m_data16);
ASSERT(m_length);
@@ -276,109 +249,74 @@ private:
}
// Used to create new strings that are a substring of an existing 8-bit StringImpl (BufferSubstring)
- StringImpl(const LChar* characters, unsigned length, PassRefPtr<StringImpl> base)
+ StringImpl(const LChar* characters, unsigned length, Ref<StringImpl>&& base)
: m_refCount(s_refCountIncrement)
, m_length(length)
, m_data8(characters)
- , m_substringBuffer(base.leakRef())
- , m_hashAndFlags(s_hashFlag8BitBuffer | BufferSubstring)
+ , m_hashAndFlags(s_hashFlag8BitBuffer | StringNormal | BufferSubstring)
{
ASSERT(is8Bit());
ASSERT(m_data8);
ASSERT(m_length);
- ASSERT(m_substringBuffer->bufferOwnership() != BufferSubstring);
+ ASSERT(base->bufferOwnership() != BufferSubstring);
+
+ substringBuffer() = &base.leakRef();
STRING_STATS_ADD_8BIT_STRING2(m_length, true);
}
// Used to create new strings that are a substring of an existing 16-bit StringImpl (BufferSubstring)
- StringImpl(const UChar* characters, unsigned length, PassRefPtr<StringImpl> base)
+ StringImpl(const UChar* characters, unsigned length, Ref<StringImpl>&& base)
: m_refCount(s_refCountIncrement)
, m_length(length)
, m_data16(characters)
- , m_substringBuffer(base.leakRef())
- , m_hashAndFlags(BufferSubstring)
+ , m_hashAndFlags(StringNormal | BufferSubstring)
{
ASSERT(!is8Bit());
ASSERT(m_data16);
ASSERT(m_length);
- ASSERT(m_substringBuffer->bufferOwnership() != BufferSubstring);
+ ASSERT(base->bufferOwnership() != BufferSubstring);
- STRING_STATS_ADD_16BIT_STRING2(m_length, true);
- }
+ substringBuffer() = &base.leakRef();
- enum CreateEmptyUnique_T { CreateEmptyUnique };
- StringImpl(CreateEmptyUnique_T)
- : m_refCount(s_refCountIncrement)
- , m_length(0)
- // We expect m_buffer to be initialized to 0 as we use it
- // to represent a null terminated buffer.
- , m_data16(reinterpret_cast<const UChar*>(&m_buffer))
- , m_buffer(0)
- {
- ASSERT(m_data16);
- // Set the hash early, so that all empty unique StringImpls have a hash,
- // and don't use the normal hashing algorithm - the unique nature of these
- // keys means that we don't need them to match any other string (in fact,
- // that's exactly the oposite of what we want!), and teh normal hash would
- // lead to lots of conflicts.
- unsigned hash = cryptographicallyRandomNumber() | 1;
- hash <<= s_flagCount;
- if (!hash)
- hash = 1 << s_flagCount;
- m_hashAndFlags = hash | BufferInternal;
-
- STRING_STATS_ADD_16BIT_STRING(m_length);
+ STRING_STATS_ADD_16BIT_STRING2(m_length, true);
}
- ~StringImpl();
-
public:
WTF_EXPORT_STRING_API static void destroy(StringImpl*);
- WTF_EXPORT_STRING_API static PassRef<StringImpl> create(const UChar*, unsigned length);
- WTF_EXPORT_STRING_API static PassRef<StringImpl> create(const LChar*, unsigned length);
- WTF_EXPORT_STRING_API static PassRef<StringImpl> create8BitIfPossible(const UChar*, unsigned length);
+ WTF_EXPORT_STRING_API static Ref<StringImpl> create(const UChar*, unsigned length);
+ WTF_EXPORT_STRING_API static Ref<StringImpl> create(const LChar*, unsigned length);
+ WTF_EXPORT_STRING_API static Ref<StringImpl> create8BitIfPossible(const UChar*, unsigned length);
template<size_t inlineCapacity>
- static PassRef<StringImpl> create8BitIfPossible(const Vector<UChar, inlineCapacity>& vector)
+ static Ref<StringImpl> create8BitIfPossible(const Vector<UChar, inlineCapacity>& vector)
{
return create8BitIfPossible(vector.data(), vector.size());
}
- WTF_EXPORT_STRING_API static PassRef<StringImpl> create8BitIfPossible(const UChar*);
+ WTF_EXPORT_STRING_API static Ref<StringImpl> create8BitIfPossible(const UChar*);
- ALWAYS_INLINE static PassRef<StringImpl> create(const char* s, unsigned length) { return create(reinterpret_cast<const LChar*>(s), length); }
- WTF_EXPORT_STRING_API static PassRef<StringImpl> create(const LChar*);
- ALWAYS_INLINE static PassRef<StringImpl> create(const char* s) { return create(reinterpret_cast<const LChar*>(s)); }
+ ALWAYS_INLINE static Ref<StringImpl> create(const char* s, unsigned length) { return create(reinterpret_cast<const LChar*>(s), length); }
+ WTF_EXPORT_STRING_API static Ref<StringImpl> create(const LChar*);
+ ALWAYS_INLINE static Ref<StringImpl> create(const char* s) { return create(reinterpret_cast<const LChar*>(s)); }
- static ALWAYS_INLINE PassRef<StringImpl> create8(PassRefPtr<StringImpl> rep, unsigned offset, unsigned length)
+ static ALWAYS_INLINE Ref<StringImpl> createSubstringSharingImpl(StringImpl& rep, unsigned offset, unsigned length)
{
- ASSERT(rep);
- ASSERT(length <= rep->length());
+ ASSERT(length <= rep.length());
if (!length)
return *empty();
- ASSERT(rep->is8Bit());
- StringImpl* ownerRep = (rep->bufferOwnership() == BufferSubstring) ? rep->m_substringBuffer : rep.get();
- return adoptRef(*new StringImpl(rep->m_data8 + offset, length, ownerRep));
- }
-
- static ALWAYS_INLINE PassRef<StringImpl> create(PassRefPtr<StringImpl> rep, unsigned offset, unsigned length)
- {
- ASSERT(rep);
- ASSERT(length <= rep->length());
-
- if (!length)
- return *empty();
+ auto* ownerRep = ((rep.bufferOwnership() == BufferSubstring) ? rep.substringBuffer() : &rep);
- StringImpl* ownerRep = (rep->bufferOwnership() == BufferSubstring) ? rep->m_substringBuffer : rep.get();
- if (rep->is8Bit())
- return adoptRef(*new StringImpl(rep->m_data8 + offset, length, ownerRep));
- return adoptRef(*new StringImpl(rep->m_data16 + offset, length, ownerRep));
+ // We allocate a buffer that contains both the StringImpl struct and the pointer to the owner string.
+ auto* stringImpl = static_cast<StringImpl*>(fastMalloc(allocationSize<StringImpl*>(1)));
+ if (rep.is8Bit())
+ return adoptRef(*new (NotNull, stringImpl) StringImpl(rep.m_data8 + offset, length, *ownerRep));
+ return adoptRef(*new (NotNull, stringImpl) StringImpl(rep.m_data16 + offset, length, *ownerRep));
}
template<unsigned charactersCount>
- ALWAYS_INLINE static PassRef<StringImpl> createFromLiteral(const char (&characters)[charactersCount])
+ ALWAYS_INLINE static Ref<StringImpl> createFromLiteral(const char (&characters)[charactersCount])
{
COMPILE_ASSERT(charactersCount > 1, StringImplFromLiteralNotEmpty);
COMPILE_ASSERT((charactersCount - 1 <= ((unsigned(~0) - sizeof(StringImpl)) / sizeof(LChar))), StringImplFromLiteralCannotOverflow);
@@ -387,53 +325,50 @@ public:
}
// FIXME: Transition off of these functions to createWithoutCopying instead.
- WTF_EXPORT_STRING_API static PassRef<StringImpl> createFromLiteral(const char* characters, unsigned length);
- WTF_EXPORT_STRING_API static PassRef<StringImpl> createFromLiteral(const char* characters);
+ WTF_EXPORT_STRING_API static Ref<StringImpl> createFromLiteral(const char* characters, unsigned length);
+ WTF_EXPORT_STRING_API static Ref<StringImpl> createFromLiteral(const char* characters);
- WTF_EXPORT_STRING_API static PassRef<StringImpl> createWithoutCopying(const UChar* characters, unsigned length);
- WTF_EXPORT_STRING_API static PassRef<StringImpl> createWithoutCopying(const LChar* characters, unsigned length);
+ WTF_EXPORT_STRING_API static Ref<StringImpl> createWithoutCopying(const UChar* characters, unsigned length);
+ WTF_EXPORT_STRING_API static Ref<StringImpl> createWithoutCopying(const LChar* characters, unsigned length);
- WTF_EXPORT_STRING_API static PassRef<StringImpl> createUninitialized(unsigned length, LChar*& data);
- WTF_EXPORT_STRING_API static PassRef<StringImpl> createUninitialized(unsigned length, UChar*& data);
- template <typename T> static ALWAYS_INLINE PassRefPtr<StringImpl> tryCreateUninitialized(unsigned length, T*& output)
+ WTF_EXPORT_STRING_API static Ref<StringImpl> createUninitialized(unsigned length, LChar*& data);
+ WTF_EXPORT_STRING_API static Ref<StringImpl> createUninitialized(unsigned length, UChar*& data);
+ template <typename T> static ALWAYS_INLINE RefPtr<StringImpl> tryCreateUninitialized(unsigned length, T*& output)
{
if (!length) {
- output = 0;
+ output = nullptr;
return empty();
}
if (length > ((std::numeric_limits<unsigned>::max() - sizeof(StringImpl)) / sizeof(T))) {
- output = 0;
- return 0;
+ output = nullptr;
+ return nullptr;
}
StringImpl* resultImpl;
- if (!tryFastMalloc(sizeof(T) * length + sizeof(StringImpl)).getValue(resultImpl)) {
- output = 0;
- return 0;
+ if (!tryFastMalloc(allocationSize<T>(length)).getValue(resultImpl)) {
+ output = nullptr;
+ return nullptr;
}
- output = reinterpret_cast<T*>(resultImpl + 1);
+ output = resultImpl->tailPointer<T>();
return constructInternal<T>(resultImpl, length);
}
- static PassRef<StringImpl> createEmptyUnique()
- {
- return adoptRef(*new StringImpl(CreateEmptyUnique));
- }
-
- // Reallocate the StringImpl. The originalString must be only owned by the PassRefPtr,
+ // Reallocate the StringImpl. The originalString must be only owned by the Ref,
// and the buffer ownership must be BufferInternal. Just like the input pointer of realloc(),
// the originalString can't be used after this function.
- static PassRef<StringImpl> reallocate(PassRefPtr<StringImpl> originalString, unsigned length, LChar*& data);
- static PassRef<StringImpl> reallocate(PassRefPtr<StringImpl> originalString, unsigned length, UChar*& data);
+ static Ref<StringImpl> reallocate(Ref<StringImpl>&& originalString, unsigned length, LChar*& data);
+ static Ref<StringImpl> reallocate(Ref<StringImpl>&& originalString, unsigned length, UChar*& data);
static unsigned flagsOffset() { return OBJECT_OFFSETOF(StringImpl, m_hashAndFlags); }
static unsigned flagIs8Bit() { return s_hashFlag8BitBuffer; }
- static unsigned flagIsIdentifier() { return s_hashFlagIsIdentifier; }
+ static unsigned flagIsAtomic() { return s_hashFlagStringKindIsAtomic; }
+ static unsigned flagIsSymbol() { return s_hashFlagStringKindIsSymbol; }
+ static unsigned maskStringKind() { return s_hashMaskStringKind; }
static unsigned dataOffset() { return OBJECT_OFFSETOF(StringImpl, m_data8); }
template<typename CharType, size_t inlineCapacity, typename OverflowHandler>
- static PassRef<StringImpl> adopt(Vector<CharType, inlineCapacity, OverflowHandler>& vector)
+ static Ref<StringImpl> adopt(Vector<CharType, inlineCapacity, OverflowHandler>&& vector)
{
if (size_t size = vector.size()) {
ASSERT(vector.data());
@@ -444,31 +379,24 @@ public:
return *empty();
}
- WTF_EXPORT_STRING_API static PassRef<StringImpl> adopt(StringBuffer<UChar>&);
- WTF_EXPORT_STRING_API static PassRef<StringImpl> adopt(StringBuffer<LChar>&);
+ WTF_EXPORT_STRING_API static Ref<StringImpl> adopt(StringBuffer<UChar>&&);
+ WTF_EXPORT_STRING_API static Ref<StringImpl> adopt(StringBuffer<LChar>&&);
unsigned length() const { return m_length; }
+ static ptrdiff_t lengthMemoryOffset() { return OBJECT_OFFSETOF(StringImpl, m_length); }
bool is8Bit() const { return m_hashAndFlags & s_hashFlag8BitBuffer; }
ALWAYS_INLINE const LChar* characters8() const { ASSERT(is8Bit()); return m_data8; }
ALWAYS_INLINE const UChar* characters16() const { ASSERT(!is8Bit()); return m_data16; }
- const UChar* characters() const { return deprecatedCharacters(); } // FIXME: Delete this.
- ALWAYS_INLINE const UChar* deprecatedCharacters() const
- {
- if (!is8Bit())
- return m_data16;
-
- return getData16SlowCase();
- }
template <typename CharType>
- ALWAYS_INLINE const CharType * getCharacters() const;
+ ALWAYS_INLINE const CharType *characters() const;
size_t cost() const
{
// For substrings, return the cost of the base string.
if (bufferOwnership() == BufferSubstring)
- return m_substringBuffer->cost();
+ return substringBuffer()->cost();
if (m_hashAndFlags & s_hashFlagDidReportCost)
return 0;
@@ -486,7 +414,7 @@ public:
return 0;
if (bufferOwnership() == BufferSubstring)
- return divideRoundedUp(m_substringBuffer->costDuringGC(), refCount());
+ return divideRoundedUp(substringBuffer()->costDuringGC(), refCount());
size_t result = m_length;
if (!is8Bit())
@@ -496,40 +424,28 @@ public:
WTF_EXPORT_STRING_API size_t sizeInBytes() const;
- bool has16BitShadow() const { return m_hashAndFlags & s_hashFlagHas16BitShadow; }
- WTF_EXPORT_STRING_API void upconvertCharacters(unsigned, unsigned) const;
- bool isIdentifier() const { return m_hashAndFlags & s_hashFlagIsIdentifier; }
- bool isIdentifierOrUnique() const { return isIdentifier() || isEmptyUnique(); }
- void setIsIdentifier(bool isIdentifier)
- {
- ASSERT(!isStatic());
- ASSERT(!isEmptyUnique());
- if (isIdentifier)
- m_hashAndFlags |= s_hashFlagIsIdentifier;
- else
- m_hashAndFlags &= ~s_hashFlagIsIdentifier;
- }
-
- bool isEmptyUnique() const
- {
- return !length() && !isStatic();
- }
+ StringKind stringKind() const { return static_cast<StringKind>(m_hashAndFlags & s_hashMaskStringKind); }
+ bool isSymbol() const { return m_hashAndFlags & s_hashFlagStringKindIsSymbol; }
+ bool isAtomic() const { return m_hashAndFlags & s_hashFlagStringKindIsAtomic; }
- bool isAtomic() const { return m_hashAndFlags & s_hashFlagIsAtomic; }
void setIsAtomic(bool isAtomic)
{
ASSERT(!isStatic());
- ASSERT(!isEmptyUnique());
- if (isAtomic)
- m_hashAndFlags |= s_hashFlagIsAtomic;
- else
- m_hashAndFlags &= ~s_hashFlagIsAtomic;
+ ASSERT(!isSymbol());
+ if (isAtomic) {
+ m_hashAndFlags |= s_hashFlagStringKindIsAtomic;
+ ASSERT(stringKind() == StringAtomic);
+ } else {
+ m_hashAndFlags &= ~s_hashFlagStringKindIsAtomic;
+ ASSERT(stringKind() == StringNormal);
+ }
}
-#ifdef STRING_STATS
- bool isSubString() const { return bufferOwnership() == BufferSubstring; }
+#if STRING_STATS
+ bool isSubString() const { return bufferOwnership() == BufferSubstring; }
#endif
+ static WTF_EXPORT_STRING_API CString utf8ForCharacters(const LChar* characters, unsigned length);
static WTF_EXPORT_STRING_API CString utf8ForCharacters(const UChar* characters, unsigned length, ConversionMode = LenientConversion);
WTF_EXPORT_STRING_API CString utf8ForRange(unsigned offset, unsigned length, ConversionMode = LenientConversion) const;
WTF_EXPORT_STRING_API CString utf8(ConversionMode = LenientConversion) const;
@@ -577,7 +493,12 @@ public:
return existingHash();
return hashSlowCase();
}
-
+
+ WTF_EXPORT_PRIVATE unsigned concurrentHash() const;
+
+ unsigned symbolAwareHash() const;
+ unsigned existingSymbolAwareHash() const;
+
bool isStatic() const { return m_refCount & s_refCountFlagIsStaticString; }
inline size_t refCount() const
@@ -598,13 +519,15 @@ public:
inline void ref()
{
- ASSERT(!isCompilationThread());
+ STRING_STATS_REF_STRING(*this);
+
m_refCount += s_refCountIncrement;
}
inline void deref()
{
- ASSERT(!isCompilationThread());
+ STRING_STATS_DEREF_STRING(*this);
+
unsigned tempRefCount = m_refCount - s_refCountIncrement;
if (!tempRefCount) {
StringImpl::destroy(this);
@@ -613,7 +536,47 @@ public:
m_refCount = tempRefCount;
}
- WTF_EXPORT_PRIVATE static StringImpl* empty();
+ class StaticStringImpl {
+ WTF_MAKE_NONCOPYABLE(StaticStringImpl);
+ public:
+ // Used to construct static strings, which have a special refCount that can never hit zero.
+ // This means that the static string will never be destroyed, which is important because
+ // static strings will be shared across threads & ref-counted in a non-threadsafe manner.
+ template<unsigned charactersCount>
+ constexpr StaticStringImpl(const char (&characters)[charactersCount], StringKind stringKind = StringNormal)
+ : m_refCount(s_refCountFlagIsStaticString)
+ , m_length(charactersCount - 1)
+ , m_data8(characters)
+ , m_hashAndFlags(s_hashFlag8BitBuffer | stringKind | BufferInternal | (StringHasher::computeLiteralHashAndMaskTop8Bits(characters) << s_flagCount))
+ {
+ }
+
+ template<unsigned charactersCount>
+ constexpr StaticStringImpl(const char16_t (&characters)[charactersCount], StringKind stringKind = StringNormal)
+ : m_refCount(s_refCountFlagIsStaticString)
+ , m_length(charactersCount - 1)
+ , m_data16(characters)
+ , m_hashAndFlags(stringKind | BufferInternal | (StringHasher::computeLiteralHashAndMaskTop8Bits(characters) << s_flagCount))
+ {
+ }
+
+ operator StringImpl&()
+ {
+ return *reinterpret_cast<StringImpl*>(this);
+ }
+
+ // These member variables must match the layout of StringImpl.
+ unsigned m_refCount;
+ unsigned m_length;
+ union {
+ const char* m_data8;
+ const char16_t* m_data16;
+ };
+ unsigned m_hashAndFlags;
+ };
+
+ WTF_EXPORTDATA static StaticStringImpl s_atomicEmptyString;
+ ALWAYS_INLINE static StringImpl* empty() { return reinterpret_cast<StringImpl*>(&s_atomicEmptyString); }
// FIXME: Does this really belong in StringImpl?
template <typename T> static void copyChars(T* destination, const T* source, unsigned numCharacters)
@@ -652,9 +615,9 @@ public:
// Some string features, like refcounting and the atomicity flag, are not
// thread-safe. We achieve thread safety by isolation, giving each thread
// its own copy of the string.
- PassRef<StringImpl> isolatedCopy() const;
+ Ref<StringImpl> isolatedCopy() const;
- WTF_EXPORT_STRING_API PassRef<StringImpl> substring(unsigned pos, unsigned len = UINT_MAX);
+ WTF_EXPORT_STRING_API Ref<StringImpl> substring(unsigned pos, unsigned len = UINT_MAX);
UChar at(unsigned i) const
{
@@ -686,23 +649,24 @@ public:
double toDouble(bool* ok = 0);
float toFloat(bool* ok = 0);
- WTF_EXPORT_STRING_API PassRef<StringImpl> lower();
- WTF_EXPORT_STRING_API PassRef<StringImpl> upper();
- WTF_EXPORT_STRING_API PassRef<StringImpl> lower(const AtomicString& localeIdentifier);
- WTF_EXPORT_STRING_API PassRef<StringImpl> upper(const AtomicString& localeIdentifier);
+ WTF_EXPORT_STRING_API Ref<StringImpl> convertToASCIILowercase();
+ WTF_EXPORT_STRING_API Ref<StringImpl> convertToASCIIUppercase();
+ WTF_EXPORT_STRING_API Ref<StringImpl> convertToLowercaseWithoutLocale();
+ WTF_EXPORT_STRING_API Ref<StringImpl> convertToLowercaseWithoutLocaleStartingAtFailingIndex8Bit(unsigned);
+ WTF_EXPORT_STRING_API Ref<StringImpl> convertToUppercaseWithoutLocale();
+ WTF_EXPORT_STRING_API Ref<StringImpl> convertToLowercaseWithLocale(const AtomicString& localeIdentifier);
+ WTF_EXPORT_STRING_API Ref<StringImpl> convertToUppercaseWithLocale(const AtomicString& localeIdentifier);
- WTF_EXPORT_STRING_API PassRef<StringImpl> fill(UChar);
- // FIXME: Do we need fill(char) or can we just do the right thing if UChar is ASCII?
- PassRef<StringImpl> foldCase();
+ Ref<StringImpl> foldCase();
- PassRef<StringImpl> stripWhiteSpace();
- PassRef<StringImpl> stripWhiteSpace(IsWhiteSpaceFunctionPtr);
- WTF_EXPORT_STRING_API PassRef<StringImpl> simplifyWhiteSpace();
- PassRef<StringImpl> simplifyWhiteSpace(IsWhiteSpaceFunctionPtr);
+ Ref<StringImpl> stripWhiteSpace();
+ Ref<StringImpl> stripWhiteSpace(IsWhiteSpaceFunctionPtr);
+ WTF_EXPORT_STRING_API Ref<StringImpl> simplifyWhiteSpace();
+ Ref<StringImpl> simplifyWhiteSpace(IsWhiteSpaceFunctionPtr);
- PassRef<StringImpl> removeCharacters(CharacterMatchFunctionPtr);
+ Ref<StringImpl> removeCharacters(CharacterMatchFunctionPtr);
template <typename CharType>
- ALWAYS_INLINE PassRef<StringImpl> removeCharacters(const CharType* characters, CharacterMatchFunctionPtr);
+ ALWAYS_INLINE Ref<StringImpl> removeCharacters(const CharType* characters, CharacterMatchFunctionPtr);
size_t find(LChar character, unsigned start = 0);
size_t find(char character, unsigned start = 0);
@@ -715,33 +679,44 @@ public:
size_t findIgnoringCase(const LChar*, unsigned index = 0);
ALWAYS_INLINE size_t findIgnoringCase(const char* s, unsigned index = 0) { return findIgnoringCase(reinterpret_cast<const LChar*>(s), index); }
WTF_EXPORT_STRING_API size_t findIgnoringCase(StringImpl*, unsigned index = 0);
-
- WTF_EXPORT_STRING_API size_t findNextLineStart(unsigned index = UINT_MAX);
+ WTF_EXPORT_STRING_API size_t findIgnoringASCIICase(const StringImpl&) const;
+ WTF_EXPORT_STRING_API size_t findIgnoringASCIICase(const StringImpl&, unsigned startOffset) const;
+ WTF_EXPORT_STRING_API size_t findIgnoringASCIICase(const StringImpl*) const;
+ WTF_EXPORT_STRING_API size_t findIgnoringASCIICase(const StringImpl*, unsigned startOffset) const;
WTF_EXPORT_STRING_API size_t reverseFind(UChar, unsigned index = UINT_MAX);
WTF_EXPORT_STRING_API size_t reverseFind(StringImpl*, unsigned index = UINT_MAX);
WTF_EXPORT_STRING_API size_t reverseFindIgnoringCase(StringImpl*, unsigned index = UINT_MAX);
WTF_EXPORT_STRING_API bool startsWith(const StringImpl*) const;
+ WTF_EXPORT_STRING_API bool startsWith(const StringImpl&) const;
+ WTF_EXPORT_STRING_API bool startsWithIgnoringASCIICase(const StringImpl*) const;
+ WTF_EXPORT_STRING_API bool startsWithIgnoringASCIICase(const StringImpl&) const;
bool startsWith(StringImpl* str, bool caseSensitive) { return caseSensitive ? startsWith(str) : (reverseFindIgnoringCase(str, 0) == 0); }
WTF_EXPORT_STRING_API bool startsWith(UChar) const;
WTF_EXPORT_STRING_API bool startsWith(const char*, unsigned matchLength, bool caseSensitive) const;
template<unsigned matchLength>
bool startsWith(const char (&prefix)[matchLength], bool caseSensitive = true) const { return startsWith(prefix, matchLength - 1, caseSensitive); }
+ WTF_EXPORT_STRING_API bool hasInfixStartingAt(const StringImpl&, unsigned startOffset) const;
- WTF_EXPORT_STRING_API bool endsWith(StringImpl*, bool caseSensitive = true);
+ WTF_EXPORT_STRING_API bool endsWith(StringImpl*);
+ WTF_EXPORT_STRING_API bool endsWith(StringImpl&);
+ WTF_EXPORT_STRING_API bool endsWithIgnoringASCIICase(const StringImpl*) const;
+ WTF_EXPORT_STRING_API bool endsWithIgnoringASCIICase(const StringImpl&) const;
+ WTF_EXPORT_STRING_API bool endsWith(StringImpl*, bool caseSensitive);
WTF_EXPORT_STRING_API bool endsWith(UChar) const;
WTF_EXPORT_STRING_API bool endsWith(const char*, unsigned matchLength, bool caseSensitive) const;
template<unsigned matchLength>
bool endsWith(const char (&prefix)[matchLength], bool caseSensitive = true) const { return endsWith(prefix, matchLength - 1, caseSensitive); }
+ WTF_EXPORT_STRING_API bool hasInfixEndingAt(const StringImpl&, unsigned endOffset) const;
- WTF_EXPORT_STRING_API PassRef<StringImpl> replace(UChar, UChar);
- WTF_EXPORT_STRING_API PassRef<StringImpl> replace(UChar, StringImpl*);
- ALWAYS_INLINE PassRef<StringImpl> replace(UChar pattern, const char* replacement, unsigned replacementLength) { return replace(pattern, reinterpret_cast<const LChar*>(replacement), replacementLength); }
- WTF_EXPORT_STRING_API PassRef<StringImpl> replace(UChar, const LChar*, unsigned replacementLength);
- PassRef<StringImpl> replace(UChar, const UChar*, unsigned replacementLength);
- WTF_EXPORT_STRING_API PassRef<StringImpl> replace(StringImpl*, StringImpl*);
- WTF_EXPORT_STRING_API PassRef<StringImpl> replace(unsigned index, unsigned len, StringImpl*);
+ WTF_EXPORT_STRING_API Ref<StringImpl> replace(UChar, UChar);
+ WTF_EXPORT_STRING_API Ref<StringImpl> replace(UChar, StringImpl*);
+ ALWAYS_INLINE Ref<StringImpl> replace(UChar pattern, const char* replacement, unsigned replacementLength) { return replace(pattern, reinterpret_cast<const LChar*>(replacement), replacementLength); }
+ WTF_EXPORT_STRING_API Ref<StringImpl> replace(UChar, const LChar*, unsigned replacementLength);
+ Ref<StringImpl> replace(UChar, const UChar*, unsigned replacementLength);
+ WTF_EXPORT_STRING_API Ref<StringImpl> replace(StringImpl*, StringImpl*);
+ WTF_EXPORT_STRING_API Ref<StringImpl> replace(unsigned index, unsigned len, StringImpl*);
WTF_EXPORT_STRING_API UCharDirection defaultWritingDirection(bool* hasStrongDirectionality = nullptr);
@@ -749,14 +724,70 @@ public:
RetainPtr<CFStringRef> createCFString();
#endif
#ifdef __OBJC__
- WTF_EXPORT_STRING_API operator NSString*();
+ WTF_EXPORT_STRING_API operator NSString *();
#endif
-#ifdef STRING_STATS
+#if STRING_STATS
ALWAYS_INLINE static StringStats& stringStats() { return m_stringStats; }
#endif
- WTF_EXPORT_STRING_API static const UChar latin1CaseFoldTable[256];
+protected:
+ ~StringImpl();
+
+ enum CreateSymbolTag { CreateSymbol };
+
+ // Used to create new symbol strings that hold an existing 8-bit [[Description]] string as a substring buffer (BufferSubstring).
+ StringImpl(CreateSymbolTag, const LChar* characters, unsigned length)
+ : m_refCount(s_refCountIncrement)
+ , m_length(length)
+ , m_data8(characters)
+ , m_hashAndFlags(s_hashFlag8BitBuffer | StringSymbol | BufferSubstring)
+ {
+ ASSERT(is8Bit());
+ ASSERT(m_data8);
+ STRING_STATS_ADD_8BIT_STRING2(m_length, true);
+ }
+
+ // Used to create new symbol strings that hold an existing 16-bit [[Description]] string as a substring buffer (BufferSubstring).
+ StringImpl(CreateSymbolTag, const UChar* characters, unsigned length)
+ : m_refCount(s_refCountIncrement)
+ , m_length(length)
+ , m_data16(characters)
+ , m_hashAndFlags(StringSymbol | BufferSubstring)
+ {
+ ASSERT(!is8Bit());
+ ASSERT(m_data16);
+ STRING_STATS_ADD_16BIT_STRING2(m_length, true);
+ }
+
+ // Null symbol.
+ StringImpl(CreateSymbolTag)
+ : m_refCount(s_refCountIncrement)
+ , m_length(0)
+ , m_data8(empty()->characters8())
+ , m_hashAndFlags(s_hashFlag8BitBuffer | StringSymbol | BufferSubstring)
+ {
+ ASSERT(is8Bit());
+ ASSERT(m_data8);
+ STRING_STATS_ADD_8BIT_STRING2(m_length, true);
+ }
+
+ template<typename T>
+ static size_t allocationSize(unsigned tailElementCount)
+ {
+ return tailOffset<T>() + tailElementCount * sizeof(T);
+ }
+
+ template<typename T>
+ static ptrdiff_t tailOffset()
+ {
+#if COMPILER(MSVC)
+ // MSVC doesn't support alignof yet.
+ return roundUpToMultipleOf<sizeof(T)>(sizeof(StringImpl));
+#else
+ return roundUpToMultipleOf<alignof(T)>(offsetof(StringImpl, m_hashAndFlags) + sizeof(StringImpl::m_hashAndFlags));
+#endif
+ }
private:
bool requiresCopy() const
@@ -765,59 +796,61 @@ private:
return true;
if (is8Bit())
- return reinterpret_cast<const void*>(m_data8) == reinterpret_cast<const void*>(this + 1);
- return reinterpret_cast<const void*>(m_data16) == reinterpret_cast<const void*>(this + 1);
+ return m_data8 == tailPointer<LChar>();
+ return m_data16 == tailPointer<UChar>();
+ }
+
+ template<typename T>
+ const T* tailPointer() const
+ {
+ return reinterpret_cast_ptr<const T*>(reinterpret_cast<const uint8_t*>(this) + tailOffset<T>());
+ }
+
+ template<typename T>
+ T* tailPointer()
+ {
+ return reinterpret_cast_ptr<T*>(reinterpret_cast<uint8_t*>(this) + tailOffset<T>());
+ }
+
+ StringImpl* const& substringBuffer() const
+ {
+ ASSERT(bufferOwnership() == BufferSubstring);
+
+ return *tailPointer<StringImpl*>();
+ }
+
+ StringImpl*& substringBuffer()
+ {
+ ASSERT(bufferOwnership() == BufferSubstring);
+
+ return *tailPointer<StringImpl*>();
}
// This number must be at least 2 to avoid sharing empty, null as well as 1 character strings from SmallStrings.
static const unsigned s_copyCharsInlineCutOff = 20;
+ enum class CaseConvertType { Upper, Lower };
+ template<CaseConvertType type, typename CharacterType> static Ref<StringImpl> convertASCIICase(StringImpl&, const CharacterType*, unsigned);
+
BufferOwnership bufferOwnership() const { return static_cast<BufferOwnership>(m_hashAndFlags & s_hashMaskBufferOwnership); }
- template <class UCharPredicate> PassRef<StringImpl> stripMatchedCharacters(UCharPredicate);
- template <typename CharType, class UCharPredicate> PassRef<StringImpl> simplifyMatchedCharactersToSpace(UCharPredicate);
- template <typename CharType> static PassRef<StringImpl> constructInternal(StringImpl*, unsigned);
- template <typename CharType> static PassRef<StringImpl> createUninitializedInternal(unsigned, CharType*&);
- template <typename CharType> static PassRef<StringImpl> createUninitializedInternalNonEmpty(unsigned, CharType*&);
- template <typename CharType> static PassRef<StringImpl> reallocateInternal(PassRefPtr<StringImpl>, unsigned, CharType*&);
- template <typename CharType> static PassRef<StringImpl> createInternal(const CharType*, unsigned);
- WTF_EXPORT_STRING_API NEVER_INLINE const UChar* getData16SlowCase() const;
+ template <class UCharPredicate> Ref<StringImpl> stripMatchedCharacters(UCharPredicate);
+ template <typename CharType, class UCharPredicate> Ref<StringImpl> simplifyMatchedCharactersToSpace(UCharPredicate);
+ template <typename CharType> static Ref<StringImpl> constructInternal(StringImpl*, unsigned);
+ template <typename CharType> static Ref<StringImpl> createUninitializedInternal(unsigned, CharType*&);
+ template <typename CharType> static Ref<StringImpl> createUninitializedInternalNonEmpty(unsigned, CharType*&);
+ template <typename CharType> static Ref<StringImpl> reallocateInternal(Ref<StringImpl>&&, unsigned, CharType*&);
+ template <typename CharType> static Ref<StringImpl> createInternal(const CharType*, unsigned);
WTF_EXPORT_PRIVATE NEVER_INLINE unsigned hashSlowCase() const;
// The bottom bit in the ref count indicates a static (immortal) string.
static const unsigned s_refCountFlagIsStaticString = 0x1;
static const unsigned s_refCountIncrement = 0x2; // This allows us to ref / deref without disturbing the static string flag.
- // The bottom 7 bits in the hash are flags.
- static const unsigned s_flagCount = 7;
- static const unsigned s_flagMask = (1u << s_flagCount) - 1;
- COMPILE_ASSERT(s_flagCount <= StringHasher::flagCount, StringHasher_reserves_enough_bits_for_StringImpl_flags);
-
- static const unsigned s_hashFlagHas16BitShadow = 1u << 6;
- static const unsigned s_hashFlag8BitBuffer = 1u << 5;
- static const unsigned s_hashFlagIsAtomic = 1u << 4;
- static const unsigned s_hashFlagDidReportCost = 1u << 3;
- static const unsigned s_hashFlagIsIdentifier = 1u << 2;
- static const unsigned s_hashMaskBufferOwnership = 1u | (1u << 1);
-
-#ifdef STRING_STATS
+#if STRING_STATS
WTF_EXPORTDATA static StringStats m_stringStats;
#endif
public:
- struct StaticASCIILiteral {
- // These member variables must match the layout of StringImpl.
- unsigned m_refCount;
- unsigned m_length;
- const LChar* m_data8;
- void* m_buffer;
- unsigned m_hashAndFlags;
-
- // These values mimic ConstructFromLiteral.
- static const unsigned s_initialRefCount = s_refCountIncrement;
- static const unsigned s_initialFlags = s_hashFlag8BitBuffer | BufferInternal;
- static const unsigned s_hashShift = s_flagCount;
- };
-
#ifndef NDEBUG
void assertHashIsCorrect()
{
@@ -827,25 +860,20 @@ public:
#endif
private:
- // These member variables must match the layout of StaticASCIILiteral.
+ // These member variables must match the layout of StaticStringImpl.
unsigned m_refCount;
unsigned m_length;
union {
const LChar* m_data8;
const UChar* m_data16;
};
- union {
- void* m_buffer;
- StringImpl* m_substringBuffer;
- mutable UChar* m_copyData16;
- };
mutable unsigned m_hashAndFlags;
};
-COMPILE_ASSERT(sizeof(StringImpl) == sizeof(StringImpl::StaticASCIILiteral), StringImpl_should_match_its_StaticASCIILiteral);
+static_assert(sizeof(StringImpl) == sizeof(StringImpl::StaticStringImpl), "");
#if !ASSERT_DISABLED
-// StringImpls created from StaticASCIILiteral will ASSERT
+// StringImpls created from StaticStringImpl will ASSERT
// in the generic ValueCheck<T>::checkConsistency
// as they are not allocated by fastMalloc.
// We don't currently have any way to detect that case
@@ -857,15 +885,15 @@ ValueCheck<StringImpl*> {
#endif
template <>
-ALWAYS_INLINE PassRef<StringImpl> StringImpl::constructInternal<LChar>(StringImpl* impl, unsigned length) { return adoptRef(*new (NotNull, impl) StringImpl(length, Force8BitConstructor)); }
+ALWAYS_INLINE Ref<StringImpl> StringImpl::constructInternal<LChar>(StringImpl* impl, unsigned length) { return adoptRef(*new (NotNull, impl) StringImpl(length, Force8BitConstructor)); }
template <>
-ALWAYS_INLINE PassRef<StringImpl> StringImpl::constructInternal<UChar>(StringImpl* impl, unsigned length) { return adoptRef(*new (NotNull, impl) StringImpl(length)); }
+ALWAYS_INLINE Ref<StringImpl> StringImpl::constructInternal<UChar>(StringImpl* impl, unsigned length) { return adoptRef(*new (NotNull, impl) StringImpl(length)); }
template <>
-ALWAYS_INLINE const LChar* StringImpl::getCharacters<LChar>() const { return characters8(); }
+ALWAYS_INLINE const LChar* StringImpl::characters<LChar>() const { return characters8(); }
template <>
-ALWAYS_INLINE const UChar* StringImpl::getCharacters<UChar>() const { return deprecatedCharacters(); }
+ALWAYS_INLINE const UChar* StringImpl::characters<UChar>() const { return characters16(); }
WTF_EXPORT_STRING_API bool equal(const StringImpl*, const StringImpl*);
WTF_EXPORT_STRING_API bool equal(const StringImpl*, const LChar*);
@@ -875,266 +903,20 @@ WTF_EXPORT_STRING_API bool equal(const StringImpl*, const UChar*, unsigned);
inline bool equal(const StringImpl* a, const char* b, unsigned length) { return equal(a, reinterpret_cast<const LChar*>(b), length); }
inline bool equal(const LChar* a, StringImpl* b) { return equal(b, a); }
inline bool equal(const char* a, StringImpl* b) { return equal(b, reinterpret_cast<const LChar*>(a)); }
-WTF_EXPORT_STRING_API bool equalNonNull(const StringImpl* a, const StringImpl* b);
-
-// Do comparisons 8 or 4 bytes-at-a-time on architectures where it's safe.
-#if CPU(X86_64) || CPU(ARM64)
-ALWAYS_INLINE bool equal(const LChar* a, const LChar* b, unsigned length)
-{
- unsigned dwordLength = length >> 3;
-
- if (dwordLength) {
- const uint64_t* aDWordCharacters = reinterpret_cast<const uint64_t*>(a);
- const uint64_t* bDWordCharacters = reinterpret_cast<const uint64_t*>(b);
-
- for (unsigned i = 0; i != dwordLength; ++i) {
- if (*aDWordCharacters++ != *bDWordCharacters++)
- return false;
- }
-
- a = reinterpret_cast<const LChar*>(aDWordCharacters);
- b = reinterpret_cast<const LChar*>(bDWordCharacters);
- }
-
- if (length & 4) {
- if (*reinterpret_cast<const uint32_t*>(a) != *reinterpret_cast<const uint32_t*>(b))
- return false;
-
- a += 4;
- b += 4;
- }
-
- if (length & 2) {
- if (*reinterpret_cast<const uint16_t*>(a) != *reinterpret_cast<const uint16_t*>(b))
- return false;
-
- a += 2;
- b += 2;
- }
-
- if (length & 1 && (*a != *b))
- return false;
-
- return true;
-}
-
-ALWAYS_INLINE bool equal(const UChar* a, const UChar* b, unsigned length)
-{
- unsigned dwordLength = length >> 2;
-
- if (dwordLength) {
- const uint64_t* aDWordCharacters = reinterpret_cast<const uint64_t*>(a);
- const uint64_t* bDWordCharacters = reinterpret_cast<const uint64_t*>(b);
-
- for (unsigned i = 0; i != dwordLength; ++i) {
- if (*aDWordCharacters++ != *bDWordCharacters++)
- return false;
- }
-
- a = reinterpret_cast<const UChar*>(aDWordCharacters);
- b = reinterpret_cast<const UChar*>(bDWordCharacters);
- }
-
- if (length & 2) {
- if (*reinterpret_cast<const uint32_t*>(a) != *reinterpret_cast<const uint32_t*>(b))
- return false;
-
- a += 2;
- b += 2;
- }
-
- if (length & 1 && (*a != *b))
- return false;
-
- return true;
-}
-#elif CPU(X86)
-ALWAYS_INLINE bool equal(const LChar* a, const LChar* b, unsigned length)
-{
- const uint32_t* aCharacters = reinterpret_cast<const uint32_t*>(a);
- const uint32_t* bCharacters = reinterpret_cast<const uint32_t*>(b);
-
- unsigned wordLength = length >> 2;
- for (unsigned i = 0; i != wordLength; ++i) {
- if (*aCharacters++ != *bCharacters++)
- return false;
- }
-
- length &= 3;
-
- if (length) {
- const LChar* aRemainder = reinterpret_cast<const LChar*>(aCharacters);
- const LChar* bRemainder = reinterpret_cast<const LChar*>(bCharacters);
-
- for (unsigned i = 0; i < length; ++i) {
- if (aRemainder[i] != bRemainder[i])
- return false;
- }
- }
-
- return true;
-}
-
-ALWAYS_INLINE bool equal(const UChar* a, const UChar* b, unsigned length)
-{
- const uint32_t* aCharacters = reinterpret_cast<const uint32_t*>(a);
- const uint32_t* bCharacters = reinterpret_cast<const uint32_t*>(b);
-
- unsigned wordLength = length >> 1;
- for (unsigned i = 0; i != wordLength; ++i) {
- if (*aCharacters++ != *bCharacters++)
- return false;
- }
-
- if (length & 1 && *reinterpret_cast<const UChar*>(aCharacters) != *reinterpret_cast<const UChar*>(bCharacters))
- return false;
-
- return true;
-}
-#elif PLATFORM(IOS) && WTF_ARM_ARCH_AT_LEAST(7)
-ALWAYS_INLINE bool equal(const LChar* a, const LChar* b, unsigned length)
-{
- bool isEqual = false;
- uint32_t aValue;
- uint32_t bValue;
- asm("subs %[length], #4\n"
- "blo 2f\n"
-
- "0:\n" // Label 0 = Start of loop over 32 bits.
- "ldr %[aValue], [%[a]], #4\n"
- "ldr %[bValue], [%[b]], #4\n"
- "cmp %[aValue], %[bValue]\n"
- "bne 66f\n"
- "subs %[length], #4\n"
- "bhs 0b\n"
-
- // At this point, length can be:
- // -0: 00000000000000000000000000000000 (0 bytes left)
- // -1: 11111111111111111111111111111111 (3 bytes left)
- // -2: 11111111111111111111111111111110 (2 bytes left)
- // -3: 11111111111111111111111111111101 (1 byte left)
- // -4: 11111111111111111111111111111100 (length was 0)
- // The pointers are at the correct position.
- "2:\n" // Label 2 = End of loop over 32 bits, check for pair of characters.
- "tst %[length], #2\n"
- "beq 1f\n"
- "ldrh %[aValue], [%[a]], #2\n"
- "ldrh %[bValue], [%[b]], #2\n"
- "cmp %[aValue], %[bValue]\n"
- "bne 66f\n"
-
- "1:\n" // Label 1 = Check for a single character left.
- "tst %[length], #1\n"
- "beq 42f\n"
- "ldrb %[aValue], [%[a]]\n"
- "ldrb %[bValue], [%[b]]\n"
- "cmp %[aValue], %[bValue]\n"
- "bne 66f\n"
-
- "42:\n" // Label 42 = Success.
- "mov %[isEqual], #1\n"
- "66:\n" // Label 66 = End without changing isEqual to 1.
- : [length]"+r"(length), [isEqual]"+r"(isEqual), [a]"+r"(a), [b]"+r"(b), [aValue]"+r"(aValue), [bValue]"+r"(bValue)
- :
- :
- );
- return isEqual;
-}
-
-ALWAYS_INLINE bool equal(const UChar* a, const UChar* b, unsigned length)
-{
- bool isEqual = false;
- uint32_t aValue;
- uint32_t bValue;
- asm("subs %[length], #2\n"
- "blo 1f\n"
-
- "0:\n" // Label 0 = Start of loop over 32 bits.
- "ldr %[aValue], [%[a]], #4\n"
- "ldr %[bValue], [%[b]], #4\n"
- "cmp %[aValue], %[bValue]\n"
- "bne 66f\n"
- "subs %[length], #2\n"
- "bhs 0b\n"
-
- // At this point, length can be:
- // -0: 00000000000000000000000000000000 (0 bytes left)
- // -1: 11111111111111111111111111111111 (1 character left, 2 bytes)
- // -2: 11111111111111111111111111111110 (length was zero)
- // The pointers are at the correct position.
- "1:\n" // Label 1 = Check for a single character left.
- "tst %[length], #1\n"
- "beq 42f\n"
- "ldrh %[aValue], [%[a]]\n"
- "ldrh %[bValue], [%[b]]\n"
- "cmp %[aValue], %[bValue]\n"
- "bne 66f\n"
-
- "42:\n" // Label 42 = Success.
- "mov %[isEqual], #1\n"
- "66:\n" // Label 66 = End without changing isEqual to 1.
- : [length]"+r"(length), [isEqual]"+r"(isEqual), [a]"+r"(a), [b]"+r"(b), [aValue]"+r"(aValue), [bValue]"+r"(bValue)
- :
- :
- );
- return isEqual;
-}
-#else
-ALWAYS_INLINE bool equal(const LChar* a, const LChar* b, unsigned length) { return !memcmp(a, b, length); }
-ALWAYS_INLINE bool equal(const UChar* a, const UChar* b, unsigned length) { return !memcmp(a, b, length * sizeof(UChar)); }
-#endif
-
-ALWAYS_INLINE bool equal(const LChar* a, const UChar* b, unsigned length)
-{
- for (unsigned i = 0; i < length; ++i) {
- if (a[i] != b[i])
- return false;
- }
- return true;
-}
-
-ALWAYS_INLINE bool equal(const UChar* a, const LChar* b, unsigned length) { return equal(b, a, length); }
-
-WTF_EXPORT_STRING_API bool equalIgnoringCase(const StringImpl*, const StringImpl*);
-WTF_EXPORT_STRING_API bool equalIgnoringCase(const StringImpl*, const LChar*);
-inline bool equalIgnoringCase(const LChar* a, const StringImpl* b) { return equalIgnoringCase(b, a); }
-WTF_EXPORT_STRING_API bool equalIgnoringCase(const LChar*, const LChar*, unsigned);
-WTF_EXPORT_STRING_API bool equalIgnoringCase(const UChar*, const LChar*, unsigned);
-inline bool equalIgnoringCase(const UChar* a, const char* b, unsigned length) { return equalIgnoringCase(a, reinterpret_cast<const LChar*>(b), length); }
-inline bool equalIgnoringCase(const LChar* a, const UChar* b, unsigned length) { return equalIgnoringCase(b, a, length); }
-inline bool equalIgnoringCase(const char* a, const UChar* b, unsigned length) { return equalIgnoringCase(b, reinterpret_cast<const LChar*>(a), length); }
-inline bool equalIgnoringCase(const char* a, const LChar* b, unsigned length) { return equalIgnoringCase(b, reinterpret_cast<const LChar*>(a), length); }
-inline bool equalIgnoringCase(const UChar* a, const UChar* b, int length)
-{
- ASSERT(length >= 0);
- return !u_memcasecmp(a, b, length, U_FOLD_CASE_DEFAULT);
-}
-WTF_EXPORT_STRING_API bool equalIgnoringCaseNonNull(const StringImpl*, const StringImpl*);
+WTF_EXPORT_STRING_API bool equal(const StringImpl& a, const StringImpl& b);
WTF_EXPORT_STRING_API bool equalIgnoringNullity(StringImpl*, StringImpl*);
+WTF_EXPORT_STRING_API bool equalIgnoringNullity(const UChar*, size_t length, StringImpl*);
-template<typename CharacterType>
-inline size_t find(const CharacterType* characters, unsigned length, CharacterType matchCharacter, unsigned index = 0)
-{
- while (index < length) {
- if (characters[index] == matchCharacter)
- return index;
- ++index;
- }
- return notFound;
-}
+bool equalIgnoringASCIICase(const StringImpl&, const StringImpl&);
+WTF_EXPORT_STRING_API bool equalIgnoringASCIICase(const StringImpl*, const StringImpl*);
+bool equalIgnoringASCIICase(const StringImpl&, const char*);
+bool equalIgnoringASCIICase(const StringImpl*, const char*);
-ALWAYS_INLINE size_t find(const UChar* characters, unsigned length, LChar matchCharacter, unsigned index = 0)
-{
- return find(characters, length, static_cast<UChar>(matchCharacter), index);
-}
+WTF_EXPORT_STRING_API bool equalIgnoringASCIICaseNonNull(const StringImpl*, const StringImpl*);
-inline size_t find(const LChar* characters, unsigned length, UChar matchCharacter, unsigned index = 0)
-{
- if (matchCharacter & ~0xFF)
- return notFound;
- return find(characters, length, static_cast<LChar>(matchCharacter), index);
-}
+template<unsigned length> bool equalLettersIgnoringASCIICase(const StringImpl&, const char (&lowercaseLetters)[length]);
+template<unsigned length> bool equalLettersIgnoringASCIICase(const StringImpl*, const char (&lowercaseLetters)[length]);
inline size_t find(const LChar* characters, unsigned length, CharacterMatchFunctionPtr matchFunction, unsigned index = 0)
{
@@ -1157,37 +939,6 @@ inline size_t find(const UChar* characters, unsigned length, CharacterMatchFunct
}
template<typename CharacterType>
-inline size_t findNextLineStart(const CharacterType* characters, unsigned length, unsigned index = 0)
-{
- while (index < length) {
- CharacterType c = characters[index++];
- if ((c != '\n') && (c != '\r'))
- continue;
-
- // There can only be a start of a new line if there are more characters
- // beyond the current character.
- if (index < length) {
- // The 3 common types of line terminators are 1. \r\n (Windows),
- // 2. \r (old MacOS) and 3. \n (Unix'es).
-
- if (c == '\n')
- return index; // Case 3: just \n.
-
- CharacterType c2 = characters[index];
- if (c2 != '\n')
- return index; // Case 2: just \r.
-
- // Case 1: \r\n.
- // But, there's only a start of a new line if there are more
- // characters beyond the \r\n.
- if (++index < length)
- return index;
- }
- }
- return notFound;
-}
-
-template<typename CharacterType>
inline size_t reverseFindLineTerminator(const CharacterType* characters, unsigned length, unsigned index = UINT_MAX)
{
if (!length)
@@ -1248,18 +999,13 @@ inline size_t StringImpl::find(UChar character, unsigned start)
return WTF::find(characters16(), m_length, character, start);
}
-template<size_t inlineCapacity>
-bool equalIgnoringNullity(const Vector<UChar, inlineCapacity>& a, StringImpl* b)
+template<size_t inlineCapacity> inline bool equalIgnoringNullity(const Vector<UChar, inlineCapacity>& a, StringImpl* b)
{
- if (!b)
- return !a.size();
- if (a.size() != b->length())
- return false;
- return !memcmp(a.data(), b->deprecatedCharacters(), b->length() * sizeof(UChar));
+ return equalIgnoringNullity(a.data(), a.size(), b);
}
template<typename CharacterType1, typename CharacterType2>
-static inline int codePointCompare(unsigned l1, unsigned l2, const CharacterType1* c1, const CharacterType2* c2)
+inline int codePointCompare(unsigned l1, unsigned l2, const CharacterType1* c1, const CharacterType2* c2)
{
const unsigned lmin = l1 < l2 ? l1 : l2;
unsigned pos = 0;
@@ -1278,22 +1024,22 @@ static inline int codePointCompare(unsigned l1, unsigned l2, const CharacterType
return (l1 > l2) ? 1 : -1;
}
-static inline int codePointCompare8(const StringImpl* string1, const StringImpl* string2)
+inline int codePointCompare8(const StringImpl* string1, const StringImpl* string2)
{
return codePointCompare(string1->length(), string2->length(), string1->characters8(), string2->characters8());
}
-static inline int codePointCompare16(const StringImpl* string1, const StringImpl* string2)
+inline int codePointCompare16(const StringImpl* string1, const StringImpl* string2)
{
return codePointCompare(string1->length(), string2->length(), string1->characters16(), string2->characters16());
}
-static inline int codePointCompare8To16(const StringImpl* string1, const StringImpl* string2)
+inline int codePointCompare8To16(const StringImpl* string1, const StringImpl* string2)
{
return codePointCompare(string1->length(), string2->length(), string1->characters8(), string2->characters16());
}
-static inline int codePointCompare(const StringImpl* string1, const StringImpl* string2)
+inline int codePointCompare(const StringImpl* string1, const StringImpl* string2)
{
if (!string1)
return (string2 && string2->length()) ? -1 : 0;
@@ -1313,7 +1059,7 @@ static inline int codePointCompare(const StringImpl* string1, const StringImpl*
return codePointCompare16(string1, string2);
}
-static inline bool isSpaceOrNewline(UChar c)
+inline bool isSpaceOrNewline(UChar c)
{
// Use isASCIISpace() for basic Latin-1.
// This will include newlines, which aren't included in Unicode DirWS.
@@ -1332,7 +1078,7 @@ inline unsigned lengthOfNullTerminatedString(const CharacterType* string)
return static_cast<unsigned>(length);
}
-inline PassRef<StringImpl> StringImpl::isolatedCopy() const
+inline Ref<StringImpl> StringImpl::isolatedCopy() const
{
if (!requiresCopy()) {
if (is8Bit())
@@ -1345,8 +1091,6 @@ inline PassRef<StringImpl> StringImpl::isolatedCopy() const
return create(m_data16, m_length);
}
-struct StringHash;
-
// StringHash is the default hash for StringImpl* and RefPtr<StringImpl>
template<typename T> struct DefaultHash;
template<> struct DefaultHash<StringImpl*> {
@@ -1356,11 +1100,45 @@ template<> struct DefaultHash<RefPtr<StringImpl>> {
typedef StringHash Hash;
};
+inline bool equalIgnoringASCIICase(const StringImpl& a, const StringImpl& b)
+{
+ return equalIgnoringASCIICaseCommon(a, b);
+}
+
+inline bool equalIgnoringASCIICase(const StringImpl& a, const char* b)
+{
+ return equalIgnoringASCIICaseCommon(a, b);
+}
+
+inline bool equalIgnoringASCIICase(const StringImpl* a, const char* b)
+{
+ return a && equalIgnoringASCIICase(*a, b);
+}
+
+template<unsigned length> inline bool startsWithLettersIgnoringASCIICase(const StringImpl& string, const char (&lowercaseLetters)[length])
+{
+ return startsWithLettersIgnoringASCIICaseCommon(string, lowercaseLetters);
+}
+
+template<unsigned length> inline bool startsWithLettersIgnoringASCIICase(const StringImpl* string, const char (&lowercaseLetters)[length])
+{
+ return string && startsWithLettersIgnoringASCIICase(*string, lowercaseLetters);
+}
+
+template<unsigned length> inline bool equalLettersIgnoringASCIICase(const StringImpl& string, const char (&lowercaseLetters)[length])
+{
+ return equalLettersIgnoringASCIICaseCommon(string, lowercaseLetters);
+}
+
+template<unsigned length> inline bool equalLettersIgnoringASCIICase(const StringImpl* string, const char (&lowercaseLetters)[length])
+{
+ return string && equalLettersIgnoringASCIICase(*string, lowercaseLetters);
+}
+
} // namespace WTF
using WTF::StringImpl;
using WTF::equal;
-using WTF::equalNonNull;
using WTF::TextCaseSensitivity;
using WTF::TextCaseSensitive;
using WTF::TextCaseInsensitive;
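Stepping back from the StringImpl.h diff above: the old m_buffer/m_substringBuffer union is gone, and anything that used to live in it now sits in a variable-length tail allocated directly after the fixed StringImpl fields. allocationSize<T>(n) computes the combined size, tailPointer<T>() locates the tail, inline character data (BufferInternal) is written there, and BufferSubstring strings keep a single StringImpl* to their owning string there. A standalone sketch of the same layout trick, with an illustrative Header struct standing in for StringImpl (this is an analogy, not the WTF code):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>
    #include <new>

    struct Header {                        // stand-in for StringImpl's fixed fields
        unsigned refCount;
        unsigned length;
        const char* data;
        unsigned hashAndFlags;
    };

    template<typename T>
    constexpr size_t tailOffset()          // round the header size up to T's alignment
    {
        return (sizeof(Header) + alignof(T) - 1) & ~(alignof(T) - 1);
    }

    template<typename T>
    constexpr size_t allocationSize(size_t tailElementCount)
    {
        return tailOffset<T>() + tailElementCount * sizeof(T);
    }

    Header* createWithInlineCharacters(const char* source, unsigned length)
    {
        // One allocation holds the header plus the character tail, like a BufferInternal string.
        void* memory = std::malloc(allocationSize<char>(length));
        Header* header = new (memory) Header { 1, length, nullptr, 0 };
        char* tail = reinterpret_cast<char*>(header) + tailOffset<char>();
        std::memcpy(tail, source, length);
        header->data = tail;               // the data pointer aims back into the same allocation
        return header;
    }

    int main()
    {
        Header* string = createWithInlineCharacters("abc", 3);
        bool isInline = string->data == reinterpret_cast<char*>(string) + tailOffset<char>();
        std::free(string);                 // toy cleanup; the real code goes through deref()/destroy()
        return isInline ? 0 : 1;
    }

The "is the data stored inline?" check at the end is the same test the patched requiresCopy() performs when it compares m_data8 against tailPointer<LChar>().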
diff --git a/Source/WTF/wtf/text/StringOperators.h b/Source/WTF/wtf/text/StringOperators.h
index cfd2f6d6c..5a2435658 100644
--- a/Source/WTF/wtf/text/StringOperators.h
+++ b/Source/WTF/wtf/text/StringOperators.h
@@ -35,10 +35,10 @@ public:
operator String() const
{
- RefPtr<StringImpl> resultImpl = tryMakeString(m_string1, m_string2);
- if (!resultImpl)
+ String result = tryMakeString(m_string1, m_string2);
+ if (!result)
CRASH();
- return resultImpl.release();
+ return result;
}
operator AtomicString() const
@@ -97,6 +97,8 @@ public:
void writeTo(LChar* destination) { m_buffer.writeTo(destination); }
void writeTo(UChar* destination) { m_buffer.writeTo(destination); }
+ String toString() const { return m_buffer; }
+
private:
StringAppend<StringType1, StringType2>& m_buffer;
};
diff --git a/Source/WTF/wtf/text/StringStatics.cpp b/Source/WTF/wtf/text/StringStatics.cpp
index 8f0c74cc0..0c2119c1d 100644
--- a/Source/WTF/wtf/text/StringStatics.cpp
+++ b/Source/WTF/wtf/text/StringStatics.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2010, 2016 Apple Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,8 +30,8 @@
#endif
#include "AtomicString.h"
-#include "DynamicAnnotations.h"
#include "MainThread.h"
+#include "NeverDestroyed.h"
#include "StaticConstructors.h"
#include "StringImpl.h"
@@ -41,29 +41,11 @@
namespace WTF {
-StringImpl* StringImpl::empty()
-{
- // FIXME: This works around a bug in our port of PCRE, that a regular expression
- // run on the empty string may still perform a read from the first element, and
- // as such we need this to be a valid pointer. No code should ever be reading
- // from a zero length string, so this should be able to be a non-null pointer
- // into the zero-page.
- // Replace this with 'reinterpret_cast<UChar*>(static_cast<intptr_t>(1))' once
- // PCRE goes away.
- static LChar emptyLCharData = 0;
- DEFINE_STATIC_LOCAL(StringImpl, emptyString, (&emptyLCharData, 0, ConstructStaticString));
- WTF_ANNOTATE_BENIGN_RACE(&emptyString, "Benign race on StringImpl::emptyString reference counter");
- return &emptyString;
-}
-
WTF_EXPORTDATA DEFINE_GLOBAL(AtomicString, nullAtom)
WTF_EXPORTDATA DEFINE_GLOBAL(AtomicString, emptyAtom)
-WTF_EXPORTDATA DEFINE_GLOBAL(AtomicString, textAtom)
-WTF_EXPORTDATA DEFINE_GLOBAL(AtomicString, commentAtom)
WTF_EXPORTDATA DEFINE_GLOBAL(AtomicString, starAtom)
WTF_EXPORTDATA DEFINE_GLOBAL(AtomicString, xmlAtom)
WTF_EXPORTDATA DEFINE_GLOBAL(AtomicString, xmlnsAtom)
-WTF_EXPORTDATA DEFINE_GLOBAL(AtomicString, xlinkAtom)
NEVER_INLINE unsigned StringImpl::hashSlowCase() const
{
@@ -74,6 +56,17 @@ NEVER_INLINE unsigned StringImpl::hashSlowCase() const
return existingHash();
}
+unsigned StringImpl::concurrentHash() const
+{
+ unsigned hash;
+ if (is8Bit())
+ hash = StringHasher::computeHashAndMaskTop8Bits(m_data8, m_length);
+ else
+ hash = StringHasher::computeHashAndMaskTop8Bits(m_data16, m_length);
+ ASSERT(((hash << s_flagCount) >> s_flagCount) == hash);
+ return hash;
+}
+
void AtomicString::init()
{
static bool initialized;
@@ -84,12 +77,9 @@ void AtomicString::init()
// Use placement new to initialize the globals.
new (NotNull, (void*)&nullAtom) AtomicString;
new (NotNull, (void*)&emptyAtom) AtomicString("");
- new (NotNull, (void*)&textAtom) AtomicString("#text", AtomicString::ConstructFromLiteral);
- new (NotNull, (void*)&commentAtom) AtomicString("#comment", AtomicString::ConstructFromLiteral);
new (NotNull, (void*)&starAtom) AtomicString("*", AtomicString::ConstructFromLiteral);
new (NotNull, (void*)&xmlAtom) AtomicString("xml", AtomicString::ConstructFromLiteral);
new (NotNull, (void*)&xmlnsAtom) AtomicString("xmlns", AtomicString::ConstructFromLiteral);
- new (NotNull, (void*)&xlinkAtom) AtomicString("xlink", AtomicString::ConstructFromLiteral);
initialized = true;
}
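Connecting the StringStatics.cpp hunk above to the header changes: StringImpl::empty() no longer builds a function-local static on first use; it reinterpret_casts the layout-compatible StaticStringImpl s_atomicEmptyString, whose refCount carries s_refCountFlagIsStaticString so ref()/deref() can never free it and whose hash is computed at compile time by StringHasher::computeLiteralHashAndMaskTop8Bits. concurrentHash(), by contrast, recomputes the hash on every call instead of caching it into m_hashAndFlags, which is what makes it usable from threads that must not write to a shared StringImpl. A tiny standalone sketch of the immortal-refcount convention (illustrative names, mirroring s_refCountFlagIsStaticString = 0x1 and s_refCountIncrement = 0x2):

    #include <cassert>

    // The bottom bit marks a static (immortal) object; real references move in steps of 2,
    // so a count that starts at the flag bit can never be decremented to exactly zero.
    constexpr unsigned isStaticFlag = 0x1;
    constexpr unsigned refIncrement = 0x2;

    struct Counted {
        unsigned refCount;
        bool destroyed = false;
        void ref() { refCount += refIncrement; }
        void deref()
        {
            unsigned newCount = refCount - refIncrement;
            if (!newCount) {               // only reachable for non-static objects
                destroyed = true;          // stands in for StringImpl::destroy(this)
                return;
            }
            refCount = newCount;
        }
    };

    int main()
    {
        Counted normal { refIncrement };   // ordinary object with one owner
        normal.deref();
        assert(normal.destroyed);          // the last deref destroys it

        Counted immortal { isStaticFlag }; // static object: count starts odd
        immortal.ref();
        immortal.deref();
        assert(!immortal.destroyed && immortal.refCount == isStaticFlag);
        return 0;
    }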
diff --git a/Source/WTF/wtf/text/StringView.cpp b/Source/WTF/wtf/text/StringView.cpp
new file mode 100644
index 000000000..580799765
--- /dev/null
+++ b/Source/WTF/wtf/text/StringView.cpp
@@ -0,0 +1,285 @@
+/*
+
+Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "config.h"
+#include "StringView.h"
+
+#include <mutex>
+#include <unicode/ubrk.h>
+#include <wtf/HashMap.h>
+#include <wtf/Lock.h>
+#include <wtf/NeverDestroyed.h>
+#include <wtf/Optional.h>
+#include <wtf/text/TextBreakIterator.h>
+#include <wtf/unicode/UTF8.h>
+
+namespace WTF {
+
+using namespace Unicode;
+
+bool StringView::containsIgnoringASCIICase(const StringView& matchString) const
+{
+ return findIgnoringASCIICase(matchString) != notFound;
+}
+
+bool StringView::containsIgnoringASCIICase(const StringView& matchString, unsigned startOffset) const
+{
+ return findIgnoringASCIICase(matchString, startOffset) != notFound;
+}
+
+size_t StringView::findIgnoringASCIICase(const StringView& matchString) const
+{
+ return ::WTF::findIgnoringASCIICase(*this, matchString, 0);
+}
+
+size_t StringView::findIgnoringASCIICase(const StringView& matchString, unsigned startOffset) const
+{
+ return ::WTF::findIgnoringASCIICase(*this, matchString, startOffset);
+}
+
+bool StringView::startsWith(const StringView& prefix) const
+{
+ return ::WTF::startsWith(*this, prefix);
+}
+
+bool StringView::startsWithIgnoringASCIICase(const StringView& prefix) const
+{
+ return ::WTF::startsWithIgnoringASCIICase(*this, prefix);
+}
+
+bool StringView::endsWith(const StringView& suffix) const
+{
+ return ::WTF::endsWith(*this, suffix);
+}
+
+bool StringView::endsWithIgnoringASCIICase(const StringView& suffix) const
+{
+ return ::WTF::endsWithIgnoringASCIICase(*this, suffix);
+}
+
+CString StringView::utf8(ConversionMode mode) const
+{
+ if (isNull())
+ return CString("", 0);
+ if (is8Bit())
+ return StringImpl::utf8ForCharacters(characters8(), length());
+ return StringImpl::utf8ForCharacters(characters16(), length(), mode);
+}
+
+size_t StringView::find(StringView matchString, unsigned start) const
+{
+ return findCommon(*this, matchString, start);
+}
+
+void StringView::SplitResult::Iterator::findNextSubstring()
+{
+ for (size_t separatorPosition; (separatorPosition = m_result.m_string.find(m_result.m_separator, m_position)) != notFound; ++m_position) {
+ if (separatorPosition > m_position) {
+ m_length = separatorPosition - m_position;
+ return;
+ }
+ }
+ m_length = m_result.m_string.length() - m_position;
+}
+
+auto StringView::SplitResult::Iterator::operator++() -> Iterator&
+{
+ ASSERT(m_position < m_result.m_string.length());
+ m_position += m_length;
+ if (m_position < m_result.m_string.length()) {
+ ++m_position;
+ findNextSubstring();
+ }
+ return *this;
+}
+
+class StringView::GraphemeClusters::Iterator::Impl {
+public:
+ Impl(const StringView& stringView, std::optional<NonSharedCharacterBreakIterator>&& iterator, unsigned index)
+ : m_stringView(stringView)
+ , m_iterator(WTFMove(iterator))
+ , m_index(index)
+ , m_indexEnd(computeIndexEnd())
+ {
+ }
+
+ void operator++()
+ {
+ ASSERT(m_indexEnd > m_index);
+ m_index = m_indexEnd;
+ m_indexEnd = computeIndexEnd();
+ }
+
+ StringView operator*() const
+ {
+ if (m_stringView.is8Bit())
+ return StringView(m_stringView.characters8() + m_index, m_indexEnd - m_index);
+ return StringView(m_stringView.characters16() + m_index, m_indexEnd - m_index);
+ }
+
+ bool operator==(const Impl& other) const
+ {
+ ASSERT(&m_stringView == &other.m_stringView);
+ auto result = m_index == other.m_index;
+ ASSERT(!result || m_indexEnd == other.m_indexEnd);
+ return result;
+ }
+
+ unsigned computeIndexEnd()
+ {
+ if (!m_iterator)
+ return 0;
+ if (m_index == m_stringView.length())
+ return m_index;
+ return ubrk_following(m_iterator.value(), m_index);
+ }
+
+private:
+ const StringView& m_stringView;
+ std::optional<NonSharedCharacterBreakIterator> m_iterator;
+ unsigned m_index;
+ unsigned m_indexEnd;
+};
+
+StringView::GraphemeClusters::Iterator::Iterator(const StringView& stringView, unsigned index)
+ : m_impl(std::make_unique<Impl>(stringView, stringView.isNull() ? std::nullopt : std::optional<NonSharedCharacterBreakIterator>(NonSharedCharacterBreakIterator(stringView)), index))
+{
+}
+
+StringView::GraphemeClusters::Iterator::~Iterator()
+{
+}
+
+StringView::GraphemeClusters::Iterator::Iterator(Iterator&& other)
+ : m_impl(WTFMove(other.m_impl))
+{
+}
+
+auto StringView::GraphemeClusters::Iterator::operator++() -> Iterator&
+{
+ ++(*m_impl);
+ return *this;
+}
+
+StringView StringView::GraphemeClusters::Iterator::operator*() const
+{
+ return **m_impl;
+}
+
+bool StringView::GraphemeClusters::Iterator::operator==(const Iterator& other) const
+{
+ return *m_impl == *(other.m_impl);
+}
+
+bool StringView::GraphemeClusters::Iterator::operator!=(const Iterator& other) const
+{
+ return !(*this == other);
+}
+
+#if CHECK_STRINGVIEW_LIFETIME
+
+// Manage reference count manually so UnderlyingString does not need to be defined in the header.
+
+struct StringView::UnderlyingString {
+ std::atomic_uint refCount { 1u };
+ bool isValid { true };
+ const StringImpl& string;
+ explicit UnderlyingString(const StringImpl&);
+};
+
+StringView::UnderlyingString::UnderlyingString(const StringImpl& string)
+ : string(string)
+{
+}
+
+static StaticLock underlyingStringsMutex;
+
+static HashMap<const StringImpl*, StringView::UnderlyingString*>& underlyingStrings()
+{
+ static NeverDestroyed<HashMap<const StringImpl*, StringView::UnderlyingString*>> map;
+ return map;
+}
+
+void StringView::invalidate(const StringImpl& stringToBeDestroyed)
+{
+ UnderlyingString* underlyingString;
+ {
+ std::lock_guard<StaticLock> lock(underlyingStringsMutex);
+ underlyingString = underlyingStrings().take(&stringToBeDestroyed);
+ if (!underlyingString)
+ return;
+ }
+ ASSERT(underlyingString->isValid);
+ underlyingString->isValid = false;
+}
+
+bool StringView::underlyingStringIsValid() const
+{
+ return !m_underlyingString || m_underlyingString->isValid;
+}
+
+void StringView::adoptUnderlyingString(UnderlyingString* underlyingString)
+{
+ if (m_underlyingString) {
+ std::lock_guard<StaticLock> lock(underlyingStringsMutex);
+ if (!--m_underlyingString->refCount) {
+ if (m_underlyingString->isValid) {
+ underlyingStrings().remove(&m_underlyingString->string);
+ }
+ delete m_underlyingString;
+ }
+ }
+ m_underlyingString = underlyingString;
+}
+
+void StringView::setUnderlyingString(const StringImpl* string)
+{
+ UnderlyingString* underlyingString;
+ if (!string)
+ underlyingString = nullptr;
+ else {
+ std::lock_guard<StaticLock> lock(underlyingStringsMutex);
+ auto result = underlyingStrings().add(string, nullptr);
+ if (result.isNewEntry)
+ result.iterator->value = new UnderlyingString(*string);
+ else
+ ++result.iterator->value->refCount;
+ underlyingString = result.iterator->value;
+ }
+ adoptUnderlyingString(underlyingString);
+}
+
+void StringView::setUnderlyingString(const StringView& otherString)
+{
+ UnderlyingString* underlyingString = otherString.m_underlyingString;
+ if (underlyingString)
+ ++underlyingString->refCount;
+ adoptUnderlyingString(underlyingString);
+}
+
+#endif // CHECK_STRINGVIEW_LIFETIME
+
+} // namespace WTF
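Editor's note: the SplitResult iterator implemented above skips empty substrings (findNextSubstring advances past consecutive separators), so splitting "a,,b" on ',' yields exactly two tokens. A minimal usage sketch follows; collectTokens and isContentTypeLine are hypothetical helpers and only assume the StringView API added in this patch.

    // Sketch only: illustrative helpers, not part of this patch.
    static Vector<String> collectTokens(StringView line)
    {
        Vector<String> tokens;
        for (StringView token : line.split(','))
            tokens.append(token.toString()); // empty substrings between separators are skipped
        return tokens;
    }

    static bool isContentTypeLine(StringView line)
    {
        // ASCII-only, case-insensitive search, as implemented by findIgnoringASCIICase above.
        return line.findIgnoringASCIICase(StringView("content-type")) != notFound;
    }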
diff --git a/Source/WTF/wtf/text/StringView.h b/Source/WTF/wtf/text/StringView.h
index 70f4eb0cb..ef209f947 100644
--- a/Source/WTF/wtf/text/StringView.h
+++ b/Source/WTF/wtf/text/StringView.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,132 +26,916 @@
#ifndef StringView_h
#define StringView_h
-#include <wtf/text/WTFString.h>
+#include <limits.h>
+#include <unicode/utypes.h>
+#include <wtf/Forward.h>
+#include <wtf/RetainPtr.h>
+#include <wtf/Vector.h>
+#include <wtf/text/CString.h>
+#include <wtf/text/ConversionMode.h>
+#include <wtf/text/LChar.h>
+#include <wtf/text/StringCommon.h>
+
+// FIXME: Enabling the StringView lifetime checking causes the MSVC build to fail. Figure out why.
+#if defined(NDEBUG) || COMPILER(MSVC)
+#define CHECK_STRINGVIEW_LIFETIME 0
+#else
+#define CHECK_STRINGVIEW_LIFETIME 1
+#endif
namespace WTF {
+using CharacterMatchFunction = bool (*)(UChar);
+
// StringView is a non-owning reference to a string, similar to the proposed std::string_view.
-// Whether the string is 8-bit or 16-bit is encoded in the upper bit of the length member.
-// This means that strings longer than 2 Gigabytes can not be represented. If that turns out to be
-// a problem we can investigate alternative solutions.
class StringView {
public:
- StringView()
- : m_characters(nullptr)
- , m_length(0)
- {
+ StringView();
+#if CHECK_STRINGVIEW_LIFETIME
+ ~StringView();
+ StringView(StringView&&);
+ StringView(const StringView&);
+ StringView& operator=(StringView&&);
+ StringView& operator=(const StringView&);
+#endif
+
+ StringView(const AtomicString&);
+ StringView(const String&);
+ StringView(const StringImpl&);
+ StringView(const StringImpl*);
+ StringView(const LChar*, unsigned length);
+ StringView(const UChar*, unsigned length);
+ StringView(const char*);
+
+ static StringView empty();
+
+ unsigned length() const;
+ bool isEmpty() const;
+
+ explicit operator bool() const;
+ bool isNull() const;
+
+ UChar operator[](unsigned index) const;
+
+ class CodeUnits;
+ CodeUnits codeUnits() const;
+
+ class CodePoints;
+ CodePoints codePoints() const;
+
+ class GraphemeClusters;
+ GraphemeClusters graphemeClusters() const;
+
+ bool is8Bit() const;
+ const LChar* characters8() const;
+ const UChar* characters16() const;
+
+ String toString() const;
+ String toStringWithoutCopying() const;
+ AtomicString toAtomicString() const;
+
+#if USE(CF)
+ // This function converts null strings to empty strings.
+ WTF_EXPORT_STRING_API RetainPtr<CFStringRef> createCFStringWithoutCopying() const;
+#endif
+
+#ifdef __OBJC__
+ // These functions convert null strings to empty strings.
+ WTF_EXPORT_STRING_API RetainPtr<NSString> createNSString() const;
+ WTF_EXPORT_STRING_API RetainPtr<NSString> createNSStringWithoutCopying() const;
+#endif
+
+ WTF_EXPORT_STRING_API CString utf8(ConversionMode = LenientConversion) const;
+
+ class UpconvertedCharacters;
+ UpconvertedCharacters upconvertedCharacters() const;
+
+ void getCharactersWithUpconvert(LChar*) const;
+ void getCharactersWithUpconvert(UChar*) const;
+
+ StringView substring(unsigned start, unsigned length = std::numeric_limits<unsigned>::max()) const;
+ StringView left(unsigned len) const { return substring(0, len); }
+ StringView right(unsigned len) const { return substring(length() - len, len); }
+
+ class SplitResult;
+ SplitResult split(UChar) const;
+
+ size_t find(UChar, unsigned start = 0) const;
+ size_t find(CharacterMatchFunction, unsigned start = 0) const;
+
+ WTF_EXPORT_STRING_API size_t find(StringView, unsigned start) const;
+
+ size_t reverseFind(UChar, unsigned index = UINT_MAX) const;
+
+ WTF_EXPORT_STRING_API size_t findIgnoringASCIICase(const StringView&) const;
+ WTF_EXPORT_STRING_API size_t findIgnoringASCIICase(const StringView&, unsigned startOffset) const;
+
+ bool contains(UChar) const;
+ WTF_EXPORT_STRING_API bool containsIgnoringASCIICase(const StringView&) const;
+ WTF_EXPORT_STRING_API bool containsIgnoringASCIICase(const StringView&, unsigned startOffset) const;
+
+ WTF_EXPORT_STRING_API bool startsWith(const StringView&) const;
+ WTF_EXPORT_STRING_API bool startsWithIgnoringASCIICase(const StringView&) const;
+
+ WTF_EXPORT_STRING_API bool endsWith(const StringView&) const;
+ WTF_EXPORT_STRING_API bool endsWithIgnoringASCIICase(const StringView&) const;
+
+ int toInt() const;
+ int toInt(bool& isValid) const;
+ int toIntStrict(bool& isValid) const;
+ float toFloat(bool& isValid) const;
+
+ static void invalidate(const StringImpl&);
+
+ struct UnderlyingString;
+
+private:
+ friend bool equal(StringView, StringView);
+
+ void initialize(const LChar*, unsigned length);
+ void initialize(const UChar*, unsigned length);
+
+#if CHECK_STRINGVIEW_LIFETIME
+ WTF_EXPORT_STRING_API bool underlyingStringIsValid() const;
+ WTF_EXPORT_STRING_API void setUnderlyingString(const StringImpl*);
+ WTF_EXPORT_STRING_API void setUnderlyingString(const StringView&);
+#else
+ bool underlyingStringIsValid() const { return true; }
+ void setUnderlyingString(const StringImpl*) { }
+ void setUnderlyingString(const StringView&) { }
+#endif
+ void clear();
+
+ const void* m_characters { nullptr };
+ unsigned m_length { 0 };
+ bool m_is8Bit { true };
+
+#if CHECK_STRINGVIEW_LIFETIME
+ void adoptUnderlyingString(UnderlyingString*);
+ UnderlyingString* m_underlyingString { nullptr };
+#endif
+};
+
+template<typename CharacterType, size_t inlineCapacity> void append(Vector<CharacterType, inlineCapacity>&, StringView);
+
+bool equal(StringView, StringView);
+bool equal(StringView, const LChar*);
+bool equal(StringView, const char*);
+
+bool equalIgnoringASCIICase(StringView, StringView);
+bool equalIgnoringASCIICase(StringView, const char*);
+
+template<unsigned length> bool equalLettersIgnoringASCIICase(StringView, const char (&lowercaseLetters)[length]);
+
+inline bool operator==(StringView a, StringView b) { return equal(a, b); }
+inline bool operator==(StringView a, const LChar* b) { return equal(a, b); }
+inline bool operator==(StringView a, const char* b) { return equal(a, b); }
+inline bool operator==(const LChar* a, StringView b) { return equal(b, a); }
+inline bool operator==(const char* a, StringView b) { return equal(b, a); }
+
+inline bool operator!=(StringView a, StringView b) { return !equal(a, b); }
+inline bool operator!=(StringView a, const LChar* b) { return !equal(a, b); }
+inline bool operator!=(StringView a, const char* b) { return !equal(a, b); }
+inline bool operator!=(const LChar* a, StringView b) { return !equal(b, a); }
+inline bool operator!=(const char* a, StringView b) { return !equal(b, a); }
+
+}
+
+#include <wtf/text/AtomicString.h>
+#include <wtf/text/WTFString.h>
+
+namespace WTF {
+
+inline StringView::StringView()
+{
+ // FIXME: It's peculiar that null strings are 16-bit and empty strings return 8-bit (according to the is8Bit function).
+}
+
+#if CHECK_STRINGVIEW_LIFETIME
+inline StringView::~StringView()
+{
+ setUnderlyingString(nullptr);
+}
+
+inline StringView::StringView(StringView&& other)
+ : m_characters(other.m_characters)
+ , m_length(other.m_length)
+ , m_is8Bit(other.m_is8Bit)
+{
+ ASSERT(other.underlyingStringIsValid());
+
+ other.clear();
+
+ setUnderlyingString(other);
+ other.setUnderlyingString(nullptr);
+}
+
+inline StringView::StringView(const StringView& other)
+ : m_characters(other.m_characters)
+ , m_length(other.m_length)
+ , m_is8Bit(other.m_is8Bit)
+{
+ ASSERT(other.underlyingStringIsValid());
+
+ setUnderlyingString(other);
+}
+
+inline StringView& StringView::operator=(StringView&& other)
+{
+ ASSERT(other.underlyingStringIsValid());
+
+ m_characters = other.m_characters;
+ m_length = other.m_length;
+ m_is8Bit = other.m_is8Bit;
+
+ other.clear();
+
+ setUnderlyingString(other);
+ other.setUnderlyingString(nullptr);
+
+ return *this;
+}
+
+inline StringView& StringView::operator=(const StringView& other)
+{
+ ASSERT(other.underlyingStringIsValid());
+
+ m_characters = other.m_characters;
+ m_length = other.m_length;
+ m_is8Bit = other.m_is8Bit;
+
+ setUnderlyingString(other);
+
+ return *this;
+}
+#endif // CHECK_STRINGVIEW_LIFETIME
+
+inline void StringView::initialize(const LChar* characters, unsigned length)
+{
+ m_characters = characters;
+ m_length = length;
+ m_is8Bit = true;
+}
+
+inline void StringView::initialize(const UChar* characters, unsigned length)
+{
+ m_characters = characters;
+ m_length = length;
+ m_is8Bit = false;
+}
+
+inline StringView::StringView(const LChar* characters, unsigned length)
+{
+ initialize(characters, length);
+}
+
+inline StringView::StringView(const UChar* characters, unsigned length)
+{
+ initialize(characters, length);
+}
+
+inline StringView::StringView(const char* characters)
+{
+ initialize(reinterpret_cast<const LChar*>(characters), strlen(characters));
+}
+
+inline StringView::StringView(const StringImpl& string)
+{
+ setUnderlyingString(&string);
+ if (string.is8Bit())
+ initialize(string.characters8(), string.length());
+ else
+ initialize(string.characters16(), string.length());
+}
+
+inline StringView::StringView(const StringImpl* string)
+{
+ if (!string)
+ return;
+
+ setUnderlyingString(string);
+ if (string->is8Bit())
+ initialize(string->characters8(), string->length());
+ else
+ initialize(string->characters16(), string->length());
+}
+
+inline StringView::StringView(const String& string)
+{
+ setUnderlyingString(string.impl());
+ if (!string.impl()) {
+ clear();
+ return;
+ }
+ if (string.is8Bit()) {
+ initialize(string.characters8(), string.length());
+ return;
}
+ initialize(string.characters16(), string.length());
+}
- StringView(const LChar* characters, unsigned length)
- {
- initialize(characters, length);
+inline StringView::StringView(const AtomicString& atomicString)
+ : StringView(atomicString.string())
+{
+}
+
+inline void StringView::clear()
+{
+ m_characters = nullptr;
+ m_length = 0;
+ m_is8Bit = true;
+}
+
+inline StringView StringView::empty()
+{
+ return StringView(reinterpret_cast<const LChar*>(""), 0);
+}
+
+inline const LChar* StringView::characters8() const
+{
+ ASSERT(is8Bit());
+ ASSERT(underlyingStringIsValid());
+ return static_cast<const LChar*>(m_characters);
+}
+
+inline const UChar* StringView::characters16() const
+{
+ ASSERT(!is8Bit());
+ ASSERT(underlyingStringIsValid());
+ return static_cast<const UChar*>(m_characters);
+}
+
+class StringView::UpconvertedCharacters {
+public:
+ explicit UpconvertedCharacters(const StringView&);
+ operator const UChar*() const { return m_characters; }
+ const UChar* get() const { return m_characters; }
+private:
+ Vector<UChar, 32> m_upconvertedCharacters;
+ const UChar* m_characters;
+};
+
+inline StringView::UpconvertedCharacters StringView::upconvertedCharacters() const
+{
+ return UpconvertedCharacters(*this);
+}
+
+inline bool StringView::isNull() const
+{
+ return !m_characters;
+}
+
+inline bool StringView::isEmpty() const
+{
+ return !length();
+}
+
+inline unsigned StringView::length() const
+{
+ return m_length;
+}
+
+inline StringView::operator bool() const
+{
+ return !isNull();
+}
+
+inline bool StringView::is8Bit() const
+{
+ return m_is8Bit;
+}
+
+inline StringView StringView::substring(unsigned start, unsigned length) const
+{
+ if (start >= this->length())
+ return empty();
+ unsigned maxLength = this->length() - start;
+
+ if (length >= maxLength) {
+ if (!start)
+ return *this;
+ length = maxLength;
}
- StringView(const UChar* characters, unsigned length)
- {
- initialize(characters, length);
+ if (is8Bit()) {
+ StringView result(characters8() + start, length);
+ result.setUnderlyingString(*this);
+ return result;
}
+ StringView result(characters16() + start, length);
+ result.setUnderlyingString(*this);
+ return result;
+}
- StringView(const String& string)
- : m_characters(nullptr)
- , m_length(0)
- {
- if (!string.impl())
- return;
-
- if (string.is8Bit())
- initialize(string.characters8(), string.length());
- else
- initialize(string.characters16(), string.length());
+inline UChar StringView::operator[](unsigned index) const
+{
+ ASSERT(index < length());
+ if (is8Bit())
+ return characters8()[index];
+ return characters16()[index];
+}
+
+inline bool StringView::contains(UChar character) const
+{
+ return find(character) != notFound;
+}
+
+inline void StringView::getCharactersWithUpconvert(LChar* destination) const
+{
+ ASSERT(is8Bit());
+ auto characters8 = this->characters8();
+ for (unsigned i = 0; i < m_length; ++i)
+ destination[i] = characters8[i];
+}
+
+inline void StringView::getCharactersWithUpconvert(UChar* destination) const
+{
+ if (is8Bit()) {
+ auto characters8 = this->characters8();
+ for (unsigned i = 0; i < m_length; ++i)
+ destination[i] = characters8[i];
+ return;
}
+ auto characters16 = this->characters16();
+ for (unsigned i = 0; i < m_length; ++i)
+ destination[i] = characters16[i];
+}
- static StringView empty()
- {
- return StringView(reinterpret_cast<const LChar*>(""), 0);
+inline StringView::UpconvertedCharacters::UpconvertedCharacters(const StringView& string)
+{
+ if (!string.is8Bit()) {
+ m_characters = string.characters16();
+ return;
}
+ const LChar* characters8 = string.characters8();
+ unsigned length = string.m_length;
+ m_upconvertedCharacters.reserveInitialCapacity(length);
+ for (unsigned i = 0; i < length; ++i)
+ m_upconvertedCharacters.uncheckedAppend(characters8[i]);
+ m_characters = m_upconvertedCharacters.data();
+}
- const LChar* characters8() const
- {
- ASSERT(is8Bit());
+inline String StringView::toString() const
+{
+ if (is8Bit())
+ return String(characters8(), m_length);
+ return String(characters16(), m_length);
+}
- return static_cast<const LChar*>(m_characters);
- }
+inline AtomicString StringView::toAtomicString() const
+{
+ if (is8Bit())
+ return AtomicString(characters8(), m_length);
+ return AtomicString(characters16(), m_length);
+}
- const UChar* characters16() const
- {
- ASSERT(!is8Bit());
+inline float StringView::toFloat(bool& isValid) const
+{
+ if (is8Bit())
+ return charactersToFloat(characters8(), m_length, &isValid);
+ return charactersToFloat(characters16(), m_length, &isValid);
+}
- return static_cast<const UChar*>(m_characters);
- }
+inline int StringView::toInt() const
+{
+ bool isValid;
+ return toInt(isValid);
+}
- bool isNull() const { return !m_characters; }
- bool isEmpty() const { return !length(); }
- unsigned length() const { return m_length & ~is16BitStringFlag; }
+inline int StringView::toInt(bool& isValid) const
+{
+ if (is8Bit())
+ return charactersToInt(characters8(), m_length, &isValid);
+ return charactersToInt(characters16(), m_length, &isValid);
+}
- explicit operator bool() const { return !isNull(); }
+inline int StringView::toIntStrict(bool& isValid) const
+{
+ if (is8Bit())
+ return charactersToIntStrict(characters8(), m_length, &isValid);
+ return charactersToIntStrict(characters16(), m_length, &isValid);
+}
- bool is8Bit() const { return !(m_length & is16BitStringFlag); }
+inline String StringView::toStringWithoutCopying() const
+{
+ if (is8Bit())
+ return StringImpl::createWithoutCopying(characters8(), m_length);
+ return StringImpl::createWithoutCopying(characters16(), m_length);
+}
- StringView substring(unsigned start, unsigned length = std::numeric_limits<unsigned>::max()) const
- {
- if (start >= this->length())
- return empty();
- unsigned maxLength = this->length() - start;
+inline size_t StringView::find(UChar character, unsigned start) const
+{
+ if (is8Bit())
+ return WTF::find(characters8(), m_length, character, start);
+ return WTF::find(characters16(), m_length, character, start);
+}
- if (length >= maxLength) {
- if (!start)
- return *this;
- length = maxLength;
- }
+inline size_t StringView::find(CharacterMatchFunction matchFunction, unsigned start) const
+{
+ if (is8Bit())
+ return WTF::find(characters8(), m_length, matchFunction, start);
+ return WTF::find(characters16(), m_length, matchFunction, start);
+}
- if (is8Bit())
- return StringView(characters8() + start, length);
+inline size_t StringView::reverseFind(UChar character, unsigned index) const
+{
+ if (is8Bit())
+ return WTF::reverseFind(characters8(), m_length, character, index);
+ return WTF::reverseFind(characters16(), m_length, character, index);
+}
- return StringView(characters16() + start, length);
- }
+#if !CHECK_STRINGVIEW_LIFETIME
+inline void StringView::invalidate(const StringImpl&)
+{
+}
+#endif
- String toString() const
- {
- if (is8Bit())
- return String(characters8(), length());
+template<typename StringType> class StringTypeAdapter;
- return String(characters16(), length());
+template<> class StringTypeAdapter<StringView> {
+public:
+ StringTypeAdapter<StringView>(StringView string)
+ : m_string(string)
+ {
}
- String toStringWithoutCopying() const
- {
- if (is8Bit())
- return StringImpl::createWithoutCopying(characters8(), length());
+ unsigned length() { return m_string.length(); }
+ bool is8Bit() { return m_string.is8Bit(); }
+ void writeTo(LChar* destination) { m_string.getCharactersWithUpconvert(destination); }
+ void writeTo(UChar* destination) { m_string.getCharactersWithUpconvert(destination); }
- return StringImpl::createWithoutCopying(characters16(), length());
- }
+ String toString() const { return m_string.toString(); }
private:
- void initialize(const LChar* characters, unsigned length)
- {
- ASSERT(!(length & is16BitStringFlag));
-
- m_characters = characters;
- m_length = length;
- }
+ StringView m_string;
+};
- void initialize(const UChar* characters, unsigned length)
- {
- ASSERT(!(length & is16BitStringFlag));
-
- m_characters = characters;
- m_length = is16BitStringFlag | length;
+template<typename CharacterType, size_t inlineCapacity> void append(Vector<CharacterType, inlineCapacity>& buffer, StringView string)
+{
+ unsigned oldSize = buffer.size();
+ buffer.grow(oldSize + string.length());
+ string.getCharactersWithUpconvert(buffer.data() + oldSize);
+}
+
+inline bool equal(StringView a, StringView b)
+{
+ if (a.m_characters == b.m_characters) {
+ ASSERT(a.is8Bit() == b.is8Bit());
+ return a.length() == b.length();
}
+
+ return equalCommon(a, b);
+}
+
+inline bool equal(StringView a, const LChar* b)
+{
+ if (!b)
+ return !a.isEmpty();
+ if (a.isEmpty())
+ return !b;
+ unsigned aLength = a.length();
+ if (a.is8Bit())
+ return equal(a.characters8(), b, aLength);
+ return equal(a.characters16(), b, aLength);
+}
- static const unsigned is16BitStringFlag = 1u << 31;
+inline bool equal(StringView a, const char* b)
+{
+ return equal(a, reinterpret_cast<const LChar*>(b));
+}
- const void* m_characters;
+inline bool equalIgnoringASCIICase(StringView a, StringView b)
+{
+ return equalIgnoringASCIICaseCommon(a, b);
+}
+
+inline bool equalIgnoringASCIICase(StringView a, const char* b)
+{
+ return equalIgnoringASCIICaseCommon(a, b);
+}
+
+class StringView::SplitResult {
+public:
+ explicit SplitResult(StringView, UChar separator);
+
+ class Iterator;
+ Iterator begin() const;
+ Iterator end() const;
+
+private:
+ StringView m_string;
+ UChar m_separator;
+};
+
+class StringView::GraphemeClusters {
+public:
+ explicit GraphemeClusters(const StringView&);
+
+ class Iterator;
+ Iterator begin() const;
+ Iterator end() const;
+
+private:
+ StringView m_stringView;
+};
+
+class StringView::CodePoints {
+public:
+ explicit CodePoints(const StringView&);
+
+ class Iterator;
+ Iterator begin() const;
+ Iterator end() const;
+
+private:
+ StringView m_stringView;
+};
+
+class StringView::CodeUnits {
+public:
+ explicit CodeUnits(const StringView&);
+
+ class Iterator;
+ Iterator begin() const;
+ Iterator end() const;
+
+private:
+ StringView m_stringView;
+};
+
+class StringView::SplitResult::Iterator {
+public:
+ StringView operator*() const;
+
+ WTF_EXPORT_PRIVATE Iterator& operator++();
+
+ bool operator==(const Iterator&) const;
+ bool operator!=(const Iterator&) const;
+
+private:
+ enum PositionTag { AtEnd };
+ Iterator(const SplitResult&);
+ Iterator(const SplitResult&, PositionTag);
+
+ WTF_EXPORT_PRIVATE void findNextSubstring();
+
+ friend SplitResult;
+
+ const SplitResult& m_result;
+ unsigned m_position { 0 };
unsigned m_length;
};
+class StringView::GraphemeClusters::Iterator {
+public:
+ WTF_EXPORT_PRIVATE Iterator() = delete;
+ WTF_EXPORT_PRIVATE Iterator(const StringView&, unsigned index);
+ WTF_EXPORT_PRIVATE ~Iterator();
+
+ Iterator(const Iterator&) = delete;
+ WTF_EXPORT_PRIVATE Iterator(Iterator&&);
+ Iterator& operator=(const Iterator&) = delete;
+ Iterator& operator=(Iterator&&) = delete;
+
+ WTF_EXPORT_PRIVATE StringView operator*() const;
+ WTF_EXPORT_PRIVATE Iterator& operator++();
+
+ WTF_EXPORT_PRIVATE bool operator==(const Iterator&) const;
+ WTF_EXPORT_PRIVATE bool operator!=(const Iterator&) const;
+
+private:
+ class Impl;
+
+ std::unique_ptr<Impl> m_impl;
+};
+
+class StringView::CodePoints::Iterator {
+public:
+ Iterator(const StringView&, unsigned index);
+
+ UChar32 operator*() const;
+ Iterator& operator++();
+
+ bool operator==(const Iterator&) const;
+ bool operator!=(const Iterator&) const;
+ Iterator& operator=(const Iterator&);
+
+private:
+ std::reference_wrapper<const StringView> m_stringView;
+ std::optional<unsigned> m_nextCodePointOffset;
+ UChar32 m_codePoint;
+};
+
+class StringView::CodeUnits::Iterator {
+public:
+ Iterator(const StringView&, unsigned index);
+
+ UChar operator*() const;
+ Iterator& operator++();
+
+ bool operator==(const Iterator&) const;
+ bool operator!=(const Iterator&) const;
+
+private:
+ const StringView& m_stringView;
+ unsigned m_index;
+};
+
+inline auto StringView::graphemeClusters() const -> GraphemeClusters
+{
+ return GraphemeClusters(*this);
+}
+
+inline auto StringView::codePoints() const -> CodePoints
+{
+ return CodePoints(*this);
+}
+
+inline auto StringView::codeUnits() const -> CodeUnits
+{
+ return CodeUnits(*this);
+}
+
+inline StringView::GraphemeClusters::GraphemeClusters(const StringView& stringView)
+ : m_stringView(stringView)
+{
+}
+
+inline auto StringView::GraphemeClusters::begin() const -> Iterator
+{
+ return Iterator(m_stringView, 0);
+}
+
+inline auto StringView::GraphemeClusters::end() const -> Iterator
+{
+ return Iterator(m_stringView, m_stringView.length());
+}
+
+inline StringView::CodePoints::CodePoints(const StringView& stringView)
+ : m_stringView(stringView)
+{
+}
+
+inline StringView::CodePoints::Iterator::Iterator(const StringView& stringView, unsigned index)
+ : m_stringView(stringView)
+ , m_nextCodePointOffset(index)
+{
+ operator++();
+}
+
+inline auto StringView::CodePoints::Iterator::operator++() -> Iterator&
+{
+ ASSERT(m_nextCodePointOffset);
+ if (m_nextCodePointOffset.value() == m_stringView.get().length()) {
+ m_nextCodePointOffset = std::nullopt;
+ return *this;
+ }
+ if (m_stringView.get().is8Bit())
+ m_codePoint = m_stringView.get().characters8()[m_nextCodePointOffset.value()++];
+ else
+ U16_NEXT(m_stringView.get().characters16(), m_nextCodePointOffset.value(), m_stringView.get().length(), m_codePoint);
+ ASSERT(m_nextCodePointOffset.value() <= m_stringView.get().length());
+ return *this;
+}
+
+inline auto StringView::CodePoints::Iterator::operator=(const Iterator& other) -> Iterator&
+{
+ m_stringView = other.m_stringView;
+ m_nextCodePointOffset = other.m_nextCodePointOffset;
+ m_codePoint = other.m_codePoint;
+ return *this;
+}
+
+inline UChar32 StringView::CodePoints::Iterator::operator*() const
+{
+ ASSERT(m_nextCodePointOffset);
+ return m_codePoint;
+}
+
+inline bool StringView::CodePoints::Iterator::operator==(const Iterator& other) const
+{
+ ASSERT(&m_stringView.get() == &other.m_stringView.get());
+ return m_nextCodePointOffset == other.m_nextCodePointOffset;
+}
+
+inline bool StringView::CodePoints::Iterator::operator!=(const Iterator& other) const
+{
+ return !(*this == other);
+}
+
+inline auto StringView::CodePoints::begin() const -> Iterator
+{
+ return Iterator(m_stringView, 0);
+}
+
+inline auto StringView::CodePoints::end() const -> Iterator
+{
+ return Iterator(m_stringView, m_stringView.length());
+}
+
+inline StringView::CodeUnits::CodeUnits(const StringView& stringView)
+ : m_stringView(stringView)
+{
+}
+
+inline StringView::CodeUnits::Iterator::Iterator(const StringView& stringView, unsigned index)
+ : m_stringView(stringView)
+ , m_index(index)
+{
+}
+
+inline auto StringView::CodeUnits::Iterator::operator++() -> Iterator&
+{
+ ++m_index;
+ return *this;
+}
+
+inline UChar StringView::CodeUnits::Iterator::operator*() const
+{
+ return m_stringView[m_index];
+}
+
+inline bool StringView::CodeUnits::Iterator::operator==(const Iterator& other) const
+{
+ ASSERT(&m_stringView == &other.m_stringView);
+ return m_index == other.m_index;
+}
+
+inline bool StringView::CodeUnits::Iterator::operator!=(const Iterator& other) const
+{
+ return !(*this == other);
+}
+
+inline auto StringView::CodeUnits::begin() const -> Iterator
+{
+ return Iterator(m_stringView, 0);
+}
+
+inline auto StringView::CodeUnits::end() const -> Iterator
+{
+ return Iterator(m_stringView, m_stringView.length());
+}
+
+inline auto StringView::split(UChar separator) const -> SplitResult
+{
+ return SplitResult { *this, separator };
+}
+
+inline StringView::SplitResult::SplitResult(StringView stringView, UChar separator)
+ : m_string { stringView }
+ , m_separator { separator }
+{
+}
+
+inline auto StringView::SplitResult::begin() const -> Iterator
+{
+ return Iterator { *this };
+}
+
+inline auto StringView::SplitResult::end() const -> Iterator
+{
+ return Iterator { *this, Iterator::AtEnd };
+}
+
+inline StringView::SplitResult::Iterator::Iterator(const SplitResult& result)
+ : m_result { result }
+{
+ findNextSubstring();
+}
+
+inline StringView::SplitResult::Iterator::Iterator(const SplitResult& result, PositionTag)
+ : m_result { result }
+ , m_position { result.m_string.length() }
+{
+}
+
+inline StringView StringView::SplitResult::Iterator::operator*() const
+{
+ ASSERT(m_position < m_result.m_string.length());
+ return m_result.m_string.substring(m_position, m_length);
+}
+
+inline bool StringView::SplitResult::Iterator::operator==(const Iterator& other) const
+{
+ ASSERT(&m_result == &other.m_result);
+ return m_position == other.m_position;
+}
+
+inline bool StringView::SplitResult::Iterator::operator!=(const Iterator& other) const
+{
+ return !(*this == other);
+}
+
+template<unsigned length> inline bool equalLettersIgnoringASCIICase(StringView string, const char (&lowercaseLetters)[length])
+{
+ return equalLettersIgnoringASCIICaseCommon(string, lowercaseLetters);
+}
+
} // namespace WTF
+using WTF::append;
+using WTF::equal;
using WTF::StringView;
#endif // StringView_h
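Editor's note: as a rough illustration of the three ranges the new header exposes, the sketch below counts code units, code points, and grapheme clusters over the same StringView. dumpTextStatistics is a hypothetical helper; for the string U+0065 U+0301 (e followed by a combining acute accent) it would report 2 code units, 2 code points, and 1 grapheme cluster.

    // Sketch only; assumes the codeUnits()/codePoints()/graphemeClusters() ranges declared above.
    static void dumpTextStatistics(StringView string)
    {
        unsigned unitCount = 0;
        for (UChar codeUnit : string.codeUnits()) {
            UNUSED_PARAM(codeUnit);
            ++unitCount;
        }

        unsigned codePointCount = 0;
        for (UChar32 codePoint : string.codePoints()) {
            UNUSED_PARAM(codePoint);
            ++codePointCount;
        }

        unsigned clusterCount = 0;
        for (StringView cluster : string.graphemeClusters()) {
            UNUSED_PARAM(cluster);
            ++clusterCount;
        }

        dataLogF("%u code units, %u code points, %u grapheme clusters\n", unitCount, codePointCount, clusterCount);
    }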
diff --git a/Source/WTF/wtf/text/SymbolImpl.cpp b/Source/WTF/wtf/text/SymbolImpl.cpp
new file mode 100644
index 000000000..18ebea9e0
--- /dev/null
+++ b/Source/WTF/wtf/text/SymbolImpl.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "SymbolImpl.h"
+
+namespace WTF {
+
+// In addition to the normal hash value, store a specialized hash value for
+// symbolized StringImpl*, and don't use the normal hash value for symbolized
+// StringImpl* when they are treated as identifiers. The unique nature of these
+// symbolized StringImpl* keys means that we don't need them to match any other
+// string (in fact, that's exactly the opposite of what we want!), and the
+// normal content hash would lead to lots of collisions.
+unsigned SymbolImpl::nextHashForSymbol()
+{
+ static unsigned s_nextHashForSymbol = 0;
+ s_nextHashForSymbol += 1 << s_flagCount;
+ s_nextHashForSymbol |= 1 << 31;
+ return s_nextHashForSymbol;
+}
+
+Ref<SymbolImpl> SymbolImpl::create(StringImpl& rep)
+{
+ auto* ownerRep = (rep.bufferOwnership() == BufferSubstring) ? rep.substringBuffer() : &rep;
+ ASSERT(ownerRep->bufferOwnership() != BufferSubstring);
+ if (rep.is8Bit())
+ return adoptRef(*new SymbolImpl(rep.m_data8, rep.length(), *ownerRep));
+ return adoptRef(*new SymbolImpl(rep.m_data16, rep.length(), *ownerRep));
+}
+
+Ref<SymbolImpl> SymbolImpl::createNullSymbol()
+{
+ return adoptRef(*new SymbolImpl);
+}
+
+} // namespace WTF
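Editor's note: the counter-based hash above gives every symbol a hash that can never equal another symbol's, while keeping the low flag bits clear and the top bit set. A standalone sketch of the same idea; flagCount is an assumed stand-in for StringImpl's s_flagCount.

    // Sketch only; the real s_flagCount is defined by StringImpl.
    static unsigned nextSymbolHashSketch()
    {
        static const unsigned flagCount = 8; // assumption for illustration
        static unsigned counter = 0;
        counter += 1u << flagCount; // leave the low flag bits clear
        counter |= 1u << 31;        // the top bit marks the value as a symbol hash
        return counter;
    }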
diff --git a/Source/WTF/wtf/text/SymbolImpl.h b/Source/WTF/wtf/text/SymbolImpl.h
new file mode 100644
index 000000000..293da0a59
--- /dev/null
+++ b/Source/WTF/wtf/text/SymbolImpl.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2015-2016 Yusuke Suzuki <utatane.tea@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/text/UniquedStringImpl.h>
+
+namespace WTF {
+
+// SymbolImpl represents the string impl of a symbol.
+// It is a uniqued string impl, but it is not registered in the AtomicString tables, so it is not atomic.
+class SymbolImpl : public UniquedStringImpl {
+private:
+ static constexpr const unsigned s_flagIsNullSymbol = 1u;
+
+public:
+ unsigned hashForSymbol() const { return m_hashForSymbol; }
+ SymbolRegistry* const& symbolRegistry() const { return m_symbolRegistry; }
+ SymbolRegistry*& symbolRegistry() { return m_symbolRegistry; }
+ bool isNullSymbol() const { return m_flags & s_flagIsNullSymbol; }
+
+ WTF_EXPORT_STRING_API static Ref<SymbolImpl> createNullSymbol();
+ WTF_EXPORT_STRING_API static Ref<SymbolImpl> create(StringImpl& rep);
+
+ Ref<StringImpl> extractFoldedString()
+ {
+ ASSERT(substringBuffer());
+ ASSERT(substringBuffer() == m_owner);
+ ASSERT(!substringBuffer()->isSymbol());
+ return createSubstringSharingImpl(*this, 0, length());
+ }
+
+private:
+ WTF_EXPORT_PRIVATE static unsigned nextHashForSymbol();
+
+ friend class StringImpl;
+
+ SymbolImpl(const LChar* characters, unsigned length, Ref<StringImpl>&& base)
+ : UniquedStringImpl(CreateSymbol, characters, length)
+ , m_owner(&base.leakRef())
+ , m_hashForSymbol(nextHashForSymbol())
+ {
+ ASSERT(StringImpl::tailOffset<StringImpl*>() == OBJECT_OFFSETOF(SymbolImpl, m_owner));
+ }
+
+ SymbolImpl(const UChar* characters, unsigned length, Ref<StringImpl>&& base)
+ : UniquedStringImpl(CreateSymbol, characters, length)
+ , m_owner(&base.leakRef())
+ , m_hashForSymbol(nextHashForSymbol())
+ {
+ ASSERT(StringImpl::tailOffset<StringImpl*>() == OBJECT_OFFSETOF(SymbolImpl, m_owner));
+ }
+
+ SymbolImpl()
+ : UniquedStringImpl(CreateSymbol)
+ , m_owner(StringImpl::empty())
+ , m_hashForSymbol(nextHashForSymbol())
+ , m_flags(s_flagIsNullSymbol)
+ {
+ ASSERT(StringImpl::tailOffset<StringImpl*>() == OBJECT_OFFSETOF(SymbolImpl, m_owner));
+ }
+
+ // The pointer to the owner string must immediately follow the StringImpl layout,
+ // since we would like the layout of SymbolImpl to match that of a BufferSubstring StringImpl.
+ StringImpl* m_owner;
+ SymbolRegistry* m_symbolRegistry { nullptr };
+ unsigned m_hashForSymbol;
+ unsigned m_flags { 0 };
+};
+
+inline unsigned StringImpl::symbolAwareHash() const
+{
+ if (isSymbol())
+ return static_cast<const SymbolImpl*>(this)->hashForSymbol();
+ return hash();
+}
+
+inline unsigned StringImpl::existingSymbolAwareHash() const
+{
+ if (isSymbol())
+ return static_cast<const SymbolImpl*>(this)->hashForSymbol();
+ return existingHash();
+}
+
+#if !ASSERT_DISABLED
+// SymbolImpls created from StaticStringImpl will ASSERT
+// in the generic ValueCheck<T>::checkConsistency
+// as they are not allocated by fastMalloc.
+// We don't currently have any way to detect that case,
+// so we ignore the consistency check for all SymbolImpl*.
+template<> struct ValueCheck<SymbolImpl*> {
+ static void checkConsistency(const SymbolImpl*) { }
+};
+
+template<> struct ValueCheck<const SymbolImpl*> {
+ static void checkConsistency(const SymbolImpl*) { }
+};
+#endif
+
+} // namespace WTF
+
+using WTF::SymbolImpl;
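Editor's note: one consequence of the scheme above is that identifier tables should key on symbolAwareHash() rather than hash(). A hedged sketch of the comparison such a table would perform; sameIdentifier is a hypothetical helper, not part of this patch.

    // Sketch only: symbols compare by identity, ordinary strings by content.
    static bool sameIdentifier(StringImpl& a, StringImpl& b)
    {
        if (a.symbolAwareHash() != b.symbolAwareHash())
            return false;
        if (a.isSymbol() || b.isSymbol())
            return &a == &b;
        return equal(&a, &b);
    }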
diff --git a/Source/WTF/wtf/text/SymbolRegistry.cpp b/Source/WTF/wtf/text/SymbolRegistry.cpp
new file mode 100644
index 000000000..264bc5ca4
--- /dev/null
+++ b/Source/WTF/wtf/text/SymbolRegistry.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "SymbolRegistry.h"
+
+namespace WTF {
+
+SymbolRegistry::~SymbolRegistry()
+{
+ for (auto& key : m_table)
+ static_cast<SymbolImpl&>(*key.impl()).symbolRegistry() = nullptr;
+}
+
+Ref<SymbolImpl> SymbolRegistry::symbolForKey(const String& rep)
+{
+ auto addResult = m_table.add(SymbolRegistryKey(rep.impl()));
+ if (!addResult.isNewEntry)
+ return *static_cast<SymbolImpl*>(addResult.iterator->impl());
+
+ auto symbol = SymbolImpl::create(*rep.impl());
+ symbol->symbolRegistry() = this;
+ *addResult.iterator = SymbolRegistryKey(&symbol.get());
+ return symbol;
+}
+
+String SymbolRegistry::keyForSymbol(SymbolImpl& uid)
+{
+ ASSERT(uid.symbolRegistry() == this);
+ return uid.extractFoldedString();
+}
+
+void SymbolRegistry::remove(SymbolImpl& uid)
+{
+ ASSERT(uid.symbolRegistry() == this);
+ auto iterator = m_table.find(SymbolRegistryKey(&uid));
+ ASSERT_WITH_MESSAGE(iterator != m_table.end(), "The string being removed is registered in the string table of another thread!");
+ m_table.remove(iterator);
+}
+
+}
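Editor's note: a hedged usage sketch of the registry above; registryRoundTrip is illustrative only. Equal keys always map to the same SymbolImpl, which is the behaviour JavaScript's Symbol.for()/Symbol.keyFor() relies on.

    // Sketch only; assumes the SymbolRegistry API declared in SymbolRegistry.h.
    static void registryRoundTrip(SymbolRegistry& registry)
    {
        Ref<SymbolImpl> first = registry.symbolForKey(String("shared"));
        Ref<SymbolImpl> second = registry.symbolForKey(String("shared"));
        ASSERT_UNUSED(second, first.ptr() == second.ptr());

        String key = registry.keyForSymbol(first.get());
        ASSERT_UNUSED(key, key == "shared");
    }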
diff --git a/Source/WTF/wtf/text/SymbolRegistry.h b/Source/WTF/wtf/text/SymbolRegistry.h
new file mode 100644
index 000000000..06d276834
--- /dev/null
+++ b/Source/WTF/wtf/text/SymbolRegistry.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_SymbolRegistry_h
+#define WTF_SymbolRegistry_h
+
+#include <wtf/HashSet.h>
+#include <wtf/text/StringHash.h>
+#include <wtf/text/SymbolImpl.h>
+#include <wtf/text/WTFString.h>
+
+namespace WTF {
+
+// Since a StringImpl* used as a Symbol uid doesn't have a hash value that reflects the string content,
+// SymbolRegistryKey is introduced so entries can be compared with external strings by content.
+// SymbolRegistryKey additionally holds a hash value computed from the string content.
+class SymbolRegistryKey {
+public:
+ SymbolRegistryKey() = default;
+ explicit SymbolRegistryKey(StringImpl* uid);
+ SymbolRegistryKey(WTF::HashTableDeletedValueType);
+
+ unsigned hash() const { return m_hash; }
+ StringImpl* impl() const { return m_impl; }
+
+ bool isHashTableDeletedValue() const { return m_impl == hashTableDeletedValue(); }
+
+private:
+ static StringImpl* hashTableDeletedValue() { return reinterpret_cast<StringImpl*>(-1); }
+
+ StringImpl* m_impl { nullptr };
+ unsigned m_hash { 0 };
+};
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<SymbolRegistryKey> {
+ struct Hash : StringHash {
+ static unsigned hash(const SymbolRegistryKey& key)
+ {
+ return key.hash();
+ }
+ static bool equal(const SymbolRegistryKey& a, const SymbolRegistryKey& b)
+ {
+ return StringHash::equal(a.impl(), b.impl());
+ }
+ };
+};
+
+template<> struct HashTraits<SymbolRegistryKey> : SimpleClassHashTraits<SymbolRegistryKey> {
+ static const bool hasIsEmptyValueFunction = true;
+ static bool isEmptyValue(const SymbolRegistryKey& key)
+ {
+ return key.impl() == nullptr;
+ }
+};
+
+class SymbolRegistry {
+ WTF_MAKE_NONCOPYABLE(SymbolRegistry);
+public:
+ SymbolRegistry() = default;
+ WTF_EXPORT_PRIVATE ~SymbolRegistry();
+
+ WTF_EXPORT_PRIVATE Ref<SymbolImpl> symbolForKey(const String&);
+ WTF_EXPORT_PRIVATE String keyForSymbol(SymbolImpl&);
+
+ void remove(SymbolImpl&);
+
+private:
+ HashSet<SymbolRegistryKey> m_table;
+};
+
+inline SymbolRegistryKey::SymbolRegistryKey(StringImpl* uid)
+ : m_impl(uid)
+{
+ if (uid->isSymbol()) {
+ if (uid->is8Bit())
+ m_hash = StringHasher::computeHashAndMaskTop8Bits(uid->characters8(), uid->length());
+ else
+ m_hash = StringHasher::computeHashAndMaskTop8Bits(uid->characters16(), uid->length());
+ } else
+ m_hash = uid->hash();
+}
+
+inline SymbolRegistryKey::SymbolRegistryKey(WTF::HashTableDeletedValueType)
+ : m_impl(hashTableDeletedValue())
+{
+}
+
+}
+
+#endif
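Editor's note: because SymbolImpl's own hash does not reflect string content, SymbolRegistryKey recomputes a content hash so the HashSet above can be probed with an ordinary string. A hedged sketch of such a content lookup; lookupByContent is illustrative only and name is assumed non-null.

    // Sketch only; relies on the DefaultHash/HashTraits specializations above.
    static SymbolImpl* lookupByContent(const HashSet<SymbolRegistryKey>& table, const String& name)
    {
        auto it = table.find(SymbolRegistryKey(name.impl()));
        if (it == table.end())
            return nullptr;
        return static_cast<SymbolImpl*>(it->impl());
    }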
diff --git a/Source/WTF/wtf/text/TextBreakIterator.cpp b/Source/WTF/wtf/text/TextBreakIterator.cpp
new file mode 100644
index 000000000..1edc32a5d
--- /dev/null
+++ b/Source/WTF/wtf/text/TextBreakIterator.cpp
@@ -0,0 +1,448 @@
+/*
+ * (C) 1999 Lars Knoll (knoll@kde.org)
+ * Copyright (C) 2004-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2007-2009 Torch Mobile, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#include "config.h"
+#include "TextBreakIterator.h"
+
+#include "LineBreakIteratorPoolICU.h"
+#include "TextBreakIteratorInternalICU.h"
+#include "UTextProviderLatin1.h"
+#include "UTextProviderUTF16.h"
+#include <atomic>
+#include <mutex>
+#include <unicode/ubrk.h>
+#include <wtf/text/StringBuilder.h>
+
+// FIXME: This needs a better name
+#define ADDITIONAL_EMOJI_SUPPORT (PLATFORM(IOS) || (PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101100))
+
+namespace WTF {
+
+// Iterator initialization
+
+static UBreakIterator* initializeIterator(UBreakIteratorType type, const char* locale = currentTextBreakLocaleID())
+{
+ UErrorCode openStatus = U_ZERO_ERROR;
+ UBreakIterator* iterator = ubrk_open(type, locale, 0, 0, &openStatus);
+ ASSERT_WITH_MESSAGE(U_SUCCESS(openStatus), "ICU could not open a break iterator: %s (%d)", u_errorName(openStatus), openStatus);
+ return iterator;
+}
+
+#if !PLATFORM(IOS)
+
+static UBreakIterator* initializeIteratorWithRules(const char* breakRules)
+{
+ UParseError parseStatus;
+ UErrorCode openStatus = U_ZERO_ERROR;
+ unsigned length = strlen(breakRules);
+ auto upconvertedCharacters = StringView(reinterpret_cast<const LChar*>(breakRules), length).upconvertedCharacters();
+ UBreakIterator* iterator = ubrk_openRules(upconvertedCharacters, length, 0, 0, &parseStatus, &openStatus);
+ ASSERT_WITH_MESSAGE(U_SUCCESS(openStatus), "ICU could not open a break iterator: %s (%d)", u_errorName(openStatus), openStatus);
+ return iterator;
+}
+
+#endif
+
+
+// Iterator text setting
+
+static UBreakIterator* setTextForIterator(UBreakIterator& iterator, StringView string)
+{
+ if (string.is8Bit()) {
+ UTextWithBuffer textLocal;
+ textLocal.text = UTEXT_INITIALIZER;
+ textLocal.text.extraSize = sizeof(textLocal.buffer);
+ textLocal.text.pExtra = textLocal.buffer;
+
+ UErrorCode openStatus = U_ZERO_ERROR;
+ UText* text = openLatin1UTextProvider(&textLocal, string.characters8(), string.length(), &openStatus);
+ if (U_FAILURE(openStatus)) {
+ LOG_ERROR("uTextOpenLatin1 failed with status %d", openStatus);
+ return nullptr;
+ }
+
+ UErrorCode setTextStatus = U_ZERO_ERROR;
+ ubrk_setUText(&iterator, text, &setTextStatus);
+ if (U_FAILURE(setTextStatus)) {
+ LOG_ERROR("ubrk_setUText failed with status %d", setTextStatus);
+ return nullptr;
+ }
+
+ utext_close(text);
+ } else {
+ UErrorCode setTextStatus = U_ZERO_ERROR;
+ ubrk_setText(&iterator, string.characters16(), string.length(), &setTextStatus);
+ if (U_FAILURE(setTextStatus))
+ return nullptr;
+ }
+
+ return &iterator;
+}
+
+static UBreakIterator* setContextAwareTextForIterator(UBreakIterator& iterator, StringView string, const UChar* priorContext, unsigned priorContextLength)
+{
+ if (string.is8Bit()) {
+ UTextWithBuffer textLocal;
+ textLocal.text = UTEXT_INITIALIZER;
+ textLocal.text.extraSize = sizeof(textLocal.buffer);
+ textLocal.text.pExtra = textLocal.buffer;
+
+ UErrorCode openStatus = U_ZERO_ERROR;
+ UText* text = openLatin1ContextAwareUTextProvider(&textLocal, string.characters8(), string.length(), priorContext, priorContextLength, &openStatus);
+ if (U_FAILURE(openStatus)) {
+ LOG_ERROR("openLatin1ContextAwareUTextProvider failed with status %d", openStatus);
+ return nullptr;
+ }
+
+ UErrorCode setTextStatus = U_ZERO_ERROR;
+ ubrk_setUText(&iterator, text, &setTextStatus);
+ if (U_FAILURE(setTextStatus)) {
+ LOG_ERROR("ubrk_setUText failed with status %d", setTextStatus);
+ return nullptr;
+ }
+
+ utext_close(text);
+ } else {
+ UText textLocal = UTEXT_INITIALIZER;
+
+ UErrorCode openStatus = U_ZERO_ERROR;
+ UText* text = openUTF16ContextAwareUTextProvider(&textLocal, string.characters16(), string.length(), priorContext, priorContextLength, &openStatus);
+ if (U_FAILURE(openStatus)) {
+ LOG_ERROR("openUTF16ContextAwareUTextProvider failed with status %d", openStatus);
+ return 0;
+ }
+
+ UErrorCode setTextStatus = U_ZERO_ERROR;
+ ubrk_setUText(&iterator, text, &setTextStatus);
+ if (U_FAILURE(setTextStatus)) {
+ LOG_ERROR("ubrk_setUText failed with status %d", setTextStatus);
+ return nullptr;
+ }
+
+ utext_close(text);
+ }
+
+ return &iterator;
+}
+
+
+// Static iterators
+
+UBreakIterator* wordBreakIterator(StringView string)
+{
+ static UBreakIterator* staticWordBreakIterator = initializeIterator(UBRK_WORD);
+ if (!staticWordBreakIterator)
+ return nullptr;
+
+ return setTextForIterator(*staticWordBreakIterator, string);
+}
+
+UBreakIterator* sentenceBreakIterator(StringView string)
+{
+ static UBreakIterator* staticSentenceBreakIterator = initializeIterator(UBRK_SENTENCE);
+ if (!staticSentenceBreakIterator)
+ return nullptr;
+
+ return setTextForIterator(*staticSentenceBreakIterator, string);
+}
+
+UBreakIterator* cursorMovementIterator(StringView string)
+{
+#if !PLATFORM(IOS)
+ // This rule set is based on character-break iterator rules of ICU 57
+ // <http://source.icu-project.org/repos/icu/icu/tags/release-57-1/source/data/brkitr/>.
+ // The major differences from the original ones are listed below:
+ // * Replaced '[\p{Grapheme_Cluster_Break = SpacingMark}]' with '[\p{General_Category = Spacing Mark} - $Extend]' for ICU 3.8 or earlier;
+ // * Removed rules that prevent a cursor from moving after prepend characters (Bug 24342);
+ // * Added rules that prevent a cursor from moving after virama signs of Indic languages except Tamil (Bug 15790), and;
+ // * Added rules that prevent a cursor from moving before Japanese half-width katakana voiced marks.
+ // * Added rules for regional indicator symbols.
+ static const char* kRules =
+ "$CR = [\\p{Grapheme_Cluster_Break = CR}];"
+ "$LF = [\\p{Grapheme_Cluster_Break = LF}];"
+ "$Control = [\\p{Grapheme_Cluster_Break = Control}];"
+ "$VoiceMarks = [\\uFF9E\\uFF9F];" // Japanese half-width katakana voiced marks
+ "$Extend = [\\p{Grapheme_Cluster_Break = Extend} $VoiceMarks - [\\u0E30 \\u0E32 \\u0E45 \\u0EB0 \\u0EB2]];"
+ "$SpacingMark = [[\\p{General_Category = Spacing Mark}] - $Extend];"
+ "$L = [\\p{Grapheme_Cluster_Break = L}];"
+ "$V = [\\p{Grapheme_Cluster_Break = V}];"
+ "$T = [\\p{Grapheme_Cluster_Break = T}];"
+ "$LV = [\\p{Grapheme_Cluster_Break = LV}];"
+ "$LVT = [\\p{Grapheme_Cluster_Break = LVT}];"
+ "$Hin0 = [\\u0905-\\u0939];" // Devanagari Letter A,...,Ha
+ "$HinV = \\u094D;" // Devanagari Sign Virama
+ "$Hin1 = [\\u0915-\\u0939];" // Devanagari Letter Ka,...,Ha
+ "$Ben0 = [\\u0985-\\u09B9];" // Bengali Letter A,...,Ha
+ "$BenV = \\u09CD;" // Bengali Sign Virama
+ "$Ben1 = [\\u0995-\\u09B9];" // Bengali Letter Ka,...,Ha
+ "$Pan0 = [\\u0A05-\\u0A39];" // Gurmukhi Letter A,...,Ha
+ "$PanV = \\u0A4D;" // Gurmukhi Sign Virama
+ "$Pan1 = [\\u0A15-\\u0A39];" // Gurmukhi Letter Ka,...,Ha
+ "$Guj0 = [\\u0A85-\\u0AB9];" // Gujarati Letter A,...,Ha
+ "$GujV = \\u0ACD;" // Gujarati Sign Virama
+ "$Guj1 = [\\u0A95-\\u0AB9];" // Gujarati Letter Ka,...,Ha
+ "$Ori0 = [\\u0B05-\\u0B39];" // Oriya Letter A,...,Ha
+ "$OriV = \\u0B4D;" // Oriya Sign Virama
+ "$Ori1 = [\\u0B15-\\u0B39];" // Oriya Letter Ka,...,Ha
+ "$Tel0 = [\\u0C05-\\u0C39];" // Telugu Letter A,...,Ha
+ "$TelV = \\u0C4D;" // Telugu Sign Virama
+ "$Tel1 = [\\u0C14-\\u0C39];" // Telugu Letter Ka,...,Ha
+ "$Kan0 = [\\u0C85-\\u0CB9];" // Kannada Letter A,...,Ha
+ "$KanV = \\u0CCD;" // Kannada Sign Virama
+ "$Kan1 = [\\u0C95-\\u0CB9];" // Kannada Letter A,...,Ha
+ "$Mal0 = [\\u0D05-\\u0D39];" // Malayalam Letter A,...,Ha
+ "$MalV = \\u0D4D;" // Malayalam Sign Virama
+ "$Mal1 = [\\u0D15-\\u0D39];" // Malayalam Letter A,...,Ha
+ "$RI = [\\U0001F1E6-\\U0001F1FF];" // Emoji regional indicators
+ "$ZWJ = \\u200D;" // Zero width joiner
+ "$EmojiVar = [\\uFE0F];" // Emoji-style variation selector
+#if ADDITIONAL_EMOJI_SUPPORT
+ "$EmojiForSeqs = [\\u2640 \\u2642 \\u26F9 \\u2764 \\U0001F308 \\U0001F3C3-\\U0001F3C4 \\U0001F3CA-\\U0001F3CC \\U0001F3F3 \\U0001F441 \\U0001F466-\\U0001F469 \\U0001F46E-\\U0001F46F \\U0001F471 \\U0001F473 \\U0001F477 \\U0001F481-\\U0001F482 \\U0001F486-\\U0001F487 \\U0001F48B \\U0001F575 \\U0001F5E8 \\U0001F645-\\U0001F647 \\U0001F64B \\U0001F64D-\\U0001F64E \\U0001F6A3 \\U0001F6B4-\\U0001F6B6 \\u2695-\\u2696 \\u2708 \\U0001F33E \\U0001F373 \\U0001F393 \\U0001F3A4 \\U0001F3A8 \\U0001F3EB \\U0001F3ED \\U0001F4BB-\\U0001F4BC \\U0001F527 \\U0001F52C \\U0001F680 \\U0001F692 \\U0001F926 \\U0001F937-\\U0001F939 \\U0001F93C-\\U0001F93E];" // Emoji that participate in ZWJ sequences
+ "$EmojiForMods = [\\u261D \\u26F9 \\u270A-\\u270D \\U0001F385 \\U0001F3C3-\\U0001F3C4 \\U0001F3CA \\U0001F3CB \\U0001F442-\\U0001F443 \\U0001F446-\\U0001F450 \\U0001F466-\\U0001F478 \\U0001F47C \\U0001F481-\\U0001F483 \\U0001F485-\\U0001F487 \\U0001F4AA \\U0001F575 \\U0001F590 \\U0001F595 \\U0001F596 \\U0001F645-\\U0001F647 \\U0001F64B-\\U0001F64F \\U0001F6A3 \\U0001F6B4-\\U0001F6B6 \\U0001F6C0 \\U0001F918 \\U0001F3C2 \\U0001F3C7 \\U0001F3CC \\U0001F574 \\U0001F57A \\U0001F6CC \\U0001F919-\\U0001F91E \\U0001F926 \\U0001F930 \\U0001F933-\\U0001F939 \\U0001F93C-\\U0001F93E] ;" // Emoji that take Fitzpatrick modifiers
+#else
+ "$EmojiForSeqs = [\\u2764 \\U0001F466-\\U0001F469 \\U0001F48B];" // Emoji that participate in ZWJ sequences
+ "$EmojiForMods = [\\u261D \\u270A-\\u270C \\U0001F385 \\U0001F3C3-\\U0001F3C4 \\U0001F3C7 \\U0001F3CA \\U0001F442-\\U0001F443 \\U0001F446-\\U0001F450 \\U0001F466-\\U0001F469 \\U0001F46E-\\U0001F478 \\U0001F47C \\U0001F481-\\U0001F483 \\U0001F485-\\U0001F487 \\U0001F4AA \\U0001F596 \\U0001F645-\\U0001F647 \\U0001F64B-\\U0001F64F \\U0001F6A3 \\U0001F6B4-\\U0001F6B6 \\U0001F6C0] ;" // Emoji that take Fitzpatrick modifiers
+#endif
+ "$EmojiMods = [\\U0001F3FB-\\U0001F3FF];" // Fitzpatrick modifiers
+ "!!chain;"
+#if ADDITIONAL_EMOJI_SUPPORT
+ "!!RINoChain;"
+#endif
+ "!!forward;"
+ "$CR $LF;"
+ "$L ($L | $V | $LV | $LVT);"
+ "($LV | $V) ($V | $T);"
+ "($LVT | $T) $T;"
+#if ADDITIONAL_EMOJI_SUPPORT
+ "$RI $RI $Extend* / $RI;"
+ "$RI $RI $Extend*;"
+ "[^$Control $CR $LF] $Extend;"
+ "[^$Control $CR $LF] $SpacingMark;"
+#else
+ "[^$Control $CR $LF] $Extend;"
+ "[^$Control $CR $LF] $SpacingMark;"
+ "$RI $RI / $RI;"
+ "$RI $RI;"
+#endif
+ "$Hin0 $HinV $Hin1;" // Devanagari Virama (forward)
+ "$Ben0 $BenV $Ben1;" // Bengali Virama (forward)
+ "$Pan0 $PanV $Pan1;" // Gurmukhi Virama (forward)
+ "$Guj0 $GujV $Guj1;" // Gujarati Virama (forward)
+ "$Ori0 $OriV $Ori1;" // Oriya Virama (forward)
+ "$Tel0 $TelV $Tel1;" // Telugu Virama (forward)
+ "$Kan0 $KanV $Kan1;" // Kannada Virama (forward)
+ "$Mal0 $MalV $Mal1;" // Malayalam Virama (forward)
+ "$ZWJ $EmojiForSeqs;" // Don't break in emoji ZWJ sequences
+ "$EmojiForMods $EmojiVar? $EmojiMods;" // Don't break between relevant emoji (possibly with variation selector) and Fitzpatrick modifier
+ "!!reverse;"
+ "$LF $CR;"
+ "($L | $V | $LV | $LVT) $L;"
+ "($V | $T) ($LV | $V);"
+ "$T ($LVT | $T);"
+#if ADDITIONAL_EMOJI_SUPPORT
+ "$Extend* $RI $RI / $Extend* $RI $RI;"
+ "$Extend* $RI $RI;"
+ "$Extend [^$Control $CR $LF];"
+ "$SpacingMark [^$Control $CR $LF];"
+#else
+ "$Extend [^$Control $CR $LF];"
+ "$SpacingMark [^$Control $CR $LF];"
+ "$RI $RI / $RI $RI;"
+ "$RI $RI;"
+#endif
+ "$Hin1 $HinV $Hin0;" // Devanagari Virama (backward)
+ "$Ben1 $BenV $Ben0;" // Bengali Virama (backward)
+ "$Pan1 $PanV $Pan0;" // Gurmukhi Virama (backward)
+ "$Guj1 $GujV $Guj0;" // Gujarati Virama (backward)
+ "$Ori1 $OriV $Ori0;" // Gujarati Virama (backward)
+ "$Tel1 $TelV $Tel0;" // Telugu Virama (backward)
+ "$Kan1 $KanV $Kan0;" // Kannada Virama (backward)
+ "$Mal1 $MalV $Mal0;" // Malayalam Virama (backward)
+ "$EmojiForSeqs $ZWJ;" // Don't break in emoji ZWJ sequences
+ "$EmojiMods $EmojiVar? $EmojiForMods;" // Don't break between relevant emoji (possibly with variation selector) and Fitzpatrick modifier
+#if ADDITIONAL_EMOJI_SUPPORT
+ "!!safe_reverse;"
+ "$RI $RI+;"
+ "[$EmojiVar $EmojiMods]+ $EmojiForMods;"
+ "!!safe_forward;"
+ "$RI $RI+;"
+ "$EmojiForMods [$EmojiVar $EmojiMods]+;";
+#else
+ "[$EmojiVar $EmojiMods]+ $EmojiForMods;"
+ "$EmojiForMods [$EmojiVar $EmojiMods]+;"
+ "!!safe_reverse;"
+ "!!safe_forward;";
+#endif
+ static UBreakIterator* staticCursorMovementIterator = initializeIteratorWithRules(kRules);
+#else // PLATFORM(IOS)
+ // Use the special Thai character break iterator for all locales
+ static UBreakIterator* staticCursorMovementIterator = initializeIterator(UBRK_CHARACTER, "th");
+#endif // !PLATFORM(IOS)
+
+ if (!staticCursorMovementIterator)
+ return nullptr;
+
+ return setTextForIterator(*staticCursorMovementIterator, string);
+}
+
+UBreakIterator* acquireLineBreakIterator(StringView string, const AtomicString& locale, const UChar* priorContext, unsigned priorContextLength, LineBreakIteratorMode mode)
+{
+ UBreakIterator* iterator = LineBreakIteratorPool::sharedPool().take(locale, mode);
+ if (!iterator)
+ return nullptr;
+
+ return setContextAwareTextForIterator(*iterator, string, priorContext, priorContextLength);
+}
+
+void releaseLineBreakIterator(UBreakIterator* iterator)
+{
+ ASSERT_ARG(iterator, iterator);
+
+ LineBreakIteratorPool::sharedPool().put(iterator);
+}
+
+UBreakIterator* openLineBreakIterator(const AtomicString& locale)
+{
+ bool localeIsEmpty = locale.isEmpty();
+ UErrorCode openStatus = U_ZERO_ERROR;
+ UBreakIterator* ubrkIter = ubrk_open(UBRK_LINE, localeIsEmpty ? currentTextBreakLocaleID() : locale.string().utf8().data(), 0, 0, &openStatus);
+ // The locale comes from a web page and can be invalid, causing ICU to fail to
+ // open the iterator; in that case we fall back to the default locale.
+ if (!localeIsEmpty && U_FAILURE(openStatus)) {
+ openStatus = U_ZERO_ERROR;
+ ubrkIter = ubrk_open(UBRK_LINE, currentTextBreakLocaleID(), 0, 0, &openStatus);
+ }
+
+ if (U_FAILURE(openStatus)) {
+ LOG_ERROR("ubrk_open failed with status %d", openStatus);
+ return nullptr;
+ }
+
+ return ubrkIter;
+}
+
+void closeLineBreakIterator(UBreakIterator*& iterator)
+{
+ UBreakIterator* ubrkIter = iterator;
+ ASSERT(ubrkIter);
+ ubrk_close(ubrkIter);
+ iterator = nullptr;
+}
+
+static std::atomic<UBreakIterator*> nonSharedCharacterBreakIterator = ATOMIC_VAR_INIT(nullptr);
+
+static inline UBreakIterator* getNonSharedCharacterBreakIterator()
+{
+ if (auto *res = nonSharedCharacterBreakIterator.exchange(nullptr, std::memory_order_acquire))
+ return res;
+ return initializeIterator(UBRK_CHARACTER);
+}
+
+static inline void cacheNonSharedCharacterBreakIterator(UBreakIterator* cacheMe)
+{
+ if (auto *old = nonSharedCharacterBreakIterator.exchange(cacheMe, std::memory_order_release))
+ ubrk_close(old);
+}
+
+NonSharedCharacterBreakIterator::NonSharedCharacterBreakIterator(StringView string)
+{
+ if ((m_iterator = getNonSharedCharacterBreakIterator()))
+ m_iterator = setTextForIterator(*m_iterator, string);
+}
+
+NonSharedCharacterBreakIterator::~NonSharedCharacterBreakIterator()
+{
+ if (m_iterator)
+ cacheNonSharedCharacterBreakIterator(m_iterator);
+}
+
+NonSharedCharacterBreakIterator::NonSharedCharacterBreakIterator(NonSharedCharacterBreakIterator&& other)
+ : m_iterator(nullptr)
+{
+ std::swap(m_iterator, other.m_iterator);
+}
+
+// Iterator implementation.
+
+bool isWordTextBreak(UBreakIterator* iterator)
+{
+ int ruleStatus = ubrk_getRuleStatus(iterator);
+ return ruleStatus != UBRK_WORD_NONE;
+}
+
+unsigned numGraphemeClusters(StringView string)
+{
+ unsigned stringLength = string.length();
+
+ if (!stringLength)
+ return 0;
+
+ // The only Latin-1 Extended Grapheme Cluster is CRLF.
+ if (string.is8Bit()) {
+ auto* characters = string.characters8();
+ unsigned numCRLF = 0;
+ for (unsigned i = 1; i < stringLength; ++i)
+ numCRLF += characters[i - 1] == '\r' && characters[i] == '\n';
+ return stringLength - numCRLF;
+ }
+
+ NonSharedCharacterBreakIterator iterator { string };
+ if (!iterator) {
+ ASSERT_NOT_REACHED();
+ return stringLength;
+ }
+
+ unsigned numGraphemeClusters = 0;
+ while (ubrk_next(iterator) != UBRK_DONE)
+ ++numGraphemeClusters;
+ return numGraphemeClusters;
+}
+
+unsigned numCharactersInGraphemeClusters(StringView string, unsigned numGraphemeClusters)
+{
+ unsigned stringLength = string.length();
+
+ if (stringLength <= numGraphemeClusters)
+ return stringLength;
+
+ // The only Latin-1 Extended Grapheme Cluster is CRLF.
+ if (string.is8Bit()) {
+ auto* characters = string.characters8();
+ unsigned i, j;
+ for (i = 0, j = 0; i < numGraphemeClusters && j + 1 < stringLength; ++i, ++j)
+ j += characters[j] == '\r' && characters[j + 1] == '\n';
+ return j + (i < numGraphemeClusters);
+ }
+
+ NonSharedCharacterBreakIterator iterator { string };
+ if (!iterator) {
+ ASSERT_NOT_REACHED();
+ return stringLength;
+ }
+
+ for (unsigned i = 0; i < numGraphemeClusters; ++i) {
+ if (ubrk_next(iterator) == UBRK_DONE)
+ return stringLength;
+ }
+ return ubrk_current(iterator);
+}
+
+} // namespace WTF
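The kRules string assembled above is handed to initializeIteratorWithRules(), whose implementation lies outside this hunk; such a helper is presumably built on ICU's ubrk_openRules(). The following is a minimal, self-contained sketch of that underlying call (an illustration, not WebKit's helper); openRuleBasedIterator is a hypothetical name.

#include <unicode/ubrk.h>
#include <unicode/ustring.h>
#include <cstdio>
#include <vector>

// Sketch: compile an ASCII rule string (like kRules above) into a UBreakIterator.
static UBreakIterator* openRuleBasedIterator(const char* asciiRules)
{
    // Preflight to learn how many UChars the converted rules need.
    UErrorCode status = U_ZERO_ERROR;
    int32_t rulesLength = 0;
    u_strFromUTF8(nullptr, 0, &rulesLength, asciiRules, -1, &status);
    if (status != U_BUFFER_OVERFLOW_ERROR && U_FAILURE(status))
        return nullptr;

    std::vector<UChar> rules(rulesLength);
    status = U_ZERO_ERROR;
    u_strFromUTF8(rules.data(), rulesLength, nullptr, asciiRules, -1, &status);
    if (U_FAILURE(status))
        return nullptr;

    // Compile the rules; on failure ICU reports where parsing stopped.
    UParseError parseError;
    UBreakIterator* iterator = ubrk_openRules(rules.data(), rulesLength, nullptr, 0, &parseError, &status);
    if (U_FAILURE(status)) {
        std::fprintf(stderr, "ubrk_openRules failed near rule line %d: %s\n",
            static_cast<int>(parseError.line), u_errorName(status));
        return nullptr;
    }
    return iterator; // caller binds text with ubrk_setText() and eventually calls ubrk_close()
}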
diff --git a/Source/WTF/wtf/text/TextBreakIterator.h b/Source/WTF/wtf/text/TextBreakIterator.h
new file mode 100644
index 000000000..2bb5f9ca4
--- /dev/null
+++ b/Source/WTF/wtf/text/TextBreakIterator.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2006 Lars Knoll <lars@trolltech.com>
+ * Copyright (C) 2007-2016 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#pragma once
+
+#include <wtf/text/StringView.h>
+
+namespace WTF {
+
+// Note: The returned iterator is good only until you get another iterator, with the exception of acquireLineBreakIterator.
+
+enum class LineBreakIteratorMode { Default, Loose, Normal, Strict };
+
+// This is similar to the character break iterator in most cases, but is subject to
+// platform UI conventions. One notable example where it can differ from the character
+// break iterator is Thai prepend characters; see bug 24342.
+// Use this for insertion point and selection manipulations.
+WTF_EXPORT_PRIVATE UBreakIterator* cursorMovementIterator(StringView);
+
+WTF_EXPORT_PRIVATE UBreakIterator* wordBreakIterator(StringView);
+WTF_EXPORT_PRIVATE UBreakIterator* sentenceBreakIterator(StringView);
+
+WTF_EXPORT_PRIVATE UBreakIterator* acquireLineBreakIterator(StringView, const AtomicString& locale, const UChar* priorContext, unsigned priorContextLength, LineBreakIteratorMode);
+WTF_EXPORT_PRIVATE void releaseLineBreakIterator(UBreakIterator*);
+UBreakIterator* openLineBreakIterator(const AtomicString& locale);
+void closeLineBreakIterator(UBreakIterator*&);
+
+WTF_EXPORT_PRIVATE bool isWordTextBreak(UBreakIterator*);
+
+class LazyLineBreakIterator {
+public:
+ LazyLineBreakIterator()
+ {
+ resetPriorContext();
+ }
+
+ explicit LazyLineBreakIterator(StringView stringView, const AtomicString& locale = AtomicString(), LineBreakIteratorMode mode = LineBreakIteratorMode::Default)
+ : m_stringView(stringView)
+ , m_locale(locale)
+ , m_mode(mode)
+ {
+ resetPriorContext();
+ }
+
+ ~LazyLineBreakIterator()
+ {
+ if (m_iterator)
+ releaseLineBreakIterator(m_iterator);
+ }
+
+ StringView stringView() const { return m_stringView; }
+ LineBreakIteratorMode mode() const { return m_mode; }
+
+ UChar lastCharacter() const
+ {
+ static_assert(WTF_ARRAY_LENGTH(m_priorContext) == 2, "UBreakIterator unexpected prior context length");
+ return m_priorContext[1];
+ }
+
+ UChar secondToLastCharacter() const
+ {
+ static_assert(WTF_ARRAY_LENGTH(m_priorContext) == 2, "UBreakIterator unexpected prior context length");
+ return m_priorContext[0];
+ }
+
+ void setPriorContext(UChar last, UChar secondToLast)
+ {
+ static_assert(WTF_ARRAY_LENGTH(m_priorContext) == 2, "UBreakIterator unexpected prior context length");
+ m_priorContext[0] = secondToLast;
+ m_priorContext[1] = last;
+ }
+
+ void updatePriorContext(UChar last)
+ {
+ static_assert(WTF_ARRAY_LENGTH(m_priorContext) == 2, "UBreakIterator unexpected prior context length");
+ m_priorContext[0] = m_priorContext[1];
+ m_priorContext[1] = last;
+ }
+
+ void resetPriorContext()
+ {
+ static_assert(WTF_ARRAY_LENGTH(m_priorContext) == 2, "UBreakIterator unexpected prior context length");
+ m_priorContext[0] = 0;
+ m_priorContext[1] = 0;
+ }
+
+ unsigned priorContextLength() const
+ {
+ unsigned priorContextLength = 0;
+ static_assert(WTF_ARRAY_LENGTH(m_priorContext) == 2, "UBreakIterator unexpected prior context length");
+ if (m_priorContext[1]) {
+ ++priorContextLength;
+ if (m_priorContext[0])
+ ++priorContextLength;
+ }
+ return priorContextLength;
+ }
+
+ // Obtain a text break iterator, possibly previously cached. The iterator is (or has been)
+ // initialized to use the previously stored string as the primary breaking context, and to
+ // use the previously stored prior context if it is non-empty.
+ UBreakIterator* get(unsigned priorContextLength)
+ {
+ ASSERT(priorContextLength <= priorContextCapacity);
+ const UChar* priorContext = priorContextLength ? &m_priorContext[priorContextCapacity - priorContextLength] : 0;
+ if (!m_iterator) {
+ m_iterator = acquireLineBreakIterator(m_stringView, m_locale, priorContext, priorContextLength, m_mode);
+ m_cachedPriorContext = priorContext;
+ m_cachedPriorContextLength = priorContextLength;
+ } else if (priorContext != m_cachedPriorContext || priorContextLength != m_cachedPriorContextLength) {
+ resetStringAndReleaseIterator(m_stringView, m_locale, m_mode);
+ return this->get(priorContextLength);
+ }
+ return m_iterator;
+ }
+
+ void resetStringAndReleaseIterator(StringView stringView, const AtomicString& locale, LineBreakIteratorMode mode)
+ {
+ if (m_iterator)
+ releaseLineBreakIterator(m_iterator);
+ m_stringView = stringView;
+ m_locale = locale;
+ m_iterator = nullptr;
+ m_cachedPriorContext = nullptr;
+ m_mode = mode;
+ m_cachedPriorContextLength = 0;
+ }
+
+private:
+ static constexpr unsigned priorContextCapacity = 2;
+ StringView m_stringView;
+ AtomicString m_locale;
+ UBreakIterator* m_iterator { nullptr };
+ const UChar* m_cachedPriorContext { nullptr };
+ LineBreakIteratorMode m_mode { LineBreakIteratorMode::Default };
+ unsigned m_cachedPriorContextLength { 0 };
+ UChar m_priorContext[priorContextCapacity];
+};
+
+// Iterates over "extended grapheme clusters", as defined in UAX #29.
+// Note that platform implementations may be less sophisticated - e.g. ICU prior to
+// version 4.0 only supports "legacy grapheme clusters".
+// Use this for general text processing, e.g. string truncation.
+
+class NonSharedCharacterBreakIterator {
+ WTF_MAKE_NONCOPYABLE(NonSharedCharacterBreakIterator);
+public:
+ WTF_EXPORT_PRIVATE NonSharedCharacterBreakIterator(StringView);
+ WTF_EXPORT_PRIVATE ~NonSharedCharacterBreakIterator();
+
+ NonSharedCharacterBreakIterator(NonSharedCharacterBreakIterator&&);
+
+ operator UBreakIterator*() const { return m_iterator; }
+
+private:
+ UBreakIterator* m_iterator;
+};
+
+// Counts the number of grapheme clusters. A surrogate pair or a sequence
+// of a non-combining character and following combining characters is
+// counted as 1 grapheme cluster.
+WTF_EXPORT_PRIVATE unsigned numGraphemeClusters(StringView);
+
+// Returns the number of characters spanned by the given number of grapheme clusters,
+// counted from the start of the string; the result never exceeds the string length.
+WTF_EXPORT_PRIVATE unsigned numCharactersInGraphemeClusters(StringView, unsigned);
+
+}
+
+using WTF::LazyLineBreakIterator;
+using WTF::LineBreakIteratorMode;
+using WTF::NonSharedCharacterBreakIterator;
+using WTF::isWordTextBreak;
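A short usage sketch for the grapheme-cluster helpers and NonSharedCharacterBreakIterator declared above. It assumes a WTF build environment; the String(const UChar*, unsigned) constructor is assumed from WTFString.h, and the expected values follow the implementation in TextBreakIterator.cpp earlier in this diff. graphemeClusterExamples is a hypothetical name.

#include <unicode/ubrk.h>
#include <wtf/text/TextBreakIterator.h>
#include <wtf/text/WTFString.h>

static void graphemeClusterExamples()
{
    String crlf("a\r\nz");                                           // 4 code units: 'a', CR, LF, 'z'
    unsigned clusters = WTF::numGraphemeClusters(crlf);              // 3 -- CR LF counts as one cluster
    unsigned units = WTF::numCharactersInGraphemeClusters(crlf, 2);  // 3 -- 'a' plus the CR LF pair

    const UChar eAcute[] = { 'e', 0x0301 };                          // 'e' followed by a combining acute accent
    String combined(eAcute, 2);
    unsigned combinedClusters = WTF::numGraphemeClusters(combined);  // 1 -- base character + combining mark

    // NonSharedCharacterBreakIterator exposes the underlying UBreakIterator directly.
    NonSharedCharacterBreakIterator iterator { StringView(combined) };
    if (iterator) {
        // The only boundary reported is after both code units (offset 2).
        while (ubrk_next(iterator) != UBRK_DONE) { }
    }

    (void)clusters;
    (void)units;
    (void)combinedClusters;
}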
diff --git a/Source/WTF/wtf/text/TextBreakIteratorInternalICU.h b/Source/WTF/wtf/text/TextBreakIteratorInternalICU.h
new file mode 100644
index 000000000..70a301c88
--- /dev/null
+++ b/Source/WTF/wtf/text/TextBreakIteratorInternalICU.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef TextBreakIteratorInternalICU_h
+#define TextBreakIteratorInternalICU_h
+
+// FIXME: Now that this handles locales for ICU, not just for text breaking,
+// this file and the various implementation files should be renamed.
+
+namespace WTF {
+
+WTF_EXPORT_PRIVATE const char* currentSearchLocaleID();
+WTF_EXPORT_PRIVATE const char* currentTextBreakLocaleID();
+
+}
+
+using WTF::currentSearchLocaleID;
+using WTF::currentTextBreakLocaleID;
+
+#endif
diff --git a/Source/WTF/wtf/text/TextPosition.h b/Source/WTF/wtf/text/TextPosition.h
index be49c157a..2f108b038 100644
--- a/Source/WTF/wtf/text/TextPosition.h
+++ b/Source/WTF/wtf/text/TextPosition.h
@@ -22,37 +22,12 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef TextPosition_h
-#define TextPosition_h
+#pragma once
-#include <wtf/Assertions.h>
+#include "OrdinalNumber.h"
namespace WTF {
-// An abstract number of element in a sequence. The sequence has a first element.
-// This type should be used instead of integer because 2 contradicting traditions can
-// call a first element '0' or '1' which makes integer type ambiguous.
-class OrdinalNumber {
-public:
- static OrdinalNumber fromZeroBasedInt(int zeroBasedInt) { return OrdinalNumber(zeroBasedInt); }
- static OrdinalNumber fromOneBasedInt(int oneBasedInt) { return OrdinalNumber(oneBasedInt - 1); }
- OrdinalNumber() : m_zeroBasedValue(0) { }
-
- int zeroBasedInt() const { return m_zeroBasedValue; }
- int oneBasedInt() const { return m_zeroBasedValue + 1; }
-
- bool operator==(OrdinalNumber other) { return m_zeroBasedValue == other.m_zeroBasedValue; }
- bool operator!=(OrdinalNumber other) { return !((*this) == other); }
-
- static OrdinalNumber first() { return OrdinalNumber(0); }
- static OrdinalNumber beforeFirst() { return OrdinalNumber(-1); }
-
-private:
- OrdinalNumber(int zeroBasedInt) : m_zeroBasedValue(zeroBasedInt) { }
- int m_zeroBasedValue;
-};
-
-
// TextPosition structure specifies coordinates within an text resource. It is used mostly
// for saving script source position.
class TextPosition {
@@ -62,13 +37,11 @@ public:
, m_column(column)
{
}
+
TextPosition() { }
bool operator==(const TextPosition& other) { return m_line == other.m_line && m_column == other.m_column; }
bool operator!=(const TextPosition& other) { return !((*this) == other); }
- // A 'minimum' value of position, used as a default value.
- static TextPosition minimumPosition() { return TextPosition(OrdinalNumber::first(), OrdinalNumber::first()); }
-
// A value with line value less than a minimum; used as an impossible position.
static TextPosition belowRangePosition() { return TextPosition(OrdinalNumber::beforeFirst(), OrdinalNumber::beforeFirst()); }
@@ -78,8 +51,4 @@ public:
}
-using WTF::OrdinalNumber;
-
using WTF::TextPosition;
-
-#endif // TextPosition_h
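The OrdinalNumber class removed from this header now comes from the OrdinalNumber.h include added above. Its purpose is exactly the 0-based/1-based ambiguity the removed comment describes; a quick illustration of the conversions, matching the API shown in the removed lines (include path assumed):

#include <wtf/text/OrdinalNumber.h> // per the "OrdinalNumber.h" include added above

static void ordinalNumberExample()
{
    OrdinalNumber line = OrdinalNumber::fromOneBasedInt(1); // "line 1" as a user or editor counts it
    int arrayIndex = line.zeroBasedInt();                   // 0 -- what an index into a buffer wants
    int displayedLine = line.oneBasedInt();                 // 1 -- what a UI wants to show
    bool isFirst = line == OrdinalNumber::first();          // true: both name the same element

    (void)arrayIndex;
    (void)displayedLine;
    (void)isFirst;
}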
diff --git a/Source/WTF/wtf/text/UniquedStringImpl.h b/Source/WTF/wtf/text/UniquedStringImpl.h
new file mode 100644
index 000000000..09aba85cf
--- /dev/null
+++ b/Source/WTF/wtf/text/UniquedStringImpl.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UniquedStringImpl_h
+#define UniquedStringImpl_h
+
+#include <wtf/text/StringImpl.h>
+
+namespace WTF {
+
+// Represents a string impl that has been uniqued in some way.
+// When two string impls are both uniqued, they can be compared with a simple pointer comparison.
+class UniquedStringImpl : public StringImpl {
+private:
+ UniquedStringImpl() = delete;
+protected:
+ UniquedStringImpl(CreateSymbolTag, const LChar* characters, unsigned length) : StringImpl(CreateSymbol, characters, length) { }
+ UniquedStringImpl(CreateSymbolTag, const UChar* characters, unsigned length) : StringImpl(CreateSymbol, characters, length) { }
+ UniquedStringImpl(CreateSymbolTag) : StringImpl(CreateSymbol) { }
+};
+
+#if !ASSERT_DISABLED
+// UniquedStringImpls created from StaticStringImpl will ASSERT
+// in the generic ValueCheck<T>::checkConsistency
+// as they are not allocated by fastMalloc.
+// We don't currently have any way to detect that case
+// so we ignore the consistency check for all UniquedStringImpls*.
+template<>
+struct ValueCheck<UniquedStringImpl*> {
+ static void checkConsistency(const UniquedStringImpl*) { }
+};
+
+template<>
+struct ValueCheck<const UniquedStringImpl*> {
+ static void checkConsistency(const UniquedStringImpl*) { }
+};
+#endif
+
+} // namespace WTF
+
+using WTF::UniquedStringImpl;
+
+#endif // UniquedStringImpl_h
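A sketch of the pointer-comparison property described in the header above, using AtomicString. That AtomicStringImpl is one of the uniqued impls is an assumption here; the class hierarchy is not part of this hunk.

#include <wtf/text/AtomicString.h>

static void uniquedComparisonExample()
{
    AtomicString a("content-type");
    AtomicString b("content-type");

    // Equal atomic strings share a single uniqued impl, so equality checks
    // reduce to comparing the impl pointers.
    bool sameImpl = a.impl() == b.impl(); // true
    (void)sameImpl;
}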
diff --git a/Source/WTF/wtf/text/WTFString.cpp b/Source/WTF/wtf/text/WTFString.cpp
index 45ba8af52..4f49ebca1 100644
--- a/Source/WTF/wtf/text/WTFString.cpp
+++ b/Source/WTF/wtf/text/WTFString.cpp
@@ -35,7 +35,6 @@
#include <wtf/dtoa.h>
#include <wtf/unicode/CharacterNames.h>
#include <wtf/unicode/UTF8.h>
-#include <wtf/unicode/Unicode.h>
namespace WTF {
@@ -90,64 +89,75 @@ String::String(ASCIILiteral characters)
void String::append(const String& str)
{
+ // FIXME: This is extremely inefficient. So much so that we might want to take this out of String's API.
+
if (str.isEmpty())
return;
- // FIXME: This is extremely inefficient. So much so that we might want to take this
- // out of String's API. We can make it better by optimizing the case where exactly
- // one String is pointing at this StringImpl, but even then it's going to require a
- // call to fastMalloc every single time.
if (str.m_impl) {
if (m_impl) {
if (m_impl->is8Bit() && str.m_impl->is8Bit()) {
LChar* data;
if (str.length() > std::numeric_limits<unsigned>::max() - m_impl->length())
CRASH();
- RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(m_impl->length() + str.length(), data);
+ auto newImpl = StringImpl::createUninitialized(m_impl->length() + str.length(), data);
memcpy(data, m_impl->characters8(), m_impl->length() * sizeof(LChar));
memcpy(data + m_impl->length(), str.characters8(), str.length() * sizeof(LChar));
- m_impl = newImpl.release();
+ m_impl = WTFMove(newImpl);
return;
}
UChar* data;
if (str.length() > std::numeric_limits<unsigned>::max() - m_impl->length())
CRASH();
- RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(m_impl->length() + str.length(), data);
- memcpy(data, m_impl->deprecatedCharacters(), m_impl->length() * sizeof(UChar));
- memcpy(data + m_impl->length(), str.deprecatedCharacters(), str.length() * sizeof(UChar));
- m_impl = newImpl.release();
+ auto newImpl = StringImpl::createUninitialized(m_impl->length() + str.length(), data);
+ StringView(*m_impl).getCharactersWithUpconvert(data);
+ StringView(str).getCharactersWithUpconvert(data + m_impl->length());
+ m_impl = WTFMove(newImpl);
} else
m_impl = str.m_impl;
}
}
-template <typename CharacterType>
-inline void String::appendInternal(CharacterType c)
+void String::append(LChar character)
{
- // FIXME: This is extremely inefficient. So much so that we might want to take this
- // out of String's API. We can make it better by optimizing the case where exactly
- // one String is pointing at this StringImpl, but even then it's going to require a
- // call to fastMalloc every single time.
- if (m_impl) {
- UChar* data;
- if (m_impl->length() >= std::numeric_limits<unsigned>::max())
- CRASH();
- RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(m_impl->length() + 1, data);
- memcpy(data, m_impl->deprecatedCharacters(), m_impl->length() * sizeof(UChar));
- data[m_impl->length()] = c;
- m_impl = newImpl.release();
- } else
- m_impl = StringImpl::create(&c, 1);
-}
+ // FIXME: This is extremely inefficient. So much so that we might want to take this out of String's API.
-void String::append(LChar c)
-{
- appendInternal(c);
+ if (!m_impl) {
+ m_impl = StringImpl::create(&character, 1);
+ return;
+ }
+ if (!is8Bit()) {
+ append(static_cast<UChar>(character));
+ return;
+ }
+ if (m_impl->length() >= std::numeric_limits<unsigned>::max())
+ CRASH();
+ LChar* data;
+ auto newImpl = StringImpl::createUninitialized(m_impl->length() + 1, data);
+ memcpy(data, m_impl->characters8(), m_impl->length());
+ data[m_impl->length()] = character;
+ m_impl = WTFMove(newImpl);
}
-void String::append(UChar c)
+void String::append(UChar character)
{
- appendInternal(c);
+ // FIXME: This is extremely inefficient. So much so that we might want to take this out of String's API.
+
+ if (!m_impl) {
+ m_impl = StringImpl::create(&character, 1);
+ return;
+ }
+ if (character <= 0xFF && is8Bit()) {
+ append(static_cast<LChar>(character));
+ return;
+ }
+ if (m_impl->length() >= std::numeric_limits<unsigned>::max())
+ CRASH();
+ UChar* data;
+ auto newImpl = StringImpl::createUninitialized(m_impl->length() + 1, data);
+ StringView(*m_impl).getCharactersWithUpconvert(data);
+ data[m_impl->length()] = character;
+ m_impl = WTFMove(newImpl);
}
int codePointCompare(const String& a, const String& b)
@@ -155,20 +165,49 @@ int codePointCompare(const String& a, const String& b)
return codePointCompare(a.impl(), b.impl());
}
-void String::insert(const String& str, unsigned pos)
+void String::insert(const String& string, unsigned position)
{
- if (str.isEmpty()) {
- if (str.isNull())
+ // FIXME: This is extremely inefficient. So much so that we might want to take this out of String's API.
+
+ unsigned lengthToInsert = string.length();
+
+ if (!lengthToInsert) {
+ if (string.isNull())
return;
if (isNull())
- m_impl = str.impl();
+ m_impl = string.impl();
return;
}
- insert(str.deprecatedCharacters(), str.length(), pos);
+
+ if (position >= length()) {
+ append(string);
+ return;
+ }
+
+ if (lengthToInsert > std::numeric_limits<unsigned>::max() - length())
+ CRASH();
+
+ if (is8Bit() && string.is8Bit()) {
+ LChar* data;
+ auto newString = StringImpl::createUninitialized(length() + lengthToInsert, data);
+ StringView(*m_impl).substring(0, position).getCharactersWithUpconvert(data);
+ StringView(string).getCharactersWithUpconvert(data + position);
+ StringView(*m_impl).substring(position).getCharactersWithUpconvert(data + position + lengthToInsert);
+ m_impl = WTFMove(newString);
+ } else {
+ UChar* data;
+ auto newString = StringImpl::createUninitialized(length() + lengthToInsert, data);
+ StringView(*m_impl).substring(0, position).getCharactersWithUpconvert(data);
+ StringView(string).getCharactersWithUpconvert(data + position);
+ StringView(*m_impl).substring(position).getCharactersWithUpconvert(data + position + lengthToInsert);
+ m_impl = WTFMove(newString);
+ }
}
void String::append(const LChar* charactersToAppend, unsigned lengthToAppend)
{
+ // FIXME: This is extremely inefficient. So much so that we might want to take this out of String's API.
+
if (!m_impl) {
if (!charactersToAppend)
return;
@@ -187,24 +226,26 @@ void String::append(const LChar* charactersToAppend, unsigned lengthToAppend)
if (lengthToAppend > std::numeric_limits<unsigned>::max() - strLength)
CRASH();
LChar* data;
- RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(strLength + lengthToAppend, data);
+ auto newImpl = StringImpl::createUninitialized(strLength + lengthToAppend, data);
StringImpl::copyChars(data, m_impl->characters8(), strLength);
StringImpl::copyChars(data + strLength, charactersToAppend, lengthToAppend);
- m_impl = newImpl.release();
+ m_impl = WTFMove(newImpl);
return;
}
if (lengthToAppend > std::numeric_limits<unsigned>::max() - strLength)
CRASH();
UChar* data;
- RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(length() + lengthToAppend, data);
+ auto newImpl = StringImpl::createUninitialized(length() + lengthToAppend, data);
StringImpl::copyChars(data, m_impl->characters16(), strLength);
StringImpl::copyChars(data + strLength, charactersToAppend, lengthToAppend);
- m_impl = newImpl.release();
+ m_impl = WTFMove(newImpl);
}
void String::append(const UChar* charactersToAppend, unsigned lengthToAppend)
{
+ // FIXME: This is extremely inefficient. So much so that we might want to take this out of String's API.
+
if (!m_impl) {
if (!charactersToAppend)
return;
@@ -221,39 +262,16 @@ void String::append(const UChar* charactersToAppend, unsigned lengthToAppend)
if (lengthToAppend > std::numeric_limits<unsigned>::max() - strLength)
CRASH();
UChar* data;
- RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(strLength + lengthToAppend, data);
+ auto newImpl = StringImpl::createUninitialized(strLength + lengthToAppend, data);
if (m_impl->is8Bit())
StringImpl::copyChars(data, characters8(), strLength);
else
StringImpl::copyChars(data, characters16(), strLength);
StringImpl::copyChars(data + strLength, charactersToAppend, lengthToAppend);
- m_impl = newImpl.release();
+ m_impl = WTFMove(newImpl);
}
-void String::insert(const UChar* charactersToInsert, unsigned lengthToInsert, unsigned position)
-{
- if (position >= length()) {
- append(charactersToInsert, lengthToInsert);
- return;
- }
-
- ASSERT(m_impl);
-
- if (!lengthToInsert)
- return;
-
- ASSERT(charactersToInsert);
- UChar* data;
- if (lengthToInsert > std::numeric_limits<unsigned>::max() - length())
- CRASH();
- RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(length() + lengthToInsert, data);
- memcpy(data, deprecatedCharacters(), position * sizeof(UChar));
- memcpy(data + position, charactersToInsert, lengthToInsert * sizeof(UChar));
- memcpy(data + position + lengthToInsert, deprecatedCharacters() + position, (length() - position) * sizeof(UChar));
- m_impl = newImpl.release();
-}
-
UChar32 String::characterStartingAt(unsigned i) const
{
if (!m_impl || i >= m_impl->length())
@@ -263,24 +281,20 @@ UChar32 String::characterStartingAt(unsigned i) const
void String::truncate(unsigned position)
{
- if (position >= length())
- return;
- UChar* data;
- RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(position, data);
- memcpy(data, deprecatedCharacters(), position * sizeof(UChar));
- m_impl = newImpl.release();
+ if (m_impl)
+ m_impl = m_impl->substring(0, position);
}
template <typename CharacterType>
inline void String::removeInternal(const CharacterType* characters, unsigned position, int lengthToRemove)
{
CharacterType* data;
- RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(length() - lengthToRemove, data);
+ auto newImpl = StringImpl::createUninitialized(length() - lengthToRemove, data);
memcpy(data, characters, position * sizeof(CharacterType));
memcpy(data + position, characters + position + lengthToRemove,
(length() - lengthToRemove - position) * sizeof(CharacterType));
- m_impl = newImpl.release();
+ m_impl = WTFMove(newImpl);
}
void String::remove(unsigned position, int lengthToRemove)
@@ -318,35 +332,58 @@ String String::substringSharingImpl(unsigned offset, unsigned length) const
if (!offset && length == stringLength)
return *this;
- return String(StringImpl::create(m_impl, offset, length));
+ return String(StringImpl::createSubstringSharingImpl(*m_impl, offset, length));
+}
+
+String String::convertToASCIILowercase() const
+{
+ // FIXME: Should this function, and the many others like it, be inlined?
+ if (!m_impl)
+ return String();
+ return m_impl->convertToASCIILowercase();
+}
+
+String String::convertToASCIIUppercase() const
+{
+ // FIXME: Should this function, and the many others like it, be inlined?
+ if (!m_impl)
+ return String();
+ return m_impl->convertToASCIIUppercase();
+}
+
+String String::convertToLowercaseWithoutLocale() const
+{
+ if (!m_impl)
+ return String();
+ return m_impl->convertToLowercaseWithoutLocale();
}
-String String::lower() const
+String String::convertToLowercaseWithoutLocaleStartingAtFailingIndex8Bit(unsigned failingIndex) const
{
if (!m_impl)
return String();
- return m_impl->lower();
+ return m_impl->convertToLowercaseWithoutLocaleStartingAtFailingIndex8Bit(failingIndex);
}
-String String::upper() const
+String String::convertToUppercaseWithoutLocale() const
{
if (!m_impl)
return String();
- return m_impl->upper();
+ return m_impl->convertToUppercaseWithoutLocale();
}
-String String::lower(const AtomicString& localeIdentifier) const
+String String::convertToLowercaseWithLocale(const AtomicString& localeIdentifier) const
{
if (!m_impl)
return String();
- return m_impl->lower(localeIdentifier);
+ return m_impl->convertToLowercaseWithLocale(localeIdentifier);
}
-String String::upper(const AtomicString& localeIdentifier) const
+String String::convertToUppercaseWithLocale(const AtomicString& localeIdentifier) const
{
if (!m_impl)
return String();
- return m_impl->upper(localeIdentifier);
+ return m_impl->convertToUppercaseWithLocale(localeIdentifier);
}
String String::stripWhiteSpace() const
@@ -399,7 +436,10 @@ bool String::percentage(int& result) const
if ((*m_impl)[m_impl->length() - 1] != '%')
return false;
- result = charactersToIntStrict(m_impl->deprecatedCharacters(), m_impl->length() - 1);
+ if (m_impl->is8Bit())
+ result = charactersToIntStrict(m_impl->characters8(), m_impl->length() - 1);
+ else
+ result = charactersToIntStrict(m_impl->characters16(), m_impl->length() - 1);
return true;
}
@@ -427,33 +467,26 @@ Vector<UChar> String::charactersWithNullTermination() const
String String::format(const char *format, ...)
{
-#if OS(WINCE)
va_list args;
va_start(args, format);
- Vector<char, 256> buffer;
+#if USE(CF) && !OS(WINDOWS)
+ if (strstr(format, "%@")) {
+ RetainPtr<CFStringRef> cfFormat = adoptCF(CFStringCreateWithCString(kCFAllocatorDefault, format, kCFStringEncodingUTF8));
- int bufferSize = 256;
- buffer.resize(bufferSize);
- for (;;) {
- int written = vsnprintf(buffer.data(), bufferSize, format, args);
- va_end(args);
+#if COMPILER(CLANG)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wformat-nonliteral"
+#endif
+ RetainPtr<CFStringRef> result = adoptCF(CFStringCreateWithFormatAndArguments(kCFAllocatorDefault, nullptr, cfFormat.get(), args));
+#if COMPILER(CLANG)
+#pragma clang diagnostic pop
+#endif
- if (written == 0)
- return String("");
- if (written > 0)
- return StringImpl::create(reinterpret_cast<const LChar*>(buffer.data()), written);
-
- bufferSize <<= 1;
- buffer.resize(bufferSize);
- va_start(args, format);
+ va_end(args);
+ return result.get();
}
-
-#else
- va_list args;
- va_start(args, format);
-
- Vector<char, 256> buffer;
+#endif // USE(CF) && !OS(WINDOWS)
// Do the format once to get the length.
#if COMPILER(MSVC)
@@ -461,30 +494,25 @@ String String::format(const char *format, ...)
#else
char ch;
int result = vsnprintf(&ch, 1, format, args);
- // We need to call va_end() and then va_start() again here, as the
- // contents of args is undefined after the call to vsnprintf
- // according to http://man.cx/snprintf(3)
- //
- // Not calling va_end/va_start here happens to work on lots of
- // systems, but fails e.g. on 64bit Linux.
- va_end(args);
- va_start(args, format);
#endif
+ va_end(args);
if (result == 0)
return String("");
if (result < 0)
return String();
+
+ Vector<char, 256> buffer;
unsigned len = result;
buffer.grow(len + 1);
+ va_start(args, format);
// Now do the formatting again, guaranteed to fit.
vsnprintf(buffer.data(), buffer.size(), format, args);
va_end(args);
return StringImpl::create(reinterpret_cast<const LChar*>(buffer.data()), len);
-#endif
}
String String::number(int number)
@@ -663,12 +691,12 @@ String String::isolatedCopy() const &
return m_impl->isolatedCopy();
}
-String String::isolatedCopy() const &&
+String String::isolatedCopy() &&
{
if (isSafeToSendToAnotherThread()) {
// Since we know that our string is a temporary that will be destroyed
// we can just steal the m_impl from it, thus avoiding a copy.
- return String(std::move(*this));
+ return String(WTFMove(*this));
}
if (!m_impl)
@@ -689,14 +717,14 @@ bool String::isSafeToSendToAnotherThread() const
{
if (!impl())
return true;
+ if (isEmpty())
+ return true;
// AtomicStrings are not safe to send between threads as ~StringImpl()
// will try to remove them from the wrong AtomicStringTable.
if (impl()->isAtomic())
return false;
if (impl()->hasOneRef())
return true;
- if (isEmpty())
- return true;
return false;
}
@@ -802,6 +830,11 @@ CString String::utf8(ConversionMode mode) const
return m_impl->utf8(mode);
}
+CString String::utf8() const
+{
+ return utf8(LenientConversion);
+}
+
String String::make8BitFrom16BitSource(const UChar* source, size_t length)
{
if (!length)
@@ -1181,7 +1214,7 @@ String* string(const char* s)
Vector<char> asciiDebug(StringImpl* impl)
{
if (!impl)
- return asciiDebug(String("[null]").impl());
+ return asciiDebug(String(ASCIILiteral("[null]")).impl());
Vector<char> buffer;
for (unsigned i = 0; i < impl->length(); ++i) {
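The reworked String::format earlier in this file's diff runs vsnprintf twice: a first pass to measure and a second to fill a buffer guaranteed to fit, restarting the va_list in between because a consumed va_list is indeterminate. Below is a self-contained sketch of the same pattern in portable C++ (not WebKit code), using va_copy for the restart; formatToString is a hypothetical name.

#include <cstdarg>
#include <cstdio>
#include <string>
#include <vector>

static std::string formatToString(const char* format, ...)
{
    va_list args;
    va_start(args, format);

    va_list measureArgs;
    va_copy(measureArgs, args);
    int length = vsnprintf(nullptr, 0, format, measureArgs); // pass 1: compute the length
    va_end(measureArgs);

    if (length <= 0) {
        va_end(args);
        return std::string();
    }

    std::vector<char> buffer(length + 1);
    vsnprintf(buffer.data(), buffer.size(), format, args);   // pass 2: guaranteed to fit
    va_end(args);
    return std::string(buffer.data(), length);
}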
diff --git a/Source/WTF/wtf/text/WTFString.h b/Source/WTF/wtf/text/WTFString.h
index 5c8a0af95..cb4232d58 100644
--- a/Source/WTF/wtf/text/WTFString.h
+++ b/Source/WTF/wtf/text/WTFString.h
@@ -1,6 +1,6 @@
/*
* (C) 1999 Lars Knoll (knoll@kde.org)
- * Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2004-2016 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -26,6 +26,7 @@
// on systems without case-sensitive file systems.
#include <wtf/text/ASCIIFastPath.h>
+#include <wtf/text/IntegerToStringConversion.h>
#include <wtf/text/StringImpl.h>
#ifdef __OBJC__
@@ -34,52 +35,45 @@
namespace WTF {
-class CString;
-struct StringHash;
+class ASCIILiteral;
// Declarations of string operations
-WTF_EXPORT_STRING_API int charactersToIntStrict(const LChar*, size_t, bool* ok = 0, int base = 10);
-WTF_EXPORT_STRING_API int charactersToIntStrict(const UChar*, size_t, bool* ok = 0, int base = 10);
-WTF_EXPORT_STRING_API unsigned charactersToUIntStrict(const LChar*, size_t, bool* ok = 0, int base = 10);
-WTF_EXPORT_STRING_API unsigned charactersToUIntStrict(const UChar*, size_t, bool* ok = 0, int base = 10);
-int64_t charactersToInt64Strict(const LChar*, size_t, bool* ok = 0, int base = 10);
-int64_t charactersToInt64Strict(const UChar*, size_t, bool* ok = 0, int base = 10);
-uint64_t charactersToUInt64Strict(const LChar*, size_t, bool* ok = 0, int base = 10);
-uint64_t charactersToUInt64Strict(const UChar*, size_t, bool* ok = 0, int base = 10);
-intptr_t charactersToIntPtrStrict(const LChar*, size_t, bool* ok = 0, int base = 10);
-intptr_t charactersToIntPtrStrict(const UChar*, size_t, bool* ok = 0, int base = 10);
-
-int charactersToInt(const LChar*, size_t, bool* ok = 0); // ignores trailing garbage
-WTF_EXPORT_STRING_API int charactersToInt(const UChar*, size_t, bool* ok = 0); // ignores trailing garbage
-unsigned charactersToUInt(const LChar*, size_t, bool* ok = 0); // ignores trailing garbage
-unsigned charactersToUInt(const UChar*, size_t, bool* ok = 0); // ignores trailing garbage
-int64_t charactersToInt64(const LChar*, size_t, bool* ok = 0); // ignores trailing garbage
-int64_t charactersToInt64(const UChar*, size_t, bool* ok = 0); // ignores trailing garbage
-uint64_t charactersToUInt64(const LChar*, size_t, bool* ok = 0); // ignores trailing garbage
-uint64_t charactersToUInt64(const UChar*, size_t, bool* ok = 0); // ignores trailing garbage
-intptr_t charactersToIntPtr(const LChar*, size_t, bool* ok = 0); // ignores trailing garbage
-intptr_t charactersToIntPtr(const UChar*, size_t, bool* ok = 0); // ignores trailing garbage
+WTF_EXPORT_STRING_API int charactersToIntStrict(const LChar*, size_t, bool* ok = nullptr, int base = 10);
+WTF_EXPORT_STRING_API int charactersToIntStrict(const UChar*, size_t, bool* ok = nullptr, int base = 10);
+WTF_EXPORT_STRING_API unsigned charactersToUIntStrict(const LChar*, size_t, bool* ok = nullptr, int base = 10);
+WTF_EXPORT_STRING_API unsigned charactersToUIntStrict(const UChar*, size_t, bool* ok = nullptr, int base = 10);
+int64_t charactersToInt64Strict(const LChar*, size_t, bool* ok = nullptr, int base = 10);
+int64_t charactersToInt64Strict(const UChar*, size_t, bool* ok = nullptr, int base = 10);
+uint64_t charactersToUInt64Strict(const LChar*, size_t, bool* ok = nullptr, int base = 10);
+uint64_t charactersToUInt64Strict(const UChar*, size_t, bool* ok = nullptr, int base = 10);
+intptr_t charactersToIntPtrStrict(const LChar*, size_t, bool* ok = nullptr, int base = 10);
+intptr_t charactersToIntPtrStrict(const UChar*, size_t, bool* ok = nullptr, int base = 10);
+
+WTF_EXPORT_STRING_API int charactersToInt(const LChar*, size_t, bool* ok = nullptr); // ignores trailing garbage
+WTF_EXPORT_STRING_API int charactersToInt(const UChar*, size_t, bool* ok = nullptr); // ignores trailing garbage
+unsigned charactersToUInt(const LChar*, size_t, bool* ok = nullptr); // ignores trailing garbage
+unsigned charactersToUInt(const UChar*, size_t, bool* ok = nullptr); // ignores trailing garbage
+int64_t charactersToInt64(const LChar*, size_t, bool* ok = nullptr); // ignores trailing garbage
+int64_t charactersToInt64(const UChar*, size_t, bool* ok = nullptr); // ignores trailing garbage
+uint64_t charactersToUInt64(const LChar*, size_t, bool* ok = nullptr); // ignores trailing garbage
+WTF_EXPORT_STRING_API uint64_t charactersToUInt64(const UChar*, size_t, bool* ok = nullptr); // ignores trailing garbage
+intptr_t charactersToIntPtr(const LChar*, size_t, bool* ok = nullptr); // ignores trailing garbage
+intptr_t charactersToIntPtr(const UChar*, size_t, bool* ok = nullptr); // ignores trailing garbage
// FIXME: Like the strict functions above, these give false for "ok" when there is trailing garbage.
// Like the non-strict functions above, these return the value when there is trailing garbage.
// It would be better if these were more consistent with the above functions instead.
-WTF_EXPORT_STRING_API double charactersToDouble(const LChar*, size_t, bool* ok = 0);
-WTF_EXPORT_STRING_API double charactersToDouble(const UChar*, size_t, bool* ok = 0);
-float charactersToFloat(const LChar*, size_t, bool* ok = 0);
-WTF_EXPORT_STRING_API float charactersToFloat(const UChar*, size_t, bool* ok = 0);
+WTF_EXPORT_STRING_API double charactersToDouble(const LChar*, size_t, bool* ok = nullptr);
+WTF_EXPORT_STRING_API double charactersToDouble(const UChar*, size_t, bool* ok = nullptr);
+WTF_EXPORT_STRING_API float charactersToFloat(const LChar*, size_t, bool* ok = nullptr);
+WTF_EXPORT_STRING_API float charactersToFloat(const UChar*, size_t, bool* ok = nullptr);
WTF_EXPORT_STRING_API float charactersToFloat(const LChar*, size_t, size_t& parsedLength);
WTF_EXPORT_STRING_API float charactersToFloat(const UChar*, size_t, size_t& parsedLength);
-class ASCIILiteral;
-
-enum TrailingZerosTruncatingPolicy {
- KeepTrailingZeros,
- TruncateTrailingZeros
-};
+template<bool isSpecialCharacter(UChar), typename CharacterType> bool isAllSpecialCharacters(const CharacterType*, size_t);
-template<bool isSpecialCharacter(UChar), typename CharacterType>
-bool isAllSpecialCharacters(const CharacterType*, size_t);
+enum TrailingZerosTruncatingPolicy { KeepTrailingZeros, TruncateTrailingZeros };
class String {
public:
@@ -112,10 +106,13 @@ public:
WTF_EXPORT_STRING_API String(const char* characters);
// Construct a string referencing an existing StringImpl.
- String(StringImpl* impl) : m_impl(impl) { }
- String(PassRefPtr<StringImpl> impl) : m_impl(impl) { }
- String(PassRef<StringImpl> impl) : m_impl(std::move(impl)) { }
- String(RefPtr<StringImpl>&& impl) : m_impl(impl) { }
+ String(StringImpl&);
+ String(StringImpl*);
+ String(Ref<StringImpl>&&);
+ String(RefPtr<StringImpl>&&);
+
+ String(Ref<AtomicStringImpl>&&);
+ String(RefPtr<AtomicStringImpl>&&);
// Construct a string from a constant string literal.
WTF_EXPORT_STRING_API String(ASCIILiteral characters);
@@ -128,26 +125,30 @@ public:
// We have to declare the copy constructor and copy assignment operator as well, otherwise
// they'll be implicitly deleted by adding the move constructor and move assignment operator.
- String(const String& other) : m_impl(other.m_impl) { }
- String(String&& other) : m_impl(other.m_impl.release()) { }
+ String(const String& other)
+ : m_impl(other.m_impl)
+ { }
+ String(String&& other)
+ : m_impl(WTFMove(other.m_impl))
+ { }
String& operator=(const String& other) { m_impl = other.m_impl; return *this; }
- String& operator=(String&& other) { m_impl = other.m_impl.release(); return *this; }
+ String& operator=(String&& other) { m_impl = WTFMove(other.m_impl); return *this; }
// Inline the destructor.
ALWAYS_INLINE ~String() { }
void swap(String& o) { m_impl.swap(o.m_impl); }
- static String adopt(StringBuffer<LChar>& buffer) { return StringImpl::adopt(buffer); }
- static String adopt(StringBuffer<UChar>& buffer) { return StringImpl::adopt(buffer); }
+ static String adopt(StringBuffer<LChar>&& buffer) { return StringImpl::adopt(WTFMove(buffer)); }
+ static String adopt(StringBuffer<UChar>&& buffer) { return StringImpl::adopt(WTFMove(buffer)); }
template<typename CharacterType, size_t inlineCapacity, typename OverflowHandler>
- static String adopt(Vector<CharacterType, inlineCapacity, OverflowHandler>& vector) { return StringImpl::adopt(vector); }
+ static String adopt(Vector<CharacterType, inlineCapacity, OverflowHandler>&& vector) { return StringImpl::adopt(WTFMove(vector)); }
bool isNull() const { return !m_impl; }
bool isEmpty() const { return !m_impl || !m_impl->length(); }
StringImpl* impl() const { return m_impl.get(); }
- PassRefPtr<StringImpl> releaseImpl() { return m_impl.release(); }
+ RefPtr<StringImpl> releaseImpl() { return WTFMove(m_impl); }
unsigned length() const
{
@@ -156,14 +157,6 @@ public:
return m_impl->length();
}
- const UChar* characters() const { return deprecatedCharacters(); } // FIXME: Delete this.
- const UChar* deprecatedCharacters() const
- {
- if (!m_impl)
- return 0;
- return m_impl->deprecatedCharacters();
- }
-
const LChar* characters8() const
{
if (!m_impl)
@@ -182,11 +175,7 @@ public:
// Return characters8() or characters16() depending on CharacterType.
template <typename CharacterType>
- inline const CharacterType* getCharacters() const;
-
- // Like getCharacters() and upconvert if CharacterType is UChar on a 8bit string.
- template <typename CharacterType>
- inline const CharacterType* getCharactersWithUpconvert() const;
+ inline const CharacterType* characters() const;
bool is8Bit() const { return m_impl->is8Bit(); }
@@ -200,7 +189,8 @@ public:
WTF_EXPORT_STRING_API CString ascii() const;
WTF_EXPORT_STRING_API CString latin1() const;
- WTF_EXPORT_STRING_API CString utf8(ConversionMode = LenientConversion) const;
+ WTF_EXPORT_STRING_API CString utf8(ConversionMode) const;
+ WTF_EXPORT_STRING_API CString utf8() const;
UChar at(unsigned index) const
{
@@ -231,15 +221,16 @@ public:
{ return m_impl ? m_impl->find(str.impl()) : notFound; }
size_t find(const String& str, unsigned start) const
{ return m_impl ? m_impl->find(str.impl(), start) : notFound; }
+ size_t findIgnoringASCIICase(const String& str) const
+ { return m_impl ? m_impl->findIgnoringASCIICase(str.impl()) : notFound; }
+ size_t findIgnoringASCIICase(const String& str, unsigned startOffset) const
+ { return m_impl ? m_impl->findIgnoringASCIICase(str.impl(), startOffset) : notFound; }
size_t find(CharacterMatchFunctionPtr matchFunction, unsigned start = 0) const
{ return m_impl ? m_impl->find(matchFunction, start) : notFound; }
size_t find(const LChar* str, unsigned start = 0) const
{ return m_impl ? m_impl->find(str, start) : notFound; }
- size_t findNextLineStart(unsigned start = 0) const
- { return m_impl ? m_impl->findNextLineStart(start) : notFound; }
-
// Find the last instance of a single character or string.
size_t reverseFind(UChar c, unsigned start = UINT_MAX) const
{ return m_impl ? m_impl->reverseFind(c, start) : notFound; }
@@ -267,11 +258,21 @@ public:
WTF_EXPORT_STRING_API UChar32 characterStartingAt(unsigned) const; // Ditto.
bool contains(UChar c) const { return find(c) != notFound; }
- bool contains(const LChar* str, bool caseSensitive = true) const { return find(str, 0, caseSensitive) != notFound; }
- bool contains(const String& str, bool caseSensitive = true) const { return find(str, 0, caseSensitive) != notFound; }
+ bool contains(const LChar* str, bool caseSensitive = true, unsigned startOffset = 0) const
+ { return find(str, startOffset, caseSensitive) != notFound; }
+ bool contains(const String& str) const
+ { return find(str) != notFound; }
+ bool contains(const String& str, bool caseSensitive, unsigned startOffset = 0) const
+ { return find(str, startOffset, caseSensitive) != notFound; }
+ bool containsIgnoringASCIICase(const String& str) const
+ { return findIgnoringASCIICase(str) != notFound; }
+ bool containsIgnoringASCIICase(const String& str, unsigned startOffset) const
+ { return findIgnoringASCIICase(str, startOffset) != notFound; }
bool startsWith(const String& s) const
{ return m_impl ? m_impl->startsWith(s.impl()) : s.isEmpty(); }
+ bool startsWithIgnoringASCIICase(const String& s) const
+ { return m_impl ? m_impl->startsWithIgnoringASCIICase(s.impl()) : s.isEmpty(); }
bool startsWith(const String& s, bool caseSensitive) const
{ return m_impl ? m_impl->startsWith(s.impl(), caseSensitive) : s.isEmpty(); }
bool startsWith(UChar character) const
@@ -279,14 +280,23 @@ public:
template<unsigned matchLength>
bool startsWith(const char (&prefix)[matchLength], bool caseSensitive = true) const
{ return m_impl ? m_impl->startsWith<matchLength>(prefix, caseSensitive) : !matchLength; }
-
- bool endsWith(const String& s, bool caseSensitive = true) const
+ bool hasInfixStartingAt(const String& prefix, unsigned startOffset) const
+ { return m_impl && prefix.impl() ? m_impl->hasInfixStartingAt(*prefix.impl(), startOffset) : false; }
+
+ bool endsWith(const String& s) const
+ { return m_impl ? m_impl->endsWith(s.impl()) : s.isEmpty(); }
+ bool endsWithIgnoringASCIICase(const String& s) const
+ { return m_impl ? m_impl->endsWithIgnoringASCIICase(s.impl()) : s.isEmpty(); }
+ bool endsWith(const String& s, bool caseSensitive) const
{ return m_impl ? m_impl->endsWith(s.impl(), caseSensitive) : s.isEmpty(); }
bool endsWith(UChar character) const
{ return m_impl ? m_impl->endsWith(character) : false; }
+ bool endsWith(char character) const { return endsWith(static_cast<UChar>(character)); }
template<unsigned matchLength>
bool endsWith(const char (&prefix)[matchLength], bool caseSensitive = true) const
{ return m_impl ? m_impl->endsWith<matchLength>(prefix, caseSensitive) : !matchLength; }
+ bool hasInfixEndingAt(const String& suffix, unsigned endOffset) const
+ { return m_impl && suffix.impl() ? m_impl->hasInfixEndingAt(*suffix.impl(), endOffset) : false; }
WTF_EXPORT_STRING_API void append(const String&);
WTF_EXPORT_STRING_API void append(LChar);
@@ -295,7 +305,6 @@ public:
WTF_EXPORT_STRING_API void append(const LChar*, unsigned length);
WTF_EXPORT_STRING_API void append(const UChar*, unsigned length);
WTF_EXPORT_STRING_API void insert(const String&, unsigned pos);
- void insert(const UChar*, unsigned length, unsigned pos);
String& replace(UChar a, UChar b) { if (m_impl) m_impl = m_impl->replace(a, b); return *this; }
String& replace(UChar a, const String& b) { if (m_impl) m_impl = m_impl->replace(a, b.impl()); return *this; }
@@ -311,8 +320,6 @@ public:
return *this;
}
- void fill(UChar c) { if (m_impl) m_impl = m_impl->fill(c); }
-
WTF_EXPORT_STRING_API void truncate(unsigned len);
WTF_EXPORT_STRING_API void remove(unsigned pos, int len = 1);
@@ -321,12 +328,13 @@ public:
String left(unsigned len) const { return substring(0, len); }
String right(unsigned len) const { return substring(length() - len, len); }
- // Returns a lowercase/uppercase version of the string
- WTF_EXPORT_STRING_API String lower() const;
- WTF_EXPORT_STRING_API String upper() const;
-
- WTF_EXPORT_STRING_API String lower(const AtomicString& localeIdentifier) const;
- WTF_EXPORT_STRING_API String upper(const AtomicString& localeIdentifier) const;
+ WTF_EXPORT_STRING_API String convertToASCIILowercase() const;
+ WTF_EXPORT_STRING_API String convertToASCIIUppercase() const;
+ WTF_EXPORT_STRING_API String convertToLowercaseWithoutLocale() const;
+ WTF_EXPORT_STRING_API String convertToLowercaseWithoutLocaleStartingAtFailingIndex8Bit(unsigned) const;
+ WTF_EXPORT_STRING_API String convertToUppercaseWithoutLocale() const;
+ WTF_EXPORT_STRING_API String convertToLowercaseWithLocale(const AtomicString& localeIdentifier) const;
+ WTF_EXPORT_STRING_API String convertToUppercaseWithLocale(const AtomicString& localeIdentifier) const;
WTF_EXPORT_STRING_API String stripWhiteSpace() const;
WTF_EXPORT_STRING_API String stripWhiteSpace(IsWhiteSpaceFunctionPtr) const;
@@ -336,7 +344,8 @@ public:
WTF_EXPORT_STRING_API String removeCharacters(CharacterMatchFunctionPtr) const;
template<bool isSpecialCharacter(UChar)> bool isAllSpecialCharacters() const;
- // Return the string with case folded for case insensitive comparison.
+ // Returns the string with case folded for case insensitive comparison.
+ // Use convertToASCIILowercase instead if ASCII case insensitive comparison is desired.
WTF_EXPORT_STRING_API String foldCase() const;
WTF_EXPORT_STRING_API static String format(const char *, ...) WTF_ATTRIBUTE_PRINTF(1, 2);
@@ -358,29 +367,29 @@ public:
split(separator, false, result);
}
- WTF_EXPORT_STRING_API int toIntStrict(bool* ok = 0, int base = 10) const;
- WTF_EXPORT_STRING_API unsigned toUIntStrict(bool* ok = 0, int base = 10) const;
- WTF_EXPORT_STRING_API int64_t toInt64Strict(bool* ok = 0, int base = 10) const;
- WTF_EXPORT_STRING_API uint64_t toUInt64Strict(bool* ok = 0, int base = 10) const;
- intptr_t toIntPtrStrict(bool* ok = 0, int base = 10) const;
+ WTF_EXPORT_STRING_API int toIntStrict(bool* ok = nullptr, int base = 10) const;
+ WTF_EXPORT_STRING_API unsigned toUIntStrict(bool* ok = nullptr, int base = 10) const;
+ WTF_EXPORT_STRING_API int64_t toInt64Strict(bool* ok = nullptr, int base = 10) const;
+ WTF_EXPORT_STRING_API uint64_t toUInt64Strict(bool* ok = nullptr, int base = 10) const;
+ WTF_EXPORT_STRING_API intptr_t toIntPtrStrict(bool* ok = nullptr, int base = 10) const;
- WTF_EXPORT_STRING_API int toInt(bool* ok = 0) const;
- WTF_EXPORT_STRING_API unsigned toUInt(bool* ok = 0) const;
- WTF_EXPORT_STRING_API int64_t toInt64(bool* ok = 0) const;
- WTF_EXPORT_STRING_API uint64_t toUInt64(bool* ok = 0) const;
- WTF_EXPORT_STRING_API intptr_t toIntPtr(bool* ok = 0) const;
+ WTF_EXPORT_STRING_API int toInt(bool* ok = nullptr) const;
+ WTF_EXPORT_STRING_API unsigned toUInt(bool* ok = nullptr) const;
+ WTF_EXPORT_STRING_API int64_t toInt64(bool* ok = nullptr) const;
+ WTF_EXPORT_STRING_API uint64_t toUInt64(bool* ok = nullptr) const;
+ WTF_EXPORT_STRING_API intptr_t toIntPtr(bool* ok = nullptr) const;
// FIXME: Like the strict functions above, these give false for "ok" when there is trailing garbage.
// Like the non-strict functions above, these return the value when there is trailing garbage.
// It would be better if these were more consistent with the above functions instead.
- WTF_EXPORT_STRING_API double toDouble(bool* ok = 0) const;
- WTF_EXPORT_STRING_API float toFloat(bool* ok = 0) const;
+ WTF_EXPORT_STRING_API double toDouble(bool* ok = nullptr) const;
+ WTF_EXPORT_STRING_API float toFloat(bool* ok = nullptr) const;
bool percentage(int& percentage) const;
#if COMPILER_SUPPORTS(CXX_REFERENCE_QUALIFIED_FUNCTIONS)
WTF_EXPORT_STRING_API String isolatedCopy() const &;
- WTF_EXPORT_STRING_API String isolatedCopy() const &&;
+ WTF_EXPORT_STRING_API String isolatedCopy() &&;
#else
WTF_EXPORT_STRING_API String isolatedCopy() const;
#endif
@@ -388,7 +397,7 @@ public:
WTF_EXPORT_STRING_API bool isSafeToSendToAnotherThread() const;
// Prevent Strings from being implicitly convertable to bool as it will be ambiguous on any platform that
- // allows implicit conversion to another pointer type (e.g., Mac allows implicit conversion to NSString*).
+ // allows implicit conversion to another pointer type (e.g., Mac allows implicit conversion to NSString *).
typedef struct ImplicitConversionFromWTFStringToBoolDisallowedA* (String::*UnspecifiedBoolTypeA);
typedef struct ImplicitConversionFromWTFStringToBoolDisallowedB* (String::*UnspecifiedBoolTypeB);
operator UnspecifiedBoolTypeA() const;
@@ -400,11 +409,12 @@ public:
#endif
#ifdef __OBJC__
- WTF_EXPORT_STRING_API String(NSString*);
+ WTF_EXPORT_STRING_API String(NSString *);
- // This conversion maps NULL to "", which loses the meaning of NULL, but we
- // need this mapping because AppKit crashes when passed nil NSStrings.
- operator NSString*() const { if (!m_impl) return @""; return *m_impl; }
+ // This conversion maps the null string to an empty NSString rather than to nil.
+ // Given Cocoa idioms, this is a more useful default. Clients that need to preserve the
+ // null string can check isNull explicitly.
+ operator NSString *() const;
#endif
WTF_EXPORT_STRING_API static String make8BitFrom16BitSource(const UChar*, size_t);
@@ -446,6 +456,8 @@ public:
String(WTF::HashTableDeletedValueType) : m_impl(WTF::HashTableDeletedValue) { }
bool isHashTableDeletedValue() const { return m_impl.isHashTableDeletedValue(); }
+ unsigned existingHash() const { return isNull() ? 0 : impl()->existingHash(); }
+
#ifndef NDEBUG
WTF_EXPORT_STRING_API void show() const;
#endif
@@ -458,6 +470,14 @@ public:
return (*m_impl)[index];
}
+ // Turns this String empty if the StringImpl is not referenced by anyone else.
+ // This is useful for clearing String-based caches.
+ void clearImplIfNotShared()
+ {
+ if (m_impl && m_impl->hasOneRef())
+ m_impl = nullptr;
+ }
+
private:
template <typename CharacterType>
void removeInternal(const CharacterType*, unsigned, int);
@@ -473,10 +493,8 @@ inline bool operator==(const String& a, const LChar* b) { return equal(a.impl(),
inline bool operator==(const String& a, const char* b) { return equal(a.impl(), reinterpret_cast<const LChar*>(b)); }
inline bool operator==(const LChar* a, const String& b) { return equal(a, b.impl()); }
inline bool operator==(const char* a, const String& b) { return equal(reinterpret_cast<const LChar*>(a), b.impl()); }
-template<size_t inlineCapacity>
-inline bool operator==(const Vector<char, inlineCapacity>& a, const String& b) { return equal(b.impl(), a.data(), a.size()); }
-template<size_t inlineCapacity>
-inline bool operator==(const String& a, const Vector<char, inlineCapacity>& b) { return b == a; }
+template<size_t inlineCapacity> inline bool operator==(const Vector<char, inlineCapacity>& a, const String& b) { return equal(b.impl(), a.data(), a.size()); }
+template<size_t inlineCapacity> inline bool operator==(const String& a, const Vector<char, inlineCapacity>& b) { return b == a; }
inline bool operator!=(const String& a, const String& b) { return !equal(a.impl(), b.impl()); }
@@ -484,64 +502,79 @@ inline bool operator!=(const String& a, const LChar* b) { return !equal(a.impl()
inline bool operator!=(const String& a, const char* b) { return !equal(a.impl(), reinterpret_cast<const LChar*>(b)); }
inline bool operator!=(const LChar* a, const String& b) { return !equal(a, b.impl()); }
inline bool operator!=(const char* a, const String& b) { return !equal(reinterpret_cast<const LChar*>(a), b.impl()); }
-template<size_t inlineCapacity>
-inline bool operator!=(const Vector<char, inlineCapacity>& a, const String& b) { return !(a == b); }
-template<size_t inlineCapacity>
-inline bool operator!=(const String& a, const Vector<char, inlineCapacity>& b) { return b != a; }
-
-inline bool equalIgnoringCase(const String& a, const String& b) { return equalIgnoringCase(a.impl(), b.impl()); }
-inline bool equalIgnoringCase(const String& a, const LChar* b) { return equalIgnoringCase(a.impl(), b); }
-inline bool equalIgnoringCase(const String& a, const char* b) { return equalIgnoringCase(a.impl(), reinterpret_cast<const LChar*>(b)); }
-inline bool equalIgnoringCase(const LChar* a, const String& b) { return equalIgnoringCase(a, b.impl()); }
-inline bool equalIgnoringCase(const char* a, const String& b) { return equalIgnoringCase(reinterpret_cast<const LChar*>(a), b.impl()); }
-
-inline bool equalPossiblyIgnoringCase(const String& a, const String& b, bool ignoreCase)
-{
- return ignoreCase ? equalIgnoringCase(a, b) : (a == b);
-}
+template<size_t inlineCapacity> inline bool operator!=(const Vector<char, inlineCapacity>& a, const String& b) { return !(a == b); }
+template<size_t inlineCapacity> inline bool operator!=(const String& a, const Vector<char, inlineCapacity>& b) { return b != a; }
-inline bool equalIgnoringNullity(const String& a, const String& b) { return equalIgnoringNullity(a.impl(), b.impl()); }
+bool equalIgnoringASCIICase(const String&, const String&);
+bool equalIgnoringASCIICase(const String&, const char*);
-template<size_t inlineCapacity>
-inline bool equalIgnoringNullity(const Vector<UChar, inlineCapacity>& a, const String& b) { return equalIgnoringNullity(a, b.impl()); }
+template<unsigned length> bool equalLettersIgnoringASCIICase(const String&, const char (&lowercaseLetters)[length]);
+template<unsigned length> bool startsWithLettersIgnoringASCIICase(const String&, const char (&lowercaseLetters)[length]);
+
+inline bool equalIgnoringNullity(const String& a, const String& b) { return equalIgnoringNullity(a.impl(), b.impl()); }
+template<size_t inlineCapacity> inline bool equalIgnoringNullity(const Vector<UChar, inlineCapacity>& a, const String& b) { return equalIgnoringNullity(a, b.impl()); }
inline bool operator!(const String& str) { return str.isNull(); }
inline void swap(String& a, String& b) { a.swap(b); }
+#ifdef __OBJC__
+
+// Used in a small number of places where the long-standing behavior has been "nil if empty".
+NSString * nsStringNilIfEmpty(const String&);
+
+#endif
+
// Definitions of string operations
-template<size_t inlineCapacity, typename OverflowHandler>
-String::String(const Vector<UChar, inlineCapacity, OverflowHandler>& vector)
- : m_impl(vector.size() ? StringImpl::create(vector.data(), vector.size()) : *StringImpl::empty())
+inline String::String(StringImpl& impl)
+ : m_impl(&impl)
{
}
-template<>
-inline const LChar* String::getCharacters<LChar>() const
+inline String::String(StringImpl* impl)
+ : m_impl(impl)
{
- ASSERT(is8Bit());
- return characters8();
}
-template<>
-inline const UChar* String::getCharacters<UChar>() const
+inline String::String(Ref<StringImpl>&& impl)
+ : m_impl(WTFMove(impl))
+{
+}
+
+inline String::String(RefPtr<StringImpl>&& impl)
+ : m_impl(WTFMove(impl))
+{
+}
+
+inline String::String(Ref<AtomicStringImpl>&& impl)
+ : m_impl(WTFMove(impl))
+{
+}
+
+inline String::String(RefPtr<AtomicStringImpl>&& impl)
+ : m_impl(WTFMove(impl))
+{
+}
+
+template<size_t inlineCapacity, typename OverflowHandler>
+String::String(const Vector<UChar, inlineCapacity, OverflowHandler>& vector)
+ : m_impl(vector.size() ? StringImpl::create(vector.data(), vector.size()) : Ref<StringImpl>(*StringImpl::empty()))
{
- ASSERT(!is8Bit());
- return characters16();
}
template<>
-inline const LChar* String::getCharactersWithUpconvert<LChar>() const
+inline const LChar* String::characters<LChar>() const
{
ASSERT(is8Bit());
return characters8();
}
template<>
-inline const UChar* String::getCharactersWithUpconvert<UChar>() const
+inline const UChar* String::characters<UChar>() const
{
- return deprecatedCharacters();
+ ASSERT(!is8Bit());
+ return characters16();
}
inline bool String::containsOnlyLatin1() const
@@ -559,12 +592,22 @@ inline bool String::containsOnlyLatin1() const
return !(ored & 0xFF00);
}
-
#ifdef __OBJC__
-// This is for situations in WebKit where the long standing behavior has been
-// "nil if empty", so we try to maintain longstanding behavior for the sake of
-// entrenched clients
-inline NSString* nsStringNilIfEmpty(const String& str) { return str.isEmpty() ? nil : (NSString*)str; }
+
+inline String::operator NSString *() const
+{
+ if (!m_impl)
+ return @"";
+ return *m_impl;
+}
+
+inline NSString * nsStringNilIfEmpty(const String& string)
+{
+ if (string.isEmpty())
+ return nil;
+ return *string.impl();
+}
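+
+// Illustrative behavior of the two conversions above (follows directly from their definitions):
+//     (NSString *)String()            -> @""   (the null string maps to an empty NSString)
+//     nsStringNilIfEmpty(String())    -> nil
+//     nsStringNilIfEmpty(String("x")) -> @"x"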
+
#endif
inline bool String::containsOnlyASCII() const
@@ -585,12 +628,6 @@ inline bool codePointCompareLessThan(const String& a, const String& b)
return codePointCompare(a.impl(), b.impl()) < 0;
}
-template<size_t inlineCapacity>
-inline void append(Vector<UChar, inlineCapacity>& vector, const String& string)
-{
- vector.append(string.deprecatedCharacters(), string.length());
-}
-
template<typename CharacterType>
inline void appendNumber(Vector<CharacterType>& vector, unsigned char number)
{
@@ -657,13 +694,38 @@ private:
// Shared global empty string.
WTF_EXPORT_STRING_API const String& emptyString();
+template<unsigned length> inline bool equalLettersIgnoringASCIICase(const String& string, const char (&lowercaseLetters)[length])
+{
+ return equalLettersIgnoringASCIICase(string.impl(), lowercaseLetters);
+}
+
+inline bool equalIgnoringASCIICase(const String& a, const String& b)
+{
+ return equalIgnoringASCIICase(a.impl(), b.impl());
+}
+
+inline bool equalIgnoringASCIICase(const String& a, const char* b)
+{
+ return equalIgnoringASCIICase(a.impl(), b);
+}
+
+template<unsigned length> inline bool startsWithLettersIgnoringASCIICase(const String& string, const char (&lowercaseLetters)[length])
+{
+ return startsWithLettersIgnoringASCIICase(string.impl(), lowercaseLetters);
+}
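+
+// Typical call sites (illustrative; the literal must be ASCII lowercase, as the parameter
+// name lowercaseLetters indicates):
+//     if (equalLettersIgnoringASCIICase(tagName, "div")) { ... }
+//     if (startsWithLettersIgnoringASCIICase(scheme, "http")) { ... }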
+
+template<> struct IntegerToStringConversionTrait<String> {
+ using ReturnType = String;
+ using AdditionalArgumentType = void;
+ static String flush(LChar* characters, unsigned length, void*) { return { characters, length }; }
+};
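+
+// This specialization plugs String into WTF's shared integer-to-string machinery
+// (wtf/text/IntegerToStringConversion.h); the numberToString* helpers there can then
+// presumably hand back a String built straight from the converted digits.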
+
}
using WTF::CString;
using WTF::KeepTrailingZeros;
using WTF::String;
using WTF::emptyString;
-using WTF::append;
using WTF::appendNumber;
using WTF::charactersAreAllASCII;
using WTF::charactersToIntStrict;
@@ -679,7 +741,6 @@ using WTF::charactersToIntPtr;
using WTF::charactersToDouble;
using WTF::charactersToFloat;
using WTF::equal;
-using WTF::equalIgnoringCase;
using WTF::find;
using WTF::isAllSpecialCharacters;
using WTF::isSpaceOrNewline;
@@ -687,4 +748,5 @@ using WTF::reverseFind;
using WTF::ASCIILiteral;
#include <wtf/text/AtomicString.h>
+
#endif
diff --git a/Source/WTF/wtf/text/icu/UTextProvider.cpp b/Source/WTF/wtf/text/icu/UTextProvider.cpp
new file mode 100644
index 000000000..7388fdbf7
--- /dev/null
+++ b/Source/WTF/wtf/text/icu/UTextProvider.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UTextProvider.h"
+
+#include <algorithm>
+#include <string.h>
+
+namespace WTF {
+
+// Relocate pointer from source into destination as required.
+static inline void fixPointer(const UText* source, UText* destination, const void*& pointer)
+{
+ if (pointer >= source->pExtra && pointer < static_cast<char*>(source->pExtra) + source->extraSize) {
+ // Pointer references source extra buffer.
+ pointer = static_cast<char*>(destination->pExtra) + (static_cast<const char*>(pointer) - static_cast<const char*>(source->pExtra));
+ } else if (pointer >= source && pointer < reinterpret_cast<const char*>(source) + source->sizeOfStruct) {
+ // Pointer references source text structure, but not source extra buffer.
+ pointer = reinterpret_cast<char*>(destination) + (static_cast<const char*>(pointer) - reinterpret_cast<const char*>(source));
+ }
+}
+
+UText* uTextCloneImpl(UText* destination, const UText* source, UBool deep, UErrorCode* status)
+{
+ ASSERT_UNUSED(deep, !deep);
+ if (U_FAILURE(*status))
+ return nullptr;
+ int32_t extraSize = source->extraSize;
+ destination = utext_setup(destination, extraSize, status);
+ if (U_FAILURE(*status))
+ return destination;
+ void* extraNew = destination->pExtra;
+ int32_t flags = destination->flags;
+ int sizeToCopy = std::min(source->sizeOfStruct, destination->sizeOfStruct);
+ memcpy(destination, source, sizeToCopy);
+ destination->pExtra = extraNew;
+ destination->flags = flags;
+ memcpy(destination->pExtra, source->pExtra, extraSize);
+ fixPointer(source, destination, destination->context);
+ fixPointer(source, destination, destination->p);
+ fixPointer(source, destination, destination->q);
+ ASSERT(!destination->r);
+ const void* chunkContents = static_cast<const void*>(destination->chunkContents);
+ fixPointer(source, destination, chunkContents);
+ destination->chunkContents = static_cast<const UChar*>(chunkContents);
+ return destination;
+}
+
+} // namespace WTF
diff --git a/Source/WTF/wtf/text/icu/UTextProvider.h b/Source/WTF/wtf/text/icu/UTextProvider.h
new file mode 100644
index 000000000..2f0af9972
--- /dev/null
+++ b/Source/WTF/wtf/text/icu/UTextProvider.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTextProvider_h
+#define UTextProvider_h
+
+#include <limits>
+#include <unicode/utext.h>
+
+namespace WTF {
+
+enum class UTextProviderContext {
+ NoContext,
+ PriorContext,
+ PrimaryContext
+};
+
+inline UTextProviderContext uTextProviderContext(const UText* text, int64_t nativeIndex, UBool forward)
+{
+ if (!text->b || nativeIndex > text->b)
+ return UTextProviderContext::PrimaryContext;
+ if (nativeIndex == text->b)
+ return forward ? UTextProviderContext::PrimaryContext : UTextProviderContext::PriorContext;
+ return UTextProviderContext::PriorContext;
+}
+
+inline void initializeContextAwareUTextProvider(UText* text, const UTextFuncs* funcs, const void* string, unsigned length, const UChar* priorContext, int priorContextLength)
+{
+ text->pFuncs = funcs;
+ text->providerProperties = 1 << UTEXT_PROVIDER_STABLE_CHUNKS;
+ text->context = string;
+ text->p = string;
+ text->a = length;
+ text->q = priorContext;
+ text->b = priorContextLength;
+}
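+
+// Field conventions used by the context-aware providers (as set up above): context and p point
+// at the primary text and a holds its length, while q points at the prior context and b holds
+// its length; the total native length seen by ICU is therefore a + b.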
+
+// Shared implementation for the UTextClone function on UTextFuncs.
+
+UText* uTextCloneImpl(UText* destination, const UText* source, UBool deep, UErrorCode* status);
+
+
+// Helpers for the UTextAccess function on UTextFuncs.
+
+inline int64_t uTextAccessPinIndex(int64_t& index, int64_t limit)
+{
+ if (index < 0)
+ index = 0;
+ else if (index > limit)
+ index = limit;
+ return index;
+}
+
+inline bool uTextAccessInChunkOrOutOfRange(UText* text, int64_t nativeIndex, int64_t nativeLength, UBool forward, UBool& isAccessible)
+{
+ if (forward) {
+ if (nativeIndex >= text->chunkNativeStart && nativeIndex < text->chunkNativeLimit) {
+ int64_t offset = nativeIndex - text->chunkNativeStart;
+ // Ensure chunk offset is well formed if computed offset exceeds int32_t range.
+ ASSERT(offset < std::numeric_limits<int32_t>::max());
+ text->chunkOffset = offset < std::numeric_limits<int32_t>::max() ? static_cast<int32_t>(offset) : 0;
+ isAccessible = TRUE;
+ return true;
+ }
+ if (nativeIndex >= nativeLength && text->chunkNativeLimit == nativeLength) {
+ text->chunkOffset = text->chunkLength;
+ isAccessible = FALSE;
+ return true;
+ }
+ } else {
+ if (nativeIndex > text->chunkNativeStart && nativeIndex <= text->chunkNativeLimit) {
+ int64_t offset = nativeIndex - text->chunkNativeStart;
+ // Ensure chunk offset is well formed if computed offset exceeds int32_t range.
+ ASSERT(offset < std::numeric_limits<int32_t>::max());
+ text->chunkOffset = offset < std::numeric_limits<int32_t>::max() ? static_cast<int32_t>(offset) : 0;
+ isAccessible = TRUE;
+ return true;
+ }
+ if (nativeIndex <= 0 && !text->chunkNativeStart) {
+ text->chunkOffset = 0;
+ isAccessible = FALSE;
+ return true;
+ }
+ }
+ return false;
+}
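+
+// Summary of the helper above: it returns true when the request was resolved against the
+// current chunk (isAccessible then says whether the index is actually readable), and false
+// when the caller must map a new chunk into the UText before retrying.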
+
+} // namespace WTF
+
+#endif // UTextProvider_h
diff --git a/Source/WTF/wtf/text/icu/UTextProviderLatin1.cpp b/Source/WTF/wtf/text/icu/UTextProviderLatin1.cpp
new file mode 100644
index 000000000..25a0e1e86
--- /dev/null
+++ b/Source/WTF/wtf/text/icu/UTextProviderLatin1.cpp
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UTextProviderLatin1.h"
+
+#include "UTextProvider.h"
+#include <algorithm>
+#include <wtf/text/StringImpl.h>
+
+namespace WTF {
+
+// Latin1 provider
+
+static UText* uTextLatin1Clone(UText*, const UText*, UBool, UErrorCode*);
+static int64_t uTextLatin1NativeLength(UText*);
+static UBool uTextLatin1Access(UText*, int64_t, UBool);
+static int32_t uTextLatin1Extract(UText*, int64_t, int64_t, UChar*, int32_t, UErrorCode*);
+static int64_t uTextLatin1MapOffsetToNative(const UText*);
+static int32_t uTextLatin1MapNativeIndexToUTF16(const UText*, int64_t);
+static void uTextLatin1Close(UText*);
+
+static const struct UTextFuncs uTextLatin1Funcs = {
+ sizeof(UTextFuncs),
+ 0,
+ 0,
+ 0,
+ uTextLatin1Clone,
+ uTextLatin1NativeLength,
+ uTextLatin1Access,
+ uTextLatin1Extract,
+ nullptr,
+ nullptr,
+ uTextLatin1MapOffsetToNative,
+ uTextLatin1MapNativeIndexToUTF16,
+ uTextLatin1Close,
+ nullptr,
+ nullptr,
+ nullptr
+};
+
+static UText* uTextLatin1Clone(UText* destination, const UText* source, UBool deep, UErrorCode* status)
+{
+ ASSERT_UNUSED(deep, !deep);
+
+ if (U_FAILURE(*status))
+ return 0;
+
+ UText* result = utext_setup(destination, sizeof(UChar) * UTextWithBufferInlineCapacity, status);
+ if (U_FAILURE(*status))
+ return destination;
+
+ result->providerProperties = source->providerProperties;
+
+ // Point at the same position, but with an empty buffer.
+ result->chunkNativeStart = source->chunkNativeStart;
+ result->chunkNativeLimit = source->chunkNativeStart;
+ result->nativeIndexingLimit = static_cast<int32_t>(source->chunkNativeStart);
+ result->chunkOffset = 0;
+ result->context = source->context;
+ result->a = source->a;
+ result->pFuncs = &uTextLatin1Funcs;
+ result->chunkContents = (UChar*)result->pExtra;
+ memset(const_cast<UChar*>(result->chunkContents), 0, sizeof(UChar) * UTextWithBufferInlineCapacity);
+
+ return result;
+}
+
+static int64_t uTextLatin1NativeLength(UText* uText)
+{
+ return uText->a;
+}
+
+static UBool uTextLatin1Access(UText* uText, int64_t index, UBool forward)
+{
+ int64_t length = uText->a;
+
+ if (forward) {
+ if (index < uText->chunkNativeLimit && index >= uText->chunkNativeStart) {
+ // Already inside the buffer. Set the new offset.
+ uText->chunkOffset = static_cast<int32_t>(index - uText->chunkNativeStart);
+ return TRUE;
+ }
+ if (index >= length && uText->chunkNativeLimit == length) {
+ // Off the end of the buffer, but we can't get it.
+ uText->chunkOffset = static_cast<int32_t>(index - uText->chunkNativeStart);
+ return FALSE;
+ }
+ } else {
+ if (index <= uText->chunkNativeLimit && index > uText->chunkNativeStart) {
+ // Already inside the buffer. Set the new offset.
+ uText->chunkOffset = static_cast<int32_t>(index - uText->chunkNativeStart);
+ return TRUE;
+ }
+ if (!index && !uText->chunkNativeStart) {
+ // Already at the beginning; can't go any farther.
+ uText->chunkOffset = 0;
+ return FALSE;
+ }
+ }
+
+ if (forward) {
+ uText->chunkNativeStart = index;
+ uText->chunkNativeLimit = uText->chunkNativeStart + UTextWithBufferInlineCapacity;
+ if (uText->chunkNativeLimit > length)
+ uText->chunkNativeLimit = length;
+
+ uText->chunkOffset = 0;
+ } else {
+ uText->chunkNativeLimit = index;
+ if (uText->chunkNativeLimit > length)
+ uText->chunkNativeLimit = length;
+
+ uText->chunkNativeStart = uText->chunkNativeLimit - UTextWithBufferInlineCapacity;
+ if (uText->chunkNativeStart < 0)
+ uText->chunkNativeStart = 0;
+
+ uText->chunkOffset = static_cast<int32_t>(index - uText->chunkNativeStart);
+ }
+ uText->chunkLength = static_cast<int32_t>(uText->chunkNativeLimit - uText->chunkNativeStart);
+
+ StringImpl::copyChars(const_cast<UChar*>(uText->chunkContents), static_cast<const LChar*>(uText->context) + uText->chunkNativeStart, static_cast<unsigned>(uText->chunkLength));
+
+ uText->nativeIndexingLimit = uText->chunkLength;
+
+ return TRUE;
+}
+
+static int32_t uTextLatin1Extract(UText* uText, int64_t start, int64_t limit, UChar* dest, int32_t destCapacity, UErrorCode* status)
+{
+ int64_t length = uText->a;
+ if (U_FAILURE(*status))
+ return 0;
+
+ if (destCapacity < 0 || (!dest && destCapacity > 0)) {
+ *status = U_ILLEGAL_ARGUMENT_ERROR;
+ return 0;
+ }
+
+ if (start < 0 || start > limit || (limit - start) > INT32_MAX) {
+ *status = U_INDEX_OUTOFBOUNDS_ERROR;
+ return 0;
+ }
+
+ if (start > length)
+ start = length;
+ if (limit > length)
+ limit = length;
+
+ length = limit - start;
+
+ if (!length)
+ return 0;
+
+ if (destCapacity > 0 && dest) {
+ int32_t trimmedLength = static_cast<int32_t>(length);
+ if (trimmedLength > destCapacity)
+ trimmedLength = destCapacity;
+
+ StringImpl::copyChars(dest, static_cast<const LChar*>(uText->context) + start, static_cast<unsigned>(trimmedLength));
+ }
+
+ if (length < destCapacity) {
+ dest[length] = 0;
+ if (*status == U_STRING_NOT_TERMINATED_WARNING)
+ *status = U_ZERO_ERROR;
+ } else if (length == destCapacity)
+ *status = U_STRING_NOT_TERMINATED_WARNING;
+ else
+ *status = U_BUFFER_OVERFLOW_ERROR;
+
+ return static_cast<int32_t>(length);
+}
+
+static int64_t uTextLatin1MapOffsetToNative(const UText* uText)
+{
+ return uText->chunkNativeStart + uText->chunkOffset;
+}
+
+static int32_t uTextLatin1MapNativeIndexToUTF16(const UText* uText, int64_t nativeIndex)
+{
+ ASSERT_UNUSED(uText, uText->chunkNativeStart >= nativeIndex);
+ ASSERT_UNUSED(uText, nativeIndex < uText->chunkNativeLimit);
+ return static_cast<int32_t>(nativeIndex);
+}
+
+static void uTextLatin1Close(UText* uText)
+{
+ uText->context = nullptr;
+}
+
+UText* openLatin1UTextProvider(UTextWithBuffer* utWithBuffer, const LChar* string, unsigned length, UErrorCode* status)
+{
+ if (U_FAILURE(*status))
+ return nullptr;
+ if (!string || length > static_cast<unsigned>(std::numeric_limits<int32_t>::max())) {
+ *status = U_ILLEGAL_ARGUMENT_ERROR;
+ return nullptr;
+ }
+ UText* text = utext_setup(&utWithBuffer->text, sizeof(utWithBuffer->buffer), status);
+ if (U_FAILURE(*status)) {
+ ASSERT(!text);
+ return nullptr;
+ }
+
+ text->context = string;
+ text->a = length;
+ text->pFuncs = &uTextLatin1Funcs;
+ text->chunkContents = (UChar*)text->pExtra;
+ memset(const_cast<UChar*>(text->chunkContents), 0, sizeof(UChar) * UTextWithBufferInlineCapacity);
+
+ return text;
+}
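+
+// Illustrative usage (mirrors how break-iterator code typically prepares the stack buffer;
+// the LChar data must outlive the UText, since the provider only points at it):
+//     UTextWithBuffer textStorage;
+//     textStorage.text = UTEXT_INITIALIZER;
+//     textStorage.text.extraSize = sizeof(textStorage.buffer);
+//     textStorage.text.pExtra = textStorage.buffer;
+//     UErrorCode status = U_ZERO_ERROR;
+//     UText* text = openLatin1UTextProvider(&textStorage, characters, length, &status);
+//     // ... hand text to ICU, e.g. ubrk_setUText(breakIterator, text, &status) ...
+//     utext_close(text);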
+
+
+// Latin1ContextAware provider
+
+static UText* uTextLatin1ContextAwareClone(UText*, const UText*, UBool, UErrorCode*);
+static int64_t uTextLatin1ContextAwareNativeLength(UText*);
+static UBool uTextLatin1ContextAwareAccess(UText*, int64_t, UBool);
+static int32_t uTextLatin1ContextAwareExtract(UText*, int64_t, int64_t, UChar*, int32_t, UErrorCode*);
+static void uTextLatin1ContextAwareClose(UText*);
+
+static const struct UTextFuncs textLatin1ContextAwareFuncs = {
+ sizeof(UTextFuncs),
+ 0,
+ 0,
+ 0,
+ uTextLatin1ContextAwareClone,
+ uTextLatin1ContextAwareNativeLength,
+ uTextLatin1ContextAwareAccess,
+ uTextLatin1ContextAwareExtract,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ uTextLatin1ContextAwareClose,
+ nullptr,
+ nullptr,
+ nullptr
+};
+
+static inline UTextProviderContext textLatin1ContextAwareGetCurrentContext(const UText* text)
+{
+ if (!text->chunkContents)
+ return UTextProviderContext::NoContext;
+ return text->chunkContents == text->pExtra ? UTextProviderContext::PrimaryContext : UTextProviderContext::PriorContext;
+}
+
+static void textLatin1ContextAwareMoveInPrimaryContext(UText* text, int64_t nativeIndex, int64_t nativeLength, UBool forward)
+{
+ ASSERT(text->chunkContents == text->pExtra);
+ if (forward) {
+ ASSERT(nativeIndex >= text->b && nativeIndex < nativeLength);
+ text->chunkNativeStart = nativeIndex;
+ text->chunkNativeLimit = nativeIndex + text->extraSize / sizeof(UChar);
+ if (text->chunkNativeLimit > nativeLength)
+ text->chunkNativeLimit = nativeLength;
+ } else {
+ ASSERT(nativeIndex > text->b && nativeIndex <= nativeLength);
+ text->chunkNativeLimit = nativeIndex;
+ text->chunkNativeStart = nativeIndex - text->extraSize / sizeof(UChar);
+ if (text->chunkNativeStart < text->b)
+ text->chunkNativeStart = text->b;
+ }
+ int64_t length = text->chunkNativeLimit - text->chunkNativeStart;
+ // Ensure chunk length is well defined if computed length exceeds int32_t range.
+ ASSERT(length < std::numeric_limits<int32_t>::max());
+ text->chunkLength = length < std::numeric_limits<int32_t>::max() ? static_cast<int32_t>(length) : 0;
+ text->nativeIndexingLimit = text->chunkLength;
+ text->chunkOffset = forward ? 0 : text->chunkLength;
+ StringImpl::copyChars(const_cast<UChar*>(text->chunkContents), static_cast<const LChar*>(text->p) + (text->chunkNativeStart - text->b), static_cast<unsigned>(text->chunkLength));
+}
+
+static void textLatin1ContextAwareSwitchToPrimaryContext(UText* text, int64_t nativeIndex, int64_t nativeLength, UBool forward)
+{
+ ASSERT(!text->chunkContents || text->chunkContents == text->q);
+ text->chunkContents = static_cast<const UChar*>(text->pExtra);
+ textLatin1ContextAwareMoveInPrimaryContext(text, nativeIndex, nativeLength, forward);
+}
+
+static void textLatin1ContextAwareMoveInPriorContext(UText* text, int64_t nativeIndex, int64_t nativeLength, UBool forward)
+{
+ ASSERT(text->chunkContents == text->q);
+ ASSERT(forward ? nativeIndex < text->b : nativeIndex <= text->b);
+ ASSERT_UNUSED(nativeLength, forward ? nativeIndex < nativeLength : nativeIndex <= nativeLength);
+ ASSERT_UNUSED(forward, forward ? nativeIndex < nativeLength : nativeIndex <= nativeLength);
+ text->chunkNativeStart = 0;
+ text->chunkNativeLimit = text->b;
+ text->chunkLength = text->b;
+ text->nativeIndexingLimit = text->chunkLength;
+ int64_t offset = nativeIndex - text->chunkNativeStart;
+ // Ensure chunk offset is well defined if computed offset exceeds int32_t range or chunk length.
+ ASSERT(offset < std::numeric_limits<int32_t>::max());
+ text->chunkOffset = std::min(offset < std::numeric_limits<int32_t>::max() ? static_cast<int32_t>(offset) : 0, text->chunkLength);
+}
+
+static void textLatin1ContextAwareSwitchToPriorContext(UText* text, int64_t nativeIndex, int64_t nativeLength, UBool forward)
+{
+ ASSERT(!text->chunkContents || text->chunkContents == text->pExtra);
+ text->chunkContents = static_cast<const UChar*>(text->q);
+ textLatin1ContextAwareMoveInPriorContext(text, nativeIndex, nativeLength, forward);
+}
+
+static UText* uTextLatin1ContextAwareClone(UText* destination, const UText* source, UBool deep, UErrorCode* status)
+{
+ return uTextCloneImpl(destination, source, deep, status);
+}
+
+static int64_t uTextLatin1ContextAwareNativeLength(UText* text)
+{
+ return text->a + text->b;
+}
+
+static UBool uTextLatin1ContextAwareAccess(UText* text, int64_t nativeIndex, UBool forward)
+{
+ if (!text->context)
+ return FALSE;
+ int64_t nativeLength = uTextLatin1ContextAwareNativeLength(text);
+ UBool isAccessible;
+ if (uTextAccessInChunkOrOutOfRange(text, nativeIndex, nativeLength, forward, isAccessible))
+ return isAccessible;
+ nativeIndex = uTextAccessPinIndex(nativeIndex, nativeLength);
+ UTextProviderContext currentContext = textLatin1ContextAwareGetCurrentContext(text);
+ UTextProviderContext newContext = uTextProviderContext(text, nativeIndex, forward);
+ ASSERT(newContext != UTextProviderContext::NoContext);
+ if (newContext == currentContext) {
+ if (currentContext == UTextProviderContext::PrimaryContext)
+ textLatin1ContextAwareMoveInPrimaryContext(text, nativeIndex, nativeLength, forward);
+ else
+ textLatin1ContextAwareMoveInPriorContext(text, nativeIndex, nativeLength, forward);
+ } else if (newContext == UTextProviderContext::PrimaryContext)
+ textLatin1ContextAwareSwitchToPrimaryContext(text, nativeIndex, nativeLength, forward);
+ else {
+ ASSERT(newContext == UTextProviderContext::PriorContext);
+ textLatin1ContextAwareSwitchToPriorContext(text, nativeIndex, nativeLength, forward);
+ }
+ return TRUE;
+}
+
+static int32_t uTextLatin1ContextAwareExtract(UText*, int64_t, int64_t, UChar*, int32_t, UErrorCode* errorCode)
+{
+ // In the present context, this text provider is used only with ICU functions
+ // that do not perform an extract operation.
+ ASSERT_NOT_REACHED();
+ *errorCode = U_UNSUPPORTED_ERROR;
+ return 0;
+}
+
+static void uTextLatin1ContextAwareClose(UText* text)
+{
+ text->context = nullptr;
+}
+
+UText* openLatin1ContextAwareUTextProvider(UTextWithBuffer* utWithBuffer, const LChar* string, unsigned length, const UChar* priorContext, int priorContextLength, UErrorCode* status)
+{
+ if (U_FAILURE(*status))
+ return 0;
+ if (!string || length > static_cast<unsigned>(std::numeric_limits<int32_t>::max())) {
+ *status = U_ILLEGAL_ARGUMENT_ERROR;
+ return 0;
+ }
+ UText* text = utext_setup(&utWithBuffer->text, sizeof(utWithBuffer->buffer), status);
+ if (U_FAILURE(*status)) {
+ ASSERT(!text);
+ return 0;
+ }
+
+ initializeContextAwareUTextProvider(text, &textLatin1ContextAwareFuncs, string, length, priorContext, priorContextLength);
+ return text;
+}
+
+} // namespace WTF
diff --git a/Source/WTF/wtf/text/icu/UTextProviderLatin1.h b/Source/WTF/wtf/text/icu/UTextProviderLatin1.h
new file mode 100644
index 000000000..f17b34d56
--- /dev/null
+++ b/Source/WTF/wtf/text/icu/UTextProviderLatin1.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTextProviderLatin1_h
+#define UTextProviderLatin1_h
+
+#include <unicode/utext.h>
+#include <wtf/text/LChar.h>
+
+namespace WTF {
+
+const int UTextWithBufferInlineCapacity = 16;
+
+struct UTextWithBuffer {
+ UText text;
+ UChar buffer[UTextWithBufferInlineCapacity];
+};
+
+UText* openLatin1UTextProvider(UTextWithBuffer* utWithBuffer, const LChar* string, unsigned length, UErrorCode* status);
+UText* openLatin1ContextAwareUTextProvider(UTextWithBuffer* utWithBuffer, const LChar* string, unsigned length, const UChar* priorContext, int priorContextLength, UErrorCode* status);
+
+} // namespace WTF
+
+#endif // UTextProviderLatin1_h
diff --git a/Source/WTF/wtf/text/icu/UTextProviderUTF16.cpp b/Source/WTF/wtf/text/icu/UTextProviderUTF16.cpp
new file mode 100644
index 000000000..e1fc2eab9
--- /dev/null
+++ b/Source/WTF/wtf/text/icu/UTextProviderUTF16.cpp
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UTextProviderUTF16.h"
+
+#include "UTextProvider.h"
+#include <algorithm>
+
+namespace WTF {
+
+// UTF16ContextAware provider
+
+static UText* uTextUTF16ContextAwareClone(UText*, const UText*, UBool, UErrorCode*);
+static int64_t uTextUTF16ContextAwareNativeLength(UText*);
+static UBool uTextUTF16ContextAwareAccess(UText*, int64_t, UBool);
+static int32_t uTextUTF16ContextAwareExtract(UText*, int64_t, int64_t, UChar*, int32_t, UErrorCode*);
+static void uTextUTF16ContextAwareClose(UText*);
+
+static const struct UTextFuncs textUTF16ContextAwareFuncs = {
+ sizeof(UTextFuncs),
+ 0,
+ 0,
+ 0,
+ uTextUTF16ContextAwareClone,
+ uTextUTF16ContextAwareNativeLength,
+ uTextUTF16ContextAwareAccess,
+ uTextUTF16ContextAwareExtract,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ uTextUTF16ContextAwareClose,
+ nullptr,
+ nullptr,
+ nullptr
+};
+
+static inline UTextProviderContext textUTF16ContextAwareGetCurrentContext(const UText* text)
+{
+ if (!text->chunkContents)
+ return UTextProviderContext::NoContext;
+ return text->chunkContents == text->p ? UTextProviderContext::PrimaryContext : UTextProviderContext::PriorContext;
+}
+
+static void textUTF16ContextAwareMoveInPrimaryContext(UText* text, int64_t nativeIndex, int64_t nativeLength, UBool forward)
+{
+ ASSERT(text->chunkContents == text->p);
+ ASSERT_UNUSED(forward, forward ? nativeIndex >= text->b : nativeIndex > text->b);
+ ASSERT_UNUSED(forward, forward ? nativeIndex < nativeLength : nativeIndex <= nativeLength);
+ text->chunkNativeStart = text->b;
+ text->chunkNativeLimit = nativeLength;
+ int64_t length = text->chunkNativeLimit - text->chunkNativeStart;
+ // Ensure chunk length is well defined if computed length exceeds int32_t range.
+ ASSERT(length < std::numeric_limits<int32_t>::max());
+ text->chunkLength = length < std::numeric_limits<int32_t>::max() ? static_cast<int32_t>(length) : 0;
+ text->nativeIndexingLimit = text->chunkLength;
+ int64_t offset = nativeIndex - text->chunkNativeStart;
+ // Ensure chunk offset is well defined if computed offset exceeds int32_t range or chunk length.
+ ASSERT(offset < std::numeric_limits<int32_t>::max());
+ text->chunkOffset = std::min(offset < std::numeric_limits<int32_t>::max() ? static_cast<int32_t>(offset) : 0, text->chunkLength);
+}
+
+static void textUTF16ContextAwareSwitchToPrimaryContext(UText* text, int64_t nativeIndex, int64_t nativeLength, UBool forward)
+{
+ ASSERT(!text->chunkContents || text->chunkContents == text->q);
+ text->chunkContents = static_cast<const UChar*>(text->p);
+ textUTF16ContextAwareMoveInPrimaryContext(text, nativeIndex, nativeLength, forward);
+}
+
+static void textUTF16ContextAwareMoveInPriorContext(UText* text, int64_t nativeIndex, int64_t nativeLength, UBool forward)
+{
+ ASSERT(text->chunkContents == text->q);
+ ASSERT(forward ? nativeIndex < text->b : nativeIndex <= text->b);
+ ASSERT_UNUSED(nativeLength, forward ? nativeIndex < nativeLength : nativeIndex <= nativeLength);
+ ASSERT_UNUSED(forward, forward ? nativeIndex < nativeLength : nativeIndex <= nativeLength);
+ text->chunkNativeStart = 0;
+ text->chunkNativeLimit = text->b;
+ text->chunkLength = text->b;
+ text->nativeIndexingLimit = text->chunkLength;
+ int64_t offset = nativeIndex - text->chunkNativeStart;
+ // Ensure chunk offset is well defined if computed offset exceeds int32_t range or chunk length.
+ ASSERT(offset < std::numeric_limits<int32_t>::max());
+ text->chunkOffset = std::min(offset < std::numeric_limits<int32_t>::max() ? static_cast<int32_t>(offset) : 0, text->chunkLength);
+}
+
+static void textUTF16ContextAwareSwitchToPriorContext(UText* text, int64_t nativeIndex, int64_t nativeLength, UBool forward)
+{
+ ASSERT(!text->chunkContents || text->chunkContents == text->p);
+ text->chunkContents = static_cast<const UChar*>(text->q);
+ textUTF16ContextAwareMoveInPriorContext(text, nativeIndex, nativeLength, forward);
+}
+
+static UText* uTextUTF16ContextAwareClone(UText* destination, const UText* source, UBool deep, UErrorCode* status)
+{
+ return uTextCloneImpl(destination, source, deep, status);
+}
+
+static inline int64_t uTextUTF16ContextAwareNativeLength(UText* text)
+{
+ return text->a + text->b;
+}
+
+static UBool uTextUTF16ContextAwareAccess(UText* text, int64_t nativeIndex, UBool forward)
+{
+ if (!text->context)
+ return FALSE;
+ int64_t nativeLength = uTextUTF16ContextAwareNativeLength(text);
+ UBool isAccessible;
+ if (uTextAccessInChunkOrOutOfRange(text, nativeIndex, nativeLength, forward, isAccessible))
+ return isAccessible;
+ nativeIndex = uTextAccessPinIndex(nativeIndex, nativeLength);
+ UTextProviderContext currentContext = textUTF16ContextAwareGetCurrentContext(text);
+ UTextProviderContext newContext = uTextProviderContext(text, nativeIndex, forward);
+ ASSERT(newContext != UTextProviderContext::NoContext);
+ if (newContext == currentContext) {
+ if (currentContext == UTextProviderContext::PrimaryContext)
+ textUTF16ContextAwareMoveInPrimaryContext(text, nativeIndex, nativeLength, forward);
+ else
+ textUTF16ContextAwareMoveInPriorContext(text, nativeIndex, nativeLength, forward);
+ } else if (newContext == UTextProviderContext::PrimaryContext)
+ textUTF16ContextAwareSwitchToPrimaryContext(text, nativeIndex, nativeLength, forward);
+ else {
+ ASSERT(newContext == UTextProviderContext::PriorContext);
+ textUTF16ContextAwareSwitchToPriorContext(text, nativeIndex, nativeLength, forward);
+ }
+ return TRUE;
+}
+
+static int32_t uTextUTF16ContextAwareExtract(UText*, int64_t, int64_t, UChar*, int32_t, UErrorCode* errorCode)
+{
+ // In the present context, this text provider is used only with ICU functions
+ // that do not perform an extract operation.
+ ASSERT_NOT_REACHED();
+ *errorCode = U_UNSUPPORTED_ERROR;
+ return 0;
+}
+
+static void uTextUTF16ContextAwareClose(UText* text)
+{
+ text->context = nullptr;
+}
+
+UText* openUTF16ContextAwareUTextProvider(UText* text, const UChar* string, unsigned length, const UChar* priorContext, int priorContextLength, UErrorCode* status)
+{
+ if (U_FAILURE(*status))
+ return 0;
+ if (!string || length > static_cast<unsigned>(std::numeric_limits<int32_t>::max())) {
+ *status = U_ILLEGAL_ARGUMENT_ERROR;
+ return 0;
+ }
+ text = utext_setup(text, 0, status);
+ if (U_FAILURE(*status)) {
+ ASSERT(!text);
+ return 0;
+ }
+
+ initializeContextAwareUTextProvider(text, &textUTF16ContextAwareFuncs, string, length, priorContext, priorContextLength);
+ return text;
+}
+
+} // namespace WTF
diff --git a/Source/WTF/wtf/text/icu/UTextProviderUTF16.h b/Source/WTF/wtf/text/icu/UTextProviderUTF16.h
new file mode 100644
index 000000000..bcc2c2c8e
--- /dev/null
+++ b/Source/WTF/wtf/text/icu/UTextProviderUTF16.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTextProviderUTF16_h
+#define UTextProviderUTF16_h
+
+#include <unicode/utext.h>
+
+namespace WTF {
+
+UText* openUTF16ContextAwareUTextProvider(UText*, const UChar*, unsigned length, const UChar* priorContext, int priorContextLength, UErrorCode*);
+
+} // namespace WTF
+
+#endif // UTextProviderUTF16_h
diff --git a/Source/WTF/wtf/text/unix/TextBreakIteratorInternalICUUnix.cpp b/Source/WTF/wtf/text/unix/TextBreakIteratorInternalICUUnix.cpp
new file mode 100644
index 000000000..44983421c
--- /dev/null
+++ b/Source/WTF/wtf/text/unix/TextBreakIteratorInternalICUUnix.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2017 Igalia S.L.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#include "config.h"
+#include "TextBreakIteratorInternalICU.h"
+
+#include <locale.h>
+
+namespace WTF {
+
+const char* currentSearchLocaleID()
+{
+ if (auto* localeDefault = setlocale(LC_MESSAGES, nullptr))
+ return localeDefault;
+ return "";
+}
+
+const char* currentTextBreakLocaleID()
+{
+ if (auto* localeDefault = setlocale(LC_MESSAGES, nullptr))
+ return localeDefault;
+ return "en_us";
+}
+
+}
diff --git a/Source/WTF/wtf/threads/BinarySemaphore.cpp b/Source/WTF/wtf/threads/BinarySemaphore.cpp
index 5f60aaf22..f4452496a 100644
--- a/Source/WTF/wtf/threads/BinarySemaphore.cpp
+++ b/Source/WTF/wtf/threads/BinarySemaphore.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2010-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,8 +26,6 @@
#include "config.h"
#include "BinarySemaphore.h"
-#if !PLATFORM(WIN)
-
namespace WTF {
BinarySemaphore::BinarySemaphore()
@@ -47,13 +45,14 @@ void BinarySemaphore::signal()
m_condition.signal();
}
-bool BinarySemaphore::wait(double absoluteTime)
+bool BinarySemaphore::wait(TimeWithDynamicClockType absoluteTime)
{
MutexLocker locker(m_mutex);
bool timedOut = false;
while (!m_isSet) {
- timedOut = !m_condition.timedWait(m_mutex, absoluteTime);
+ timedOut = !m_condition.timedWait(
+ m_mutex, absoluteTime.approximateWallTime().secondsSinceEpoch().value());
if (timedOut)
return false;
}
@@ -64,5 +63,3 @@ bool BinarySemaphore::wait(double absoluteTime)
}
} // namespace WTF
-
-#endif // !PLATFORM(WIN)
diff --git a/Source/WTF/wtf/threads/BinarySemaphore.h b/Source/WTF/wtf/threads/BinarySemaphore.h
index cae10231b..bb00d776f 100644
--- a/Source/WTF/wtf/threads/BinarySemaphore.h
+++ b/Source/WTF/wtf/threads/BinarySemaphore.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2010-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,6 +28,7 @@
#include <wtf/Noncopyable.h>
#include <wtf/ThreadingPrimitives.h>
+#include <wtf/TimeWithDynamicClockType.h>
namespace WTF {
@@ -39,21 +40,13 @@ public:
WTF_EXPORT_PRIVATE ~BinarySemaphore();
WTF_EXPORT_PRIVATE void signal();
- WTF_EXPORT_PRIVATE bool wait(double absoluteTime);
-
-#if PLATFORM(WIN)
- HANDLE event() const { return m_event; }
-#endif
+ WTF_EXPORT_PRIVATE bool wait(TimeWithDynamicClockType);
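+
+ // Illustrative call (assumes the usual WTF clock types; wait() returns false if the
+ // deadline passes before signal() is called):
+ //     semaphore.wait(MonotonicTime::now() + Seconds(1));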
private:
-#if PLATFORM(WIN)
- HANDLE m_event;
-#else
bool m_isSet;
Mutex m_mutex;
ThreadCondition m_condition;
-#endif
};
} // namespace WTF
diff --git a/Source/WTF/wtf/unicode/CharacterNames.h b/Source/WTF/wtf/unicode/CharacterNames.h
index 8fc2fc28f..1c415be34 100644
--- a/Source/WTF/wtf/unicode/CharacterNames.h
+++ b/Source/WTF/wtf/unicode/CharacterNames.h
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -26,7 +26,7 @@
#ifndef CharacterNames_h
#define CharacterNames_h
-#include <wtf/unicode/Unicode.h>
+#include <unicode/utypes.h>
namespace WTF {
namespace Unicode {
@@ -37,8 +37,9 @@ namespace Unicode {
// more convenient for WebCore code that mostly uses UTF-16.
const UChar AppleLogo = 0xF8FF;
-const UChar32 aegeanWordSeparatorLine = 0x10100;
+const UChar HiraganaLetterSmallA = 0x3041;
const UChar32 aegeanWordSeparatorDot = 0x10101;
+const UChar32 aegeanWordSeparatorLine = 0x10100;
const UChar apostrophe = 0x0027;
const UChar blackCircle = 0x25CF;
const UChar blackSquare = 0x25A0;
@@ -48,11 +49,10 @@ const UChar bullseye = 0x25CE;
const UChar carriageReturn = 0x000D;
const UChar ethiopicPrefaceColon = 0x1366;
const UChar ethiopicWordspace = 0x1361;
+const UChar firstStrongIsolate = 0x2068;
const UChar fisheye = 0x25C9;
-const UChar quotationMark = 0x0022;
const UChar hebrewPunctuationGeresh = 0x05F3;
const UChar hebrewPunctuationGershayim = 0x05F4;
-const UChar HiraganaLetterSmallA = 0x3041;
const UChar horizontalEllipsis = 0x2026;
const UChar hyphen = 0x2010;
const UChar hyphenMinus = 0x002D;
@@ -62,25 +62,31 @@ const UChar ideographicSpace = 0x3000;
const UChar leftDoubleQuotationMark = 0x201C;
const UChar leftSingleQuotationMark = 0x2018;
const UChar leftToRightEmbed = 0x202A;
+const UChar leftToRightIsolate = 0x2066;
const UChar leftToRightMark = 0x200E;
const UChar leftToRightOverride = 0x202D;
const UChar minusSign = 0x2212;
+const UChar narrowNoBreakSpace = 0x202F;
+const UChar narrowNonBreakingSpace = 0x202F;
const UChar newlineCharacter = 0x000A;
const UChar noBreakSpace = 0x00A0;
const UChar objectReplacementCharacter = 0xFFFC;
const UChar popDirectionalFormatting = 0x202C;
+const UChar popDirectionalIsolate = 0x2069;
+const UChar quotationMark = 0x0022;
const UChar replacementCharacter = 0xFFFD;
const UChar rightDoubleQuotationMark = 0x201D;
const UChar rightSingleQuotationMark = 0x2019;
const UChar rightToLeftEmbed = 0x202B;
+const UChar rightToLeftIsolate = 0x2067;
const UChar rightToLeftMark = 0x200F;
const UChar rightToLeftOverride = 0x202E;
const UChar sesameDot = 0xFE45;
const UChar smallLetterSharpS = 0x00DF;
const UChar softHyphen = 0x00AD;
const UChar space = 0x0020;
-const UChar tibetanMarkIntersyllabicTsheg = 0x0F0B;
const UChar tibetanMarkDelimiterTshegBstar = 0x0F0C;
+const UChar tibetanMarkIntersyllabicTsheg = 0x0F0B;
const UChar32 ugariticWordDivider = 0x1039F;
const UChar whiteBullet = 0x25E6;
const UChar whiteCircle = 0x25CB;
@@ -88,16 +94,17 @@ const UChar whiteSesameDot = 0xFE46;
const UChar whiteUpPointingTriangle = 0x25B3;
const UChar yenSign = 0x00A5;
const UChar zeroWidthJoiner = 0x200D;
+const UChar zeroWidthNoBreakSpace = 0xFEFF;
const UChar zeroWidthNonJoiner = 0x200C;
const UChar zeroWidthSpace = 0x200B;
-const UChar zeroWidthNoBreakSpace = 0xFEFF;
} // namespace Unicode
} // namespace WTF
using WTF::Unicode::AppleLogo;
-using WTF::Unicode::aegeanWordSeparatorLine;
+using WTF::Unicode::HiraganaLetterSmallA;
using WTF::Unicode::aegeanWordSeparatorDot;
+using WTF::Unicode::aegeanWordSeparatorLine;
using WTF::Unicode::blackCircle;
using WTF::Unicode::blackSquare;
using WTF::Unicode::blackUpPointingTriangle;
@@ -106,10 +113,10 @@ using WTF::Unicode::bullseye;
using WTF::Unicode::carriageReturn;
using WTF::Unicode::ethiopicPrefaceColon;
using WTF::Unicode::ethiopicWordspace;
+using WTF::Unicode::firstStrongIsolate;
using WTF::Unicode::fisheye;
using WTF::Unicode::hebrewPunctuationGeresh;
using WTF::Unicode::hebrewPunctuationGershayim;
-using WTF::Unicode::HiraganaLetterSmallA;
using WTF::Unicode::horizontalEllipsis;
using WTF::Unicode::hyphen;
using WTF::Unicode::hyphenMinus;
@@ -119,24 +126,29 @@ using WTF::Unicode::ideographicSpace;
using WTF::Unicode::leftDoubleQuotationMark;
using WTF::Unicode::leftSingleQuotationMark;
using WTF::Unicode::leftToRightEmbed;
+using WTF::Unicode::leftToRightIsolate;
using WTF::Unicode::leftToRightMark;
using WTF::Unicode::leftToRightOverride;
using WTF::Unicode::minusSign;
+using WTF::Unicode::narrowNoBreakSpace;
+using WTF::Unicode::narrowNonBreakingSpace;
using WTF::Unicode::newlineCharacter;
using WTF::Unicode::noBreakSpace;
using WTF::Unicode::objectReplacementCharacter;
using WTF::Unicode::popDirectionalFormatting;
+using WTF::Unicode::popDirectionalIsolate;
using WTF::Unicode::replacementCharacter;
using WTF::Unicode::rightDoubleQuotationMark;
using WTF::Unicode::rightSingleQuotationMark;
using WTF::Unicode::rightToLeftEmbed;
+using WTF::Unicode::rightToLeftIsolate;
using WTF::Unicode::rightToLeftMark;
using WTF::Unicode::rightToLeftOverride;
using WTF::Unicode::sesameDot;
using WTF::Unicode::softHyphen;
using WTF::Unicode::space;
-using WTF::Unicode::tibetanMarkIntersyllabicTsheg;
using WTF::Unicode::tibetanMarkDelimiterTshegBstar;
+using WTF::Unicode::tibetanMarkIntersyllabicTsheg;
using WTF::Unicode::ugariticWordDivider;
using WTF::Unicode::whiteBullet;
using WTF::Unicode::whiteCircle;
@@ -144,8 +156,8 @@ using WTF::Unicode::whiteSesameDot;
using WTF::Unicode::whiteUpPointingTriangle;
using WTF::Unicode::yenSign;
using WTF::Unicode::zeroWidthJoiner;
+using WTF::Unicode::zeroWidthNoBreakSpace;
using WTF::Unicode::zeroWidthNonJoiner;
using WTF::Unicode::zeroWidthSpace;
-using WTF::Unicode::zeroWidthNoBreakSpace;
#endif // CharacterNames_h
diff --git a/Source/WTF/wtf/unicode/Collator.h b/Source/WTF/wtf/unicode/Collator.h
index 67486c7d0..75ff477a5 100644
--- a/Source/WTF/wtf/unicode/Collator.h
+++ b/Source/WTF/wtf/unicode/Collator.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -29,36 +29,49 @@
#ifndef WTF_Collator_h
#define WTF_Collator_h
+#include <unicode/uconfig.h>
#include <wtf/Noncopyable.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/unicode/Unicode.h>
+struct UCharIterator;
struct UCollator;
namespace WTF {
- class Collator {
- WTF_MAKE_NONCOPYABLE(Collator); WTF_MAKE_FAST_ALLOCATED;
- public:
- enum Result { Equal = 0, Greater = 1, Less = -1 };
+class StringView;
- WTF_EXPORT_PRIVATE Collator(const char* locale); // Parsing is lenient; e.g. language identifiers (such as "en-US") are accepted, too.
- WTF_EXPORT_PRIVATE ~Collator();
- WTF_EXPORT_PRIVATE void setOrderLowerFirst(bool);
+#if UCONFIG_NO_COLLATION
- WTF_EXPORT_PRIVATE static std::unique_ptr<Collator> userDefault();
+class Collator {
+public:
+ explicit Collator(const char* = nullptr, bool = false) { }
- WTF_EXPORT_PRIVATE Result collate(const ::UChar*, size_t, const ::UChar*, size_t) const;
+ WTF_EXPORT_PRIVATE static int collate(StringView, StringView);
+ WTF_EXPORT_PRIVATE static int collateUTF8(const char*, const char*);
+};
+
+#else
+
+class Collator {
+ WTF_MAKE_NONCOPYABLE(Collator);
+public:
+ // The value nullptr is a special one meaning the system default locale.
+ // Locale name parsing is lenient; e.g. language identifiers (such as "en-US") are accepted, too.
+ WTF_EXPORT_PRIVATE explicit Collator(const char* locale = nullptr, bool shouldSortLowercaseFirst = false);
+ WTF_EXPORT_PRIVATE ~Collator();
+
+ WTF_EXPORT_PRIVATE int collate(StringView, StringView) const;
+ WTF_EXPORT_PRIVATE int collateUTF8(const char*, const char*) const;
+
+private:
+ char* m_locale;
+ bool m_shouldSortLowercaseFirst;
+ UCollator* m_collator;
+};
+
+WTF_EXPORT_PRIVATE UCharIterator createIterator(StringView);
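+
+// Illustrative use: collate() results are strcmp-style (negative / zero / positive), so a
+// Collator can back a sort comparator directly, e.g. with StringView operands a and b:
+//     Collator collator; // nullptr locale selects the system default
+//     bool aSortsFirst = collator.collate(a, b) < 0;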
- private:
-#if !UCONFIG_NO_COLLATION
- void createCollator() const;
- void releaseCollator();
- mutable UCollator* m_collator;
#endif
- char* m_locale;
- bool m_lowerFirst;
- };
+
}
using WTF::Collator;
diff --git a/Source/WTF/wtf/unicode/CollatorDefault.cpp b/Source/WTF/wtf/unicode/CollatorDefault.cpp
index d56bb5a38..eab171bf2 100644
--- a/Source/WTF/wtf/unicode/CollatorDefault.cpp
+++ b/Source/WTF/wtf/unicode/CollatorDefault.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -33,41 +33,27 @@
namespace WTF {
-Collator::Collator(const char*)
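+// Default implementation for platforms that lack Unicode-aware collation:
+// plain code unit comparison, not locale-aware.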
+int Collator::collate(StringView a, StringView b) const
{
-}
-
-Collator::~Collator()
-{
-}
+ unsigned commonLength = std::min(a.length(), b.length());
+ for (unsigned i = 0; i < commonLength; ++i) {
+ if (a[i] < b[i])
+ return -1;
+ if (a[i] > b[i])
+ return 1;
+ }
-void Collator::setOrderLowerFirst(bool)
-{
-}
+ if (a.length() < b.length())
+ return -1;
+ if (a.length() > b.length())
+ return 1;
-std::unique_ptr<Collator> Collator::userDefault()
-{
- return std::make_unique<Collator>(nullptr);
+ return 0;
}
-// A default implementation for platforms that lack Unicode-aware collation.
-Collator::Result Collator::collate(const UChar* lhs, size_t lhsLength, const UChar* rhs, size_t rhsLength) const
+int Collator::collateUTF8(const char* a, const char* b) const
{
- int lmin = lhsLength < rhsLength ? lhsLength : rhsLength;
- int l = 0;
- while (l < lmin && *lhs == *rhs) {
- lhs++;
- rhs++;
- l++;
- }
-
- if (l < lmin)
- return (*lhs > *rhs) ? Greater : Less;
-
- if (lhsLength == rhsLength)
- return Equal;
-
- return (lhsLength > rhsLength) ? Greater : Less;
+ return collate(String::fromUTF8(a), String::fromUTF8(b));
}
}
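
When collation support is compiled out (UCONFIG_NO_COLLATION), the fallback above orders strings purely by code unit and then by length, with no locale awareness. A standalone re-expression of that logic on std::u16string_view, for illustration only:

    #include <algorithm>
    #include <string_view>

    // Mirrors the UCONFIG_NO_COLLATION fallback: code-unit order, then length.
    int collateCodeUnits(std::u16string_view a, std::u16string_view b)
    {
        size_t commonLength = std::min(a.size(), b.size());
        for (size_t i = 0; i < commonLength; ++i) {
            if (a[i] < b[i])
                return -1;
            if (a[i] > b[i])
                return 1;
        }
        if (a.size() < b.size())
            return -1;
        if (a.size() > b.size())
            return 1;
        return 0;
    }

For example, collateCodeUnits(u"resume", u"résumé") returns -1 because 'e' (U+0065) precedes 'é' (U+00E9) in code-unit order, whereas a locale-aware collator might rank the strings differently.
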
diff --git a/Source/WTF/wtf/unicode/ScriptCodesFromICU.h b/Source/WTF/wtf/unicode/ScriptCodesFromICU.h
deleted file mode 100644
index 4760399a1..000000000
--- a/Source/WTF/wtf/unicode/ScriptCodesFromICU.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (C) 1997-2006, International Business Machines
- * Corporation and others. All Rights Reserved.
- */
-
-#ifndef WTF_ScriptCodesFromICU_h
-#define WTF_ScriptCodesFromICU_h
-
-/**
- * Constants for ISO 15924 script codes.
- *
- * Many of these script codes - those from Unicode's ScriptNames.txt -
- * are character property values for Unicode's Script property.
- * See UAX #24 Script Names (http://www.unicode.org/reports/tr24/).
- *
- * Starting with ICU 3.6, constants for most ISO 15924 script codes
- * are included (currently excluding private-use codes Qaaa..Qabx).
- * For scripts for which there are codes in ISO 15924 but which are not
- * used in the Unicode Character Database (UCD), there are no Unicode characters
- * associated with those scripts.
- *
- * For example, there are no characters that have a UCD script code of
- * Hans or Hant. All Han ideographs have the Hani script code.
- * The Hans and Hant script codes are used with CLDR data.
- *
- * ISO 15924 script codes are included for use with CLDR and similar.
- *
- * @stable ICU 2.2
- */
-typedef enum UScriptCode {
- USCRIPT_INVALID_CODE = -1,
- USCRIPT_COMMON = 0 , /* Zyyy */
- USCRIPT_INHERITED = 1, /* Qaai */
- USCRIPT_ARABIC = 2, /* Arab */
- USCRIPT_ARMENIAN = 3, /* Armn */
- USCRIPT_BENGALI = 4, /* Beng */
- USCRIPT_BOPOMOFO = 5, /* Bopo */
- USCRIPT_CHEROKEE = 6, /* Cher */
- USCRIPT_COPTIC = 7, /* Copt */
- USCRIPT_CYRILLIC = 8, /* Cyrl */
- USCRIPT_DESERET = 9, /* Dsrt */
- USCRIPT_DEVANAGARI = 10, /* Deva */
- USCRIPT_ETHIOPIC = 11, /* Ethi */
- USCRIPT_GEORGIAN = 12, /* Geor */
- USCRIPT_GOTHIC = 13, /* Goth */
- USCRIPT_GREEK = 14, /* Grek */
- USCRIPT_GUJARATI = 15, /* Gujr */
- USCRIPT_GURMUKHI = 16, /* Guru */
- USCRIPT_HAN = 17, /* Hani */
- USCRIPT_HANGUL = 18, /* Hang */
- USCRIPT_HEBREW = 19, /* Hebr */
- USCRIPT_HIRAGANA = 20, /* Hira */
- USCRIPT_KANNADA = 21, /* Knda */
- USCRIPT_KATAKANA = 22, /* Kana */
- USCRIPT_KHMER = 23, /* Khmr */
- USCRIPT_LAO = 24, /* Laoo */
- USCRIPT_LATIN = 25, /* Latn */
- USCRIPT_MALAYALAM = 26, /* Mlym */
- USCRIPT_MONGOLIAN = 27, /* Mong */
- USCRIPT_MYANMAR = 28, /* Mymr */
- USCRIPT_OGHAM = 29, /* Ogam */
- USCRIPT_OLD_ITALIC = 30, /* Ital */
- USCRIPT_ORIYA = 31, /* Orya */
- USCRIPT_RUNIC = 32, /* Runr */
- USCRIPT_SINHALA = 33, /* Sinh */
- USCRIPT_SYRIAC = 34, /* Syrc */
- USCRIPT_TAMIL = 35, /* Taml */
- USCRIPT_TELUGU = 36, /* Telu */
- USCRIPT_THAANA = 37, /* Thaa */
- USCRIPT_THAI = 38, /* Thai */
- USCRIPT_TIBETAN = 39, /* Tibt */
- /** Canadian_Aboriginal script. @stable ICU 2.6 */
- USCRIPT_CANADIAN_ABORIGINAL = 40, /* Cans */
- /** Canadian_Aboriginal script (alias). @stable ICU 2.2 */
- USCRIPT_UCAS = USCRIPT_CANADIAN_ABORIGINAL,
- USCRIPT_YI = 41, /* Yiii */
- USCRIPT_TAGALOG = 42, /* Tglg */
- USCRIPT_HANUNOO = 43, /* Hano */
- USCRIPT_BUHID = 44, /* Buhd */
- USCRIPT_TAGBANWA = 45, /* Tagb */
-
- /* New scripts in Unicode 4 @stable ICU 2.6 */
- USCRIPT_BRAILLE = 46, /* Brai */
- USCRIPT_CYPRIOT = 47, /* Cprt */
- USCRIPT_LIMBU = 48, /* Limb */
- USCRIPT_LINEAR_B = 49, /* Linb */
- USCRIPT_OSMANYA = 50, /* Osma */
- USCRIPT_SHAVIAN = 51, /* Shaw */
- USCRIPT_TAI_LE = 52, /* Tale */
- USCRIPT_UGARITIC = 53, /* Ugar */
-
- /** New script code in Unicode 4.0.1 @stable ICU 3.0 */
- USCRIPT_KATAKANA_OR_HIRAGANA = 54,/*Hrkt */
-
-#ifndef U_HIDE_DRAFT_API
- /* New scripts in Unicode 4.1 @draft ICU 3.4 */
- USCRIPT_BUGINESE = 55, /* Bugi */
- USCRIPT_GLAGOLITIC = 56, /* Glag */
- USCRIPT_KHAROSHTHI = 57, /* Khar */
- USCRIPT_SYLOTI_NAGRI = 58, /* Sylo */
- USCRIPT_NEW_TAI_LUE = 59, /* Talu */
- USCRIPT_TIFINAGH = 60, /* Tfng */
- USCRIPT_OLD_PERSIAN = 61, /* Xpeo */
-
- /* New script codes from ISO 15924 @draft ICU 3.6 */
- USCRIPT_BALINESE = 62, /* Bali */
- USCRIPT_BATAK = 63, /* Batk */
- USCRIPT_BLISSYMBOLS = 64, /* Blis */
- USCRIPT_BRAHMI = 65, /* Brah */
- USCRIPT_CHAM = 66, /* Cham */
- USCRIPT_CIRTH = 67, /* Cirt */
- USCRIPT_OLD_CHURCH_SLAVONIC_CYRILLIC = 68, /* Cyrs */
- USCRIPT_DEMOTIC_EGYPTIAN = 69, /* Egyd */
- USCRIPT_HIERATIC_EGYPTIAN = 70, /* Egyh */
- USCRIPT_EGYPTIAN_HIEROGLYPHS = 71, /* Egyp */
- USCRIPT_KHUTSURI = 72, /* Geok */
- USCRIPT_SIMPLIFIED_HAN = 73, /* Hans */
- USCRIPT_TRADITIONAL_HAN = 74, /* Hant */
- USCRIPT_PAHAWH_HMONG = 75, /* Hmng */
- USCRIPT_OLD_HUNGARIAN = 76, /* Hung */
- USCRIPT_HARAPPAN_INDUS = 77, /* Inds */
- USCRIPT_JAVANESE = 78, /* Java */
- USCRIPT_KAYAH_LI = 79, /* Kali */
- USCRIPT_LATIN_FRAKTUR = 80, /* Latf */
- USCRIPT_LATIN_GAELIC = 81, /* Latg */
- USCRIPT_LEPCHA = 82, /* Lepc */
- USCRIPT_LINEAR_A = 83, /* Lina */
- USCRIPT_MANDAEAN = 84, /* Mand */
- USCRIPT_MAYAN_HIEROGLYPHS = 85, /* Maya */
- USCRIPT_MEROITIC = 86, /* Mero */
- USCRIPT_NKO = 87, /* Nkoo */
- USCRIPT_ORKHON = 88, /* Orkh */
- USCRIPT_OLD_PERMIC = 89, /* Perm */
- USCRIPT_PHAGS_PA = 90, /* Phag */
- USCRIPT_PHOENICIAN = 91, /* Phnx */
- USCRIPT_PHONETIC_POLLARD = 92, /* Plrd */
- USCRIPT_RONGORONGO = 93, /* Roro */
- USCRIPT_SARATI = 94, /* Sara */
- USCRIPT_ESTRANGELO_SYRIAC = 95, /* Syre */
- USCRIPT_WESTERN_SYRIAC = 96, /* Syrj */
- USCRIPT_EASTERN_SYRIAC = 97, /* Syrn */
- USCRIPT_TENGWAR = 98, /* Teng */
- USCRIPT_VAI = 99, /* Vaii */
- USCRIPT_VISIBLE_SPEECH = 100, /* Visp */
- USCRIPT_CUNEIFORM = 101,/* Xsux */
- USCRIPT_UNWRITTEN_LANGUAGES = 102,/* Zxxx */
- USCRIPT_UNKNOWN = 103,/* Zzzz */ /* Unknown="Code for uncoded script", for unassigned code points */
- /* Private use codes from Qaaa - Qabx are not supported*/
-#endif /* U_HIDE_DRAFT_API */
- USCRIPT_CODE_LIMIT = 104
-} UScriptCode;
-
-#endif
diff --git a/Source/WTF/wtf/unicode/UTF8.cpp b/Source/WTF/wtf/unicode/UTF8.cpp
index 1a12e1241..5407401e6 100644
--- a/Source/WTF/wtf/unicode/UTF8.cpp
+++ b/Source/WTF/wtf/unicode/UTF8.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2007, 2014 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -11,10 +11,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -28,7 +28,7 @@
#include "UTF8.h"
#include "ASCIICType.h"
-#include <wtf/StringHasher.h>
+#include <wtf/Hasher.h>
#include <wtf/unicode/CharacterNames.h>
namespace WTF {
@@ -420,10 +420,10 @@ unsigned calculateStringHashAndLengthFromUTF8MaskingTop8Bits(const char* data, c
return stringHasher.hashWithTop8BitsMasked();
}
-bool equalUTF16WithUTF8(const UChar* a, const UChar* aEnd, const char* b, const char* bEnd)
+bool equalUTF16WithUTF8(const UChar* a, const char* b, const char* bEnd)
{
while (b < bEnd) {
- if (isASCII(*b)) {
+ if (isASCII(*a) || isASCII(*b)) {
if (*a++ != *b++)
return false;
continue;
@@ -435,7 +435,7 @@ bool equalUTF16WithUTF8(const UChar* a, const UChar* aEnd, const char* b, const
return false;
if (!isLegalUTF8(reinterpret_cast<const unsigned char*>(b), utf8SequenceLength))
- return 0;
+ return false;
UChar32 character = readUTF8Sequence(b, utf8SequenceLength);
ASSERT(!isASCII(character));
@@ -455,7 +455,33 @@ bool equalUTF16WithUTF8(const UChar* a, const UChar* aEnd, const char* b, const
return false;
}
- return a == aEnd;
+ return true;
+}
+
+bool equalLatin1WithUTF8(const LChar* a, const char* b, const char* bEnd)
+{
+ while (b < bEnd) {
+ if (isASCII(*a) || isASCII(*b)) {
+ if (*a++ != *b++)
+ return false;
+ continue;
+ }
+
+ if (b + 1 == bEnd)
+ return false;
+
+ if ((b[0] & 0xE0) != 0xC0 || (b[1] & 0xC0) != 0x80)
+ return false;
+
+ LChar character = ((b[0] & 0x1F) << 6) | (b[1] & 0x3F);
+
+ b += 2;
+
+ if (*a++ != character)
+ return false;
+ }
+
+ return true;
}
} // namespace Unicode
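
The two-byte decode in equalLatin1WithUTF8 above relies on every non-ASCII Latin-1 code point (U+0080..U+00FF) encoding as exactly two UTF-8 bytes. A standalone check of that arithmetic, using 'é' as a sample value:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // U+00E9 ('é') is the UTF-8 byte sequence 0xC3 0xA9.
        const unsigned char b[] = { 0xC3, 0xA9 };

        assert((b[0] & 0xE0) == 0xC0); // lead byte of a two-byte sequence
        assert((b[1] & 0xC0) == 0x80); // trail byte

        // Same masks and shift as the code above: 5 bits from the lead byte,
        // 6 bits from the trail byte.
        uint8_t character = ((b[0] & 0x1F) << 6) | (b[1] & 0x3F);
        assert(character == 0xE9); // the Latin-1 code point for 'é'
        return 0;
    }
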
diff --git a/Source/WTF/wtf/unicode/UTF8.h b/Source/WTF/wtf/unicode/UTF8.h
index e95cc1288..354aad71e 100644
--- a/Source/WTF/wtf/unicode/UTF8.h
+++ b/Source/WTF/wtf/unicode/UTF8.h
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -26,7 +26,8 @@
#ifndef WTF_UTF8_h
#define WTF_UTF8_h
-#include <wtf/unicode/Unicode.h>
+#include <unicode/utypes.h>
+#include <wtf/text/LChar.h>
namespace WTF {
namespace Unicode {
@@ -76,7 +77,9 @@ namespace Unicode {
WTF_EXPORT_PRIVATE unsigned calculateStringHashAndLengthFromUTF8MaskingTop8Bits(const char* data, const char* dataEnd, unsigned& dataLength, unsigned& utf16Length);
- WTF_EXPORT_PRIVATE bool equalUTF16WithUTF8(const UChar* a, const UChar* aEnd, const char* b, const char* bEnd);
+ // The caller of these functions already knows that the lengths are the same, so we omit an end argument for UTF-16 and Latin-1.
+ bool equalUTF16WithUTF8(const UChar* stringInUTF16, const char* stringInUTF8, const char* stringInUTF8End);
+ bool equalLatin1WithUTF8(const LChar* stringInLatin1, const char* stringInUTF8, const char* stringInUTF8End);
} // namespace Unicode
} // namespace WTF
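
As the comment above states, callers are expected to have already established that the two strings decode to the same number of characters, which is why only the UTF-8 end pointer is passed. A hedged sketch of that calling convention; the wrapper name and parameters are hypothetical:

    #include <cstddef>
    #include <wtf/unicode/UTF8.h>

    // Hypothetical caller: the UTF-8 data is already known to decode to exactly
    // as many characters as the Latin-1 buffer holds, so no Latin-1 end pointer
    // is needed.
    static bool latin1MatchesUTF8(const LChar* latin1, const char* utf8, size_t utf8Length)
    {
        return WTF::Unicode::equalLatin1WithUTF8(latin1, utf8, utf8 + utf8Length);
    }
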
diff --git a/Source/WTF/wtf/unicode/Unicode.h b/Source/WTF/wtf/unicode/Unicode.h
deleted file mode 100644
index 84f777342..000000000
--- a/Source/WTF/wtf/unicode/Unicode.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2006 George Staikos <staikos@kde.org>
- * Copyright (C) 2006, 2008, 2009 Apple Inc. All rights reserved.
- * Copyright (C) 2007-2009 Torch Mobile, Inc.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public License
- * along with this library; see the file COPYING.LIB. If not, write to
- * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- *
- */
-
-#ifndef WTF_UNICODE_H
-#define WTF_UNICODE_H
-
-#include <wtf/Assertions.h>
-
-// Define platform neutral 8 bit character type (L is for Latin-1).
-typedef unsigned char LChar;
-
-#include <wtf/unicode/icu/UnicodeIcu.h>
-
-static_assert(sizeof(UChar) == 2, "UChar must be two bytes!");
-
-#endif // WTF_UNICODE_H
diff --git a/Source/WTF/wtf/unicode/UnicodeMacrosFromICU.h b/Source/WTF/wtf/unicode/UnicodeMacrosFromICU.h
deleted file mode 100644
index 09a7036e3..000000000
--- a/Source/WTF/wtf/unicode/UnicodeMacrosFromICU.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 1999-2004, International Business Machines Corporation and others. All Rights Reserved.
- *
- */
-
-#ifndef UnicodeMacrosFromICU_h
-#define UnicodeMacrosFromICU_h
-
-// some defines from ICU
-
-#define U_IS_BMP(c) ((UChar32)(c)<=0xffff)
-#define U16_IS_LEAD(c) (((c)&0xfffffc00)==0xd800)
-#define U16_IS_TRAIL(c) (((c)&0xfffffc00)==0xdc00)
-#define U16_SURROGATE_OFFSET ((0xd800<<10UL)+0xdc00-0x10000)
-#define U16_GET_SUPPLEMENTARY(lead, trail) \
- (((UChar32)(lead)<<10UL)+(UChar32)(trail)-U16_SURROGATE_OFFSET)
-
-#define U16_LEAD(supplementary) (UChar)(((supplementary)>>10)+0xd7c0)
-#define U16_TRAIL(supplementary) (UChar)(((supplementary)&0x3ff)|0xdc00)
-#define U16_LENGTH(c) ((uint32_t)(c) <= 0xffff ? 1 : 2)
-
-#define U_IS_SUPPLEMENTARY(c) ((UChar32)((c)-0x10000)<=0xfffff)
-#define U_IS_SURROGATE(c) (((c)&0xfffff800)==0xd800)
-#define U16_IS_SINGLE(c) !U_IS_SURROGATE(c)
-#define U16_IS_SURROGATE(c) U_IS_SURROGATE(c)
-#define U16_IS_SURROGATE_LEAD(c) (((c)&0x400)==0)
-
-#define U16_GET(s, start, i, length, c) { \
- (c)=(s)[i]; \
- if(U16_IS_SURROGATE(c)) { \
- uint16_t __c2; \
- if(U16_IS_SURROGATE_LEAD(c)) { \
- if((i)+1<(length) && U16_IS_TRAIL(__c2=(s)[(i)+1])) { \
- (c)=U16_GET_SUPPLEMENTARY((c), __c2); \
- } \
- } else { \
- if((i)-1>=(start) && U16_IS_LEAD(__c2=(s)[(i)-1])) { \
- (c)=U16_GET_SUPPLEMENTARY(__c2, (c)); \
- } \
- } \
- } \
-}
-
-#define U16_PREV(s, start, i, c) { \
- (c)=(s)[--(i)]; \
- if(U16_IS_TRAIL(c)) { \
- uint16_t __c2; \
- if((i)>(start) && U16_IS_LEAD(__c2=(s)[(i)-1])) { \
- --(i); \
- (c)=U16_GET_SUPPLEMENTARY(__c2, (c)); \
- } \
- } \
-}
-
-#define U16_BACK_1(s, start, i) { \
- if(U16_IS_TRAIL((s)[--(i)]) && (i)>(start) && U16_IS_LEAD((s)[(i)-1])) { \
- --(i); \
- } \
-}
-
-#define U16_NEXT(s, i, length, c) { \
- (c)=(s)[(i)++]; \
- if(U16_IS_LEAD(c)) { \
- uint16_t __c2; \
- if((i)<(length) && U16_IS_TRAIL(__c2=(s)[(i)])) { \
- ++(i); \
- (c)=U16_GET_SUPPLEMENTARY((c), __c2); \
- } \
- } \
-}
-
-#define U16_FWD_1(s, i, length) { \
- if(U16_IS_LEAD((s)[(i)++]) && (i)<(length) && U16_IS_TRAIL((s)[i])) { \
- ++(i); \
- } \
-}
-
-#define U_MASK(x) ((uint32_t)1<<(x))
-
-#define U8_MAX_LENGTH 4
-
-#define U8_APPEND_UNSAFE(s, i, c) { \
- if((uint32_t)(c)<=0x7f) { \
- (s)[(i)++]=(uint8_t)(c); \
- } else { \
- if((uint32_t)(c)<=0x7ff) { \
- (s)[(i)++]=(uint8_t)(((c)>>6)|0xc0); \
- } else { \
- if((uint32_t)(c)<=0xffff) { \
- (s)[(i)++]=(uint8_t)(((c)>>12)|0xe0); \
- } else { \
- (s)[(i)++]=(uint8_t)(((c)>>18)|0xf0); \
- (s)[(i)++]=(uint8_t)((((c)>>12)&0x3f)|0x80); \
- } \
- (s)[(i)++]=(uint8_t)((((c)>>6)&0x3f)|0x80); \
- } \
- (s)[(i)++]=(uint8_t)(((c)&0x3f)|0x80); \
- } \
-}
-#endif
diff --git a/Source/WTF/wtf/unicode/icu/CollatorICU.cpp b/Source/WTF/wtf/unicode/icu/CollatorICU.cpp
index e4059bcdc..26a4d8386 100644
--- a/Source/WTF/wtf/unicode/icu/CollatorICU.cpp
+++ b/Source/WTF/wtf/unicode/icu/CollatorICU.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -29,129 +29,246 @@
#include "config.h"
#include <wtf/unicode/Collator.h>
+// FIXME: Merge this with CollatorDefault.cpp into a single Collator.cpp source file.
+
#if !UCONFIG_NO_COLLATION
#include <mutex>
-#include <wtf/Assertions.h>
-#include <wtf/StringExtras.h>
#include <unicode/ucol.h>
-#include <string.h>
+#include <wtf/Lock.h>
+#include <wtf/StringExtras.h>
+#include <wtf/text/StringView.h>
-#if OS(DARWIN)
-#include <wtf/RetainPtr.h>
+#if OS(DARWIN) && USE(CF)
#include <CoreFoundation/CoreFoundation.h>
+#include <wtf/RetainPtr.h>
#endif
namespace WTF {
static UCollator* cachedCollator;
+static char* cachedCollatorLocale;
+static bool cachedCollatorShouldSortLowercaseFirst;
-static std::mutex& cachedCollatorMutex()
-{
- static std::once_flag onceFlag;
- static std::mutex* mutex;
- std::call_once(onceFlag, []{
- mutex = std::make_unique<std::mutex>().release();
- });
+static StaticLock cachedCollatorMutex;
+
+#if !(OS(DARWIN) && USE(CF))
- return *mutex;
+static inline const char* resolveDefaultLocale(const char* locale)
+{
+ return locale;
}
-Collator::Collator(const char* locale)
- : m_collator(0)
- , m_locale(locale ? strdup(locale) : 0)
- , m_lowerFirst(false)
+#else
+
+static inline char* copyShortASCIIString(CFStringRef string)
{
+ // OK to have a fixed size buffer and to only handle ASCII since we only use this for locale names.
+ char buffer[256];
+ if (!string || !CFStringGetCString(string, buffer, sizeof(buffer), kCFStringEncodingASCII))
+ return strdup("");
+ return strdup(buffer);
}
-std::unique_ptr<Collator> Collator::userDefault()
+static char* copyDefaultLocale()
{
-#if OS(DARWIN) && USE(CF)
- // Mac OS X doesn't set UNIX locale to match user-selected one, so ICU default doesn't work.
-#if !OS(IOS)
- RetainPtr<CFLocaleRef> currentLocale = adoptCF(CFLocaleCopyCurrent());
- CFStringRef collationOrder = (CFStringRef)CFLocaleGetValue(currentLocale.get(), kCFLocaleCollatorIdentifier);
+#if !PLATFORM(IOS)
+ return copyShortASCIIString(static_cast<CFStringRef>(CFLocaleGetValue(adoptCF(CFLocaleCopyCurrent()).get(), kCFLocaleCollatorIdentifier)));
#else
- RetainPtr<CFStringRef> collationOrderRetainer = adoptCF((CFStringRef)CFPreferencesCopyValue(CFSTR("AppleCollationOrder"), kCFPreferencesAnyApplication, kCFPreferencesCurrentUser, kCFPreferencesAnyHost));
- CFStringRef collationOrder = collationOrderRetainer.get();
-#endif
- char buf[256];
- if (!collationOrder)
- return std::make_unique<Collator>("");
- CFStringGetCString(collationOrder, buf, sizeof(buf), kCFStringEncodingASCII);
- return std::make_unique<Collator>(buf);
-#else
- return std::make_unique<Collator>(static_cast<const char*>(0));
+    // FIXME: Documentation claims the code above would work on iOS 4.0 and later. Once we have verified that it does, we should remove this and use that code instead.
+ return copyShortASCIIString(adoptCF(static_cast<CFStringRef>(CFPreferencesCopyValue(CFSTR("AppleCollationOrder"), kCFPreferencesAnyApplication, kCFPreferencesCurrentUser, kCFPreferencesAnyHost))).get());
#endif
}
-Collator::~Collator()
+static inline const char* resolveDefaultLocale(const char* locale)
{
- releaseCollator();
- free(m_locale);
+ if (locale)
+ return locale;
+    // Since iOS and OS X don't set the UNIX locale to match the user's selected locale, the ICU default locale is not the right one.
+ // So, instead of passing null to ICU, we pass the name of the user's selected locale.
+ static char* defaultLocale;
+ static std::once_flag initializeDefaultLocaleOnce;
+ std::call_once(initializeDefaultLocaleOnce, []{
+ defaultLocale = copyDefaultLocale();
+ });
+ return defaultLocale;
}
-void Collator::setOrderLowerFirst(bool lowerFirst)
-{
- m_lowerFirst = lowerFirst;
-}
+#endif
-Collator::Result Collator::collate(const UChar* lhs, size_t lhsLength, const UChar* rhs, size_t rhsLength) const
+static inline bool localesMatch(const char* a, const char* b)
{
- if (!m_collator)
- createCollator();
-
- return static_cast<Result>(ucol_strcoll(m_collator, lhs, lhsLength, rhs, rhsLength));
+    // Two null locales are equal; other locales are compared with strcmp.
+ return a == b || (a && b && !strcmp(a, b));
}
-void Collator::createCollator() const
+Collator::Collator(const char* locale, bool shouldSortLowercaseFirst)
{
- ASSERT(!m_collator);
UErrorCode status = U_ZERO_ERROR;
{
- std::lock_guard<std::mutex> lock(cachedCollatorMutex());
- if (cachedCollator) {
- const char* cachedCollatorLocale = ucol_getLocaleByType(cachedCollator, ULOC_REQUESTED_LOCALE, &status);
- ASSERT(U_SUCCESS(status));
- ASSERT(cachedCollatorLocale);
-
- UColAttributeValue cachedCollatorLowerFirst = ucol_getAttribute(cachedCollator, UCOL_CASE_FIRST, &status);
- ASSERT(U_SUCCESS(status));
-
- // FIXME: default locale is never matched, because ucol_getLocaleByType returns the actual one used, not 0.
- if (m_locale && 0 == strcmp(cachedCollatorLocale, m_locale)
- && ((UCOL_LOWER_FIRST == cachedCollatorLowerFirst && m_lowerFirst) || (UCOL_UPPER_FIRST == cachedCollatorLowerFirst && !m_lowerFirst))) {
- m_collator = cachedCollator;
- cachedCollator = nullptr;
- return;
- }
+ std::lock_guard<StaticLock> lock(cachedCollatorMutex);
+ if (cachedCollator && localesMatch(cachedCollatorLocale, locale) && cachedCollatorShouldSortLowercaseFirst == shouldSortLowercaseFirst) {
+ m_collator = cachedCollator;
+ m_locale = cachedCollatorLocale;
+ m_shouldSortLowercaseFirst = shouldSortLowercaseFirst;
+ cachedCollator = nullptr;
+ cachedCollatorLocale = nullptr;
+ return;
}
}
- m_collator = ucol_open(m_locale, &status);
+ m_collator = ucol_open(resolveDefaultLocale(locale), &status);
if (U_FAILURE(status)) {
status = U_ZERO_ERROR;
- m_collator = ucol_open("", &status); // Fallback to Unicode Collation Algorithm.
+ m_collator = ucol_open("", &status); // Fall back to Unicode Collation Algorithm.
}
ASSERT(U_SUCCESS(status));
- ucol_setAttribute(m_collator, UCOL_CASE_FIRST, m_lowerFirst ? UCOL_LOWER_FIRST : UCOL_UPPER_FIRST, &status);
+ ucol_setAttribute(m_collator, UCOL_CASE_FIRST, shouldSortLowercaseFirst ? UCOL_LOWER_FIRST : UCOL_UPPER_FIRST, &status);
ASSERT(U_SUCCESS(status));
ucol_setAttribute(m_collator, UCOL_NORMALIZATION_MODE, UCOL_ON, &status);
ASSERT(U_SUCCESS(status));
+
+ m_locale = locale ? fastStrDup(locale) : nullptr;
+ m_shouldSortLowercaseFirst = shouldSortLowercaseFirst;
}
-void Collator::releaseCollator()
+Collator::~Collator()
{
- {
- std::lock_guard<std::mutex> lock(cachedCollatorMutex());
- if (cachedCollator)
- ucol_close(cachedCollator);
- cachedCollator = m_collator;
- m_collator = nullptr;
+ std::lock_guard<StaticLock> lock(cachedCollatorMutex);
+ if (cachedCollator) {
+ ucol_close(cachedCollator);
+ fastFree(cachedCollatorLocale);
+ }
+ cachedCollator = m_collator;
+ cachedCollatorLocale = m_locale;
+ cachedCollatorShouldSortLowercaseFirst = m_shouldSortLowercaseFirst;
+}
+
+static int32_t getIndexLatin1(UCharIterator* iterator, UCharIteratorOrigin origin)
+{
+ switch (origin) {
+ case UITER_START:
+ return iterator->start;
+ case UITER_CURRENT:
+ return iterator->index;
+ case UITER_LIMIT:
+ return iterator->limit;
+ case UITER_ZERO:
+ return 0;
+ case UITER_LENGTH:
+ return iterator->length;
}
+ ASSERT_NOT_REACHED();
+ return U_SENTINEL;
+}
+
+static int32_t moveLatin1(UCharIterator* iterator, int32_t delta, UCharIteratorOrigin origin)
+{
+ return iterator->index = getIndexLatin1(iterator, origin) + delta;
+}
+
+static UBool hasNextLatin1(UCharIterator* iterator)
+{
+ return iterator->index < iterator->limit;
+}
+
+static UBool hasPreviousLatin1(UCharIterator* iterator)
+{
+ return iterator->index > iterator->start;
+}
+
+static UChar32 currentLatin1(UCharIterator* iterator)
+{
+ ASSERT(iterator->index >= iterator->start);
+ if (iterator->index >= iterator->limit)
+ return U_SENTINEL;
+ return static_cast<const LChar*>(iterator->context)[iterator->index];
+}
+
+static UChar32 nextLatin1(UCharIterator* iterator)
+{
+ ASSERT(iterator->index >= iterator->start);
+ if (iterator->index >= iterator->limit)
+ return U_SENTINEL;
+ return static_cast<const LChar*>(iterator->context)[iterator->index++];
+}
+
+static UChar32 previousLatin1(UCharIterator* iterator)
+{
+ if (iterator->index <= iterator->start)
+ return U_SENTINEL;
+ return static_cast<const LChar*>(iterator->context)[--iterator->index];
+}
+
+static uint32_t getStateLatin1(const UCharIterator* iterator)
+{
+ return iterator->index;
+}
+
+static void setStateLatin1(UCharIterator* iterator, uint32_t state, UErrorCode*)
+{
+ iterator->index = state;
+}
+
+static UCharIterator createLatin1Iterator(const LChar* characters, int length)
+{
+ UCharIterator iterator;
+ iterator.context = characters;
+ iterator.length = length;
+ iterator.start = 0;
+ iterator.index = 0;
+ iterator.limit = length;
+ iterator.reservedField = 0;
+ iterator.getIndex = getIndexLatin1;
+ iterator.move = moveLatin1;
+ iterator.hasNext = hasNextLatin1;
+ iterator.hasPrevious = hasPreviousLatin1;
+ iterator.current = currentLatin1;
+ iterator.next = nextLatin1;
+ iterator.previous = previousLatin1;
+ iterator.reservedFn = nullptr;
+ iterator.getState = getStateLatin1;
+ iterator.setState = setStateLatin1;
+ return iterator;
+}
+
+UCharIterator createIterator(StringView string)
+{
+ if (string.is8Bit())
+ return createLatin1Iterator(string.characters8(), string.length());
+ UCharIterator iterator;
+ uiter_setString(&iterator, string.characters16(), string.length());
+ return iterator;
+}
+
+int Collator::collate(StringView a, StringView b) const
+{
+ UCharIterator iteratorA = createIterator(a);
+ UCharIterator iteratorB = createIterator(b);
+ UErrorCode status = U_ZERO_ERROR;
+ int result = ucol_strcollIter(m_collator, &iteratorA, &iteratorB, &status);
+ ASSERT(U_SUCCESS(status));
+ return result;
+}
+
+static UCharIterator createIteratorUTF8(const char* string)
+{
+ UCharIterator iterator;
+ uiter_setUTF8(&iterator, string, strlen(string));
+ return iterator;
+}
+
+int Collator::collateUTF8(const char* a, const char* b) const
+{
+ UCharIterator iteratorA = createIteratorUTF8(a);
+ UCharIterator iteratorB = createIteratorUTF8(b);
+ UErrorCode status = U_ZERO_ERROR;
+ int result = ucol_strcollIter(m_collator, &iteratorA, &iteratorB, &status);
+ ASSERT(U_SUCCESS(status));
+ return result;
}
} // namespace WTF
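
The iterator-based comparison above can also be exercised directly against ICU, outside the WTF caching layer; this sketch mirrors Collator::collateUTF8 with plain icu-i18n calls (the error handling and locale argument are illustrative):

    #include <cstring>
    #include <unicode/ucol.h>
    #include <unicode/uiter.h>

    // Compare two NUL-terminated UTF-8 strings the same way collateUTF8 above
    // does, but opening and closing the collator on every call.
    int collateUTF8WithICU(const char* locale, const char* a, const char* b)
    {
        UErrorCode status = U_ZERO_ERROR;
        UCollator* collator = ucol_open(locale, &status);
        if (U_FAILURE(status))
            return 0; // Illustrative only; real code would report the error.

        ucol_setAttribute(collator, UCOL_NORMALIZATION_MODE, UCOL_ON, &status);

        UCharIterator iteratorA;
        UCharIterator iteratorB;
        uiter_setUTF8(&iteratorA, a, static_cast<int32_t>(strlen(a)));
        uiter_setUTF8(&iteratorB, b, static_cast<int32_t>(strlen(b)));

        int result = ucol_strcollIter(collator, &iteratorA, &iteratorB, &status);
        ucol_close(collator);
        return result;
    }
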
diff --git a/Source/WTF/wtf/unicode/icu/UnicodeIcu.h b/Source/WTF/wtf/unicode/icu/UnicodeIcu.h
deleted file mode 100644
index 0a386b59d..000000000
--- a/Source/WTF/wtf/unicode/icu/UnicodeIcu.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2006 George Staikos <staikos@kde.org>
- * Copyright (C) 2006 Alexey Proskuryakov <ap@nypop.com>
- * Copyright (C) 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public License
- * along with this library; see the file COPYING.LIB. If not, write to
- * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- *
- */
-
-#ifndef WTF_UNICODE_ICU_H
-#define WTF_UNICODE_ICU_H
-
-#include <stdlib.h>
-#include <unicode/uchar.h>
-#include <unicode/uscript.h>
-#include <unicode/ustring.h>
-#include <unicode/utf16.h>
-
-#endif // WTF_UNICODE_ICU_H
diff --git a/Source/WTF/wtf/win/GDIObject.h b/Source/WTF/wtf/win/GDIObject.h
deleted file mode 100644
index 4e01b3cec..000000000
--- a/Source/WTF/wtf/win/GDIObject.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef GDIObject_h
-#define GDIObject_h
-
-#include <algorithm>
-#include <cstddef>
-#include <memory>
-#include <windows.h>
-#include <wtf/Assertions.h>
-#include <wtf/Noncopyable.h>
-
-namespace WTF {
-
-template<typename T> void deleteObject(T);
-
-template<typename T> class GDIObject {
- WTF_MAKE_NONCOPYABLE(GDIObject);
-public:
- GDIObject() : m_object(0) { }
- GDIObject(std::nullptr_t) : m_object(0) { }
- ~GDIObject() { deleteObject<T>(m_object); }
-
- T get() const { return m_object; }
-
- void clear();
- T leak() WARN_UNUSED_RETURN;
-
- bool operator!() const { return !m_object; }
-
- // This conversion operator allows implicit conversion to bool but not to other integer types.
- typedef const void* UnspecifiedBoolType;
- operator UnspecifiedBoolType() const { return m_object ? reinterpret_cast<UnspecifiedBoolType>(&m_object) : 0; }
-
- GDIObject<T>& operator=(std::nullptr_t) { clear(); return *this; }
-
- GDIObject(GDIObject&&);
- template<typename U> GDIObject(GDIObject<U>&&);
-
- GDIObject& operator=(GDIObject&&);
- template<typename U> GDIObject& operator=(GDIObject<U>&&);
-
- void swap(GDIObject& o) { std::swap(m_object, o.m_object); }
-
-private:
- template<typename U> friend GDIObject<U> adoptGDIObject(U);
- GDIObject(T object) : m_object(object) { }
-
- GDIObject<T>& operator=(T);
-
- T m_object;
-};
-
-template<typename T> inline void GDIObject<T>::clear()
-{
- T object = m_object;
- m_object = 0;
- deleteObject(object);
-}
-
-template<typename T> inline T GDIObject<T>::leak()
-{
- T object = m_object;
- m_object = 0;
- return object;
-}
-
-template<typename T> inline GDIObject<T>::GDIObject(GDIObject<T>&& other)
- : m_object(other.leak())
-{
-}
-
-template<typename T> inline GDIObject<T>& GDIObject<T>::operator=(GDIObject<T>&& other)
-{
- auto object = std::move(other);
- swap(object);
- return *this;
-}
-
-template<typename T> inline GDIObject<T> adoptGDIObject(T object)
-{
- return GDIObject<T>(object);
-}
-
-template<typename T> inline void swap(GDIObject<T>& a, GDIObject<T>& b)
-{
- a.swap(b);
-}
-
-// Nearly all GDI types use the same DeleteObject call.
-template<typename T> inline void deleteObject(T object)
-{
- if (object)
- ::DeleteObject(object);
-}
-
-template<> inline void deleteObject<HDC>(HDC hdc)
-{
- if (hdc)
- ::DeleteDC(hdc);
-}
-
-} // namespace WTF
-
-using WTF::GDIObject;
-using WTF::adoptGDIObject;
-
-#endif // GDIObject_h