Diffstat (limited to 'lib/scudo/standalone')
 lib/scudo/standalone/combined.h                |  2 +-
 lib/scudo/standalone/primary64.h               |  6 ++++--
 lib/scudo/standalone/tests/wrappers_c_test.cpp | 41 ++++++++++++++++++++++++++
 3 files changed, 46 insertions(+), 3 deletions(-)
diff --git a/lib/scudo/standalone/combined.h b/lib/scudo/standalone/combined.h
index 3c3e4786d..05a4ebcdf 100644
--- a/lib/scudo/standalone/combined.h
+++ b/lib/scudo/standalone/combined.h
@@ -375,7 +375,7 @@ public:
const uptr From = Base;
const uptr To = Base + Size;
auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
- if (Block < From || Block > To)
+ if (Block < From || Block >= To)
return;
uptr ChunkSize;
const uptr ChunkBase = getChunkFromBlock(Block, &ChunkSize);
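The change above makes the range check half-open: the iteration covers [From, From + Size), so a block starting exactly at To belongs to the next window and must be rejected here, otherwise two adjacent windows both report it. A minimal standalone sketch of the corrected comparison (visitWindow and the addresses are illustrative, not Scudo code):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Report every block whose start lies inside the half-open window
// [From, From + Size).
static void visitWindow(uptr From, uptr Size, const uptr *Blocks, int N) {
  const uptr To = From + Size;
  for (int I = 0; I < N; I++) {
    if (Blocks[I] < From || Blocks[I] >= To) // Block == To is excluded.
      continue;
    printf("window [0x%" PRIxPTR ", 0x%" PRIxPTR ") sees block 0x%" PRIxPTR
           "\n", From, To, Blocks[I]);
  }
}

int main() {
  const uptr Blocks[] = {0x1000, 0x2000}; // 0x2000 sits on the boundary.
  visitWindow(0x1000, 0x1000, Blocks, 2); // Reports only 0x1000.
  visitWindow(0x2000, 0x1000, Blocks, 2); // Reports only 0x2000: no double count.
  return 0;
}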
diff --git a/lib/scudo/standalone/primary64.h b/lib/scudo/standalone/primary64.h
index fd3709ecb..96fd1e6d5 100644
--- a/lib/scudo/standalone/primary64.h
+++ b/lib/scudo/standalone/primary64.h
@@ -36,7 +36,7 @@ namespace scudo {
// freelist to the thread specific freelist, and back.
//
// The memory used by this allocator is never unmapped, but can be partially
-// released it the platform allows for it.
+// released if the platform allows for it.
template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 {
public:
@@ -135,7 +135,9 @@ public:
}
template <typename F> void iterateOverBlocks(F Callback) const {
- for (uptr I = 1; I < NumClasses; I++) {
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
const RegionInfo *Region = getRegionInfo(I);
const uptr BlockSize = getSizeByClassId(I);
const uptr From = Region->RegionBeg;
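Starting the loop at class 1 skipped the batch class only by virtue of SizeClassMap::BatchClassId being 0; blocks in that class hold the allocator's internal TransferBatch structures rather than user chunks, so they must never be passed to the callback. Skipping BatchClassId explicitly keeps the walk correct regardless of which ID the batch class is assigned. For context, a hedged sketch of the per-region walk the surrounding lines imply (the names mirror the code above, but the function itself is illustrative):

#include <cstdint>

using uptr = uintptr_t;

// Blocks of one size class are laid out contiguously from the region base,
// so visiting them is a fixed-stride walk over the user-allocated span.
template <typename F>
void walkRegion(uptr RegionBeg, uptr AllocatedUser, uptr BlockSize,
                F Callback) {
  const uptr End = RegionBeg + AllocatedUser;
  for (uptr Block = RegionBeg; Block + BlockSize <= End; Block += BlockSize)
    Callback(Block);
}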
diff --git a/lib/scudo/standalone/tests/wrappers_c_test.cpp b/lib/scudo/standalone/tests/wrappers_c_test.cpp
index 5498c165c..6d6bbbf61 100644
--- a/lib/scudo/standalone/tests/wrappers_c_test.cpp
+++ b/lib/scudo/standalone/tests/wrappers_c_test.cpp
@@ -14,6 +14,14 @@
#include <malloc.h>
#include <unistd.h>
+extern "C" {
+void malloc_enable(void);
+void malloc_disable(void);
+int malloc_iterate(uintptr_t base, size_t size,
+ void (*callback)(uintptr_t base, size_t size, void *arg),
+ void *arg);
+}
+
// Note that every C allocation function in the test binary will be fulfilled
// by Scudo (this includes the gtest APIs, etc.), which is a test by itself.
// But this might also lead to unexpected side-effects, since the allocation and
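malloc_disable, malloc_enable and malloc_iterate are Bionic's heap-introspection hooks, which Scudo's C wrappers provide; they are not declared by the headers the test includes, hence the local extern "C" block. A hedged usage sketch (countChunk and liveHeapBytes are made-up names): the heap must stay disabled, i.e. the allocator locks held, for the duration of the walk:

static void countChunk(uintptr_t Base, size_t Size, void *Arg) {
  (void)Base;
  *static_cast<size_t *>(Arg) += Size; // Accumulate live chunk bytes.
}

static size_t liveHeapBytes(uintptr_t RangeBase, size_t RangeSize) {
  size_t Total = 0;
  malloc_disable(); // Quiesce the allocator so the walk sees a stable heap.
  malloc_iterate(RangeBase, RangeSize, countChunk, &Total);
  malloc_enable();
  return Total;
}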
@@ -239,3 +247,36 @@ TEST(ScudoWrappersCTest, MallInfo) {
MI = mallinfo();
EXPECT_GE(static_cast<size_t>(MI.fordblks), Free + BypassQuarantineSize);
}
+
+static uintptr_t BoundaryP;
+static size_t Count;
+
+static void callback(uintptr_t Base, size_t Size, void *Arg) {
+ if (Base == BoundaryP)
+ Count++;
+}
+
+// Verify that a block located on an iteration boundary is not mis-accounted.
+// To achieve this, we allocate a chunk whose backing block is aligned on a
+// page boundary, then run malloc_iterate over each of the two pages the
+// block borders. The callback must see the chunk exactly once.
+TEST(ScudoWrappersCTest, MallocIterateBoundary) {
+ const size_t PageSize = sysconf(_SC_PAGESIZE);
+ const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
+ const size_t SpecialSize = PageSize - BlockDelta;
+
+ void *P = malloc(SpecialSize);
+ EXPECT_NE(P, nullptr);
+ BoundaryP = reinterpret_cast<uintptr_t>(P);
+ const uintptr_t Block = BoundaryP - BlockDelta;
+ EXPECT_EQ((Block & (PageSize - 1)), 0U);
+
+ Count = 0U;
+ malloc_disable();
+ malloc_iterate(Block - PageSize, PageSize, callback, nullptr);
+ malloc_iterate(Block, PageSize, callback, nullptr);
+ malloc_enable();
+ EXPECT_EQ(Count, 1U);
+
+ free(P);
+}
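To make the boundary arithmetic concrete, assuming a 4096-byte page and the 64-bit value (16) that FIRST_32_SECOND_64 selects for the header delta: SpecialSize comes out to 4080, header plus user data fill the backing block exactly, and that block is handed out page-aligned, so P - 16 lands on a page boundary. The hypothetical numbers below just replay the test's checks:

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t PageSize = 4096;   // Assumed page size.
  const size_t BlockDelta = 16;   // 64-bit chunk header delta.
  const size_t SpecialSize = PageSize - BlockDelta; // 4080-byte request.
  // Header + user data fill exactly one page.
  assert(SpecialSize + BlockDelta == PageSize);
  const uintptr_t P = 0x100010;   // Hypothetical chunk address: page + 16.
  const uintptr_t Block = P - BlockDelta;
  assert((Block & (PageSize - 1)) == 0); // Backing block is page-aligned.
  // The windows [Block - PageSize, Block) and [Block, Block + PageSize) meet
  // only at Block itself, which the half-open check in combined.h assigns to
  // the second window, so the callback fires exactly once.
  return 0;
}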