author    Michael Brüning <michael.bruning@qt.io>    2019-03-21 17:06:47 +0100
committer Michael Brüning <michael.bruning@qt.io>    2019-03-27 16:12:53 +0000
commit    8b01fa3780aa1f9a0cdba6e27df0a8d9451134ca (patch)
tree      c9cc1b5fa1dbc741ed46ac404d0920dc924629a5
parent    36c2c5e8b27f7619cc880e8b84031eaec680cdbd (diff)
download  qtwebengine-chromium-8b01fa3780aa1f9a0cdba6e27df0a8d9451134ca.tar.gz
[Backport] Security bug 905509 (5/13)
Backport of original patch by Antoine Labour <piman@chromium.org>

Use uint32_t for sizes in gpu::FencedAllocator

Since it is backed by a gpu::Buffer, the size is capped at 4GB. Also use
uint32_t consistently instead of unsigned int.

Bug: 905509
Change-Id: Ia53250b0cc512799bff502ee4e7552385ace2f8e
Reviewed-on: https://chromium-review.googlesource.com/c/1399226
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
-rw-r--r--  chromium/gpu/command_buffer/client/fenced_allocator.cc   59
-rw-r--r--  chromium/gpu/command_buffer/client/fenced_allocator.h    43
-rw-r--r--  chromium/gpu/command_buffer/client/mapped_memory.h        4
3 files changed, 52 insertions(+), 54 deletions(-)
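Why the RoundUp change below matters: the pre-patch version adds kAllocAlignment - 1 in plain uint32_t arithmetic, so any size within 15 bytes of the 4GB limit wraps around to a tiny value, and Alloc() would then satisfy an impossible request instead of failing it. A minimal standalone sketch of the wraparound (not part of the patch; kAllocAlignment copied from fenced_allocator.h):

#include <cstdint>
#include <iostream>

constexpr uint32_t kAllocAlignment = 16;  // same value as in fenced_allocator.h

// Shape of the pre-patch helper: unsigned overflow wraps silently.
uint32_t UncheckedRoundUp(uint32_t size) {
  return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
}

int main() {
  uint32_t huge = 0xFFFFFFF1u;  // 15 bytes short of 4GB
  // 0xFFFFFFF1 + 15 wraps to 0, which "rounds up" to 0, so the old Alloc()
  // would treat a ~4GB request as a zero-byte one and hand back a valid offset.
  std::cout << UncheckedRoundUp(huge) << "\n";  // prints 0
}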
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator.cc b/chromium/gpu/command_buffer/client/fenced_allocator.cc
index 4ca5ff9fffd..8eddfef4e88 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator.cc
+++ b/chromium/gpu/command_buffer/client/fenced_allocator.cc
@@ -17,19 +17,19 @@ namespace gpu {
namespace {
// Round down to the largest multiple of kAllocAlignment no greater than |size|.
-unsigned int RoundDown(unsigned int size) {
+uint32_t RoundDown(uint32_t size) {
return size & ~(FencedAllocator::kAllocAlignment - 1);
}
-// Round up to the smallest multiple of kAllocAlignment no smaller than |size|.
-unsigned int RoundUp(unsigned int size) {
- return (size + (FencedAllocator::kAllocAlignment - 1)) &
- ~(FencedAllocator::kAllocAlignment - 1);
+base::CheckedNumeric<uint32_t> RoundUp(uint32_t size) {
+ return (base::CheckedNumeric<uint32_t>(size) +
+ (FencedAllocator::kAllocAlignment - 1)) &
+ ~(FencedAllocator::kAllocAlignment - 1);
}
} // namespace
-FencedAllocator::FencedAllocator(unsigned int size, CommandBufferHelper* helper)
+FencedAllocator::FencedAllocator(uint32_t size, CommandBufferHelper* helper)
: helper_(helper), bytes_in_use_(0) {
Block block = { FREE, 0, RoundDown(size), kUnusedToken };
blocks_.push_back(block);
@@ -47,7 +47,7 @@ FencedAllocator::~FencedAllocator() {
// blocks, waiting for them. The current implementation isn't smart about
// optimizing what to wait for, just looks inside the block in order (first-fit
// as well).
-FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
+FencedAllocator::Offset FencedAllocator::Alloc(uint32_t size) {
// size of 0 is not allowed because it would be inconsistent to only sometimes
// have it succeed. Example: Alloc(SizeOfBuffer), Alloc(0).
if (size == 0) {
@@ -55,24 +55,27 @@ FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
}
// Round up the allocation size to ensure alignment.
- size = RoundUp(size);
+ uint32_t aligned_size = 0;
+ if (!RoundUp(size).AssignIfValid(&aligned_size)) {
+ return kInvalidOffset;
+ }
// Try first to allocate in a free block.
- for (unsigned int i = 0; i < blocks_.size(); ++i) {
- Block &block = blocks_[i];
- if (block.state == FREE && block.size >= size) {
- return AllocInBlock(i, size);
+ for (uint32_t i = 0; i < blocks_.size(); ++i) {
+ Block &block = blocks_[i];
+ if (block.state == FREE && block.size >= aligned_size) {
+ return AllocInBlock(i, aligned_size);
}
}
// No free block is available. Look for blocks pending tokens, and wait for
// them to be re-usable.
- for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ for (uint32_t i = 0; i < blocks_.size(); ++i) {
if (blocks_[i].state != FREE_PENDING_TOKEN)
continue;
i = WaitForTokenAndFreeBlock(i);
- if (blocks_[i].size >= size)
- return AllocInBlock(i, size);
+ if (blocks_[i].size >= aligned_size)
+ return AllocInBlock(i, aligned_size);
}
return kInvalidOffset;
}
@@ -105,10 +108,10 @@ void FencedAllocator::FreePendingToken(FencedAllocator::Offset offset,
}
// Gets the max of the size of the blocks marked as free.
-unsigned int FencedAllocator::GetLargestFreeSize() {
+uint32_t FencedAllocator::GetLargestFreeSize() {
FreeUnused();
- unsigned int max_size = 0;
- for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ uint32_t max_size = 0;
+ for (uint32_t i = 0; i < blocks_.size(); ++i) {
Block &block = blocks_[i];
if (block.state == FREE)
max_size = std::max(max_size, block.size);
@@ -118,10 +121,10 @@ unsigned int FencedAllocator::GetLargestFreeSize() {
// Gets the size of the largest segment of blocks that are either FREE or
// FREE_PENDING_TOKEN.
-unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
- unsigned int max_size = 0;
- unsigned int current_size = 0;
- for (unsigned int i = 0; i < blocks_.size(); ++i) {
+uint32_t FencedAllocator::GetLargestFreeOrPendingSize() {
+ uint32_t max_size = 0;
+ uint32_t current_size = 0;
+ for (uint32_t i = 0; i < blocks_.size(); ++i) {
Block &block = blocks_[i];
if (block.state == IN_USE) {
max_size = std::max(max_size, current_size);
@@ -135,10 +138,10 @@ unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
}
// Gets the total size of all blocks marked as free.
-unsigned int FencedAllocator::GetFreeSize() {
+uint32_t FencedAllocator::GetFreeSize() {
FreeUnused();
- unsigned int size = 0;
- for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ uint32_t size = 0;
+ for (uint32_t i = 0; i < blocks_.size(); ++i) {
Block& block = blocks_[i];
if (block.state == FREE)
size += block.size;
@@ -152,7 +155,7 @@ unsigned int FencedAllocator::GetFreeSize() {
// - the successive offsets match the block sizes, and they are in order.
bool FencedAllocator::CheckConsistency() {
if (blocks_.size() < 1) return false;
- for (unsigned int i = 0; i < blocks_.size() - 1; ++i) {
+ for (uint32_t i = 0; i < blocks_.size() - 1; ++i) {
Block &current = blocks_[i];
Block &next = blocks_[i + 1];
// This test is NOT included in the next one, because offset is unsigned.
@@ -216,7 +219,7 @@ FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
// Frees any blocks pending a token for which the token has been read.
void FencedAllocator::FreeUnused() {
- for (unsigned int i = 0; i < blocks_.size();) {
+ for (uint32_t i = 0; i < blocks_.size();) {
Block& block = blocks_[i];
if (block.state == FREE_PENDING_TOKEN &&
helper_->HasTokenPassed(block.token)) {
@@ -231,7 +234,7 @@ void FencedAllocator::FreeUnused() {
// If the block is exactly the requested size, simply mark it IN_USE, otherwise
// split it and mark the first one (of the requested size) IN_USE.
FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
- unsigned int size) {
+ uint32_t size) {
Block &block = blocks_[index];
DCHECK_GE(block.size, size);
DCHECK_EQ(block.state, FREE);
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator.h b/chromium/gpu/command_buffer/client/fenced_allocator.h
index 9c42d34ec13..7d2d39746c0 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator.h
+++ b/chromium/gpu/command_buffer/client/fenced_allocator.h
@@ -32,19 +32,19 @@ class CommandBufferHelper;
// (see http://www.corp.google.com/eng/doc/cpp_primer.html#thread_safety).
class GPU_EXPORT FencedAllocator {
public:
- typedef unsigned int Offset;
+ typedef uint32_t Offset;
// Invalid offset, returned by Alloc in case of failure.
enum : Offset { kInvalidOffset = 0xffffffffU };
// Allocation alignment, must be a power of two.
- enum : unsigned int { kAllocAlignment = 16 };
+ enum : uint32_t { kAllocAlignment = 16 };
// Status of a block of memory, for book-keeping.
enum State { IN_USE, FREE, FREE_PENDING_TOKEN };
// Creates a FencedAllocator. Note that the size of the buffer is passed, but
// not its base address: everything is handled as offsets into the buffer.
- FencedAllocator(unsigned int size, CommandBufferHelper* helper);
+ FencedAllocator(uint32_t size, CommandBufferHelper* helper);
~FencedAllocator();
@@ -58,7 +58,7 @@ class GPU_EXPORT FencedAllocator {
// Returns:
// the offset of the allocated memory block, or kInvalidOffset if out of
// memory.
- Offset Alloc(unsigned int size);
+ Offset Alloc(uint32_t size);
// Frees a block of memory.
//
@@ -78,15 +78,15 @@ class GPU_EXPORT FencedAllocator {
void FreeUnused();
// Gets the size of the largest free block that is available without waiting.
- unsigned int GetLargestFreeSize();
+ uint32_t GetLargestFreeSize();
// Gets the size of the largest free block that can be allocated if the
// caller can wait. Allocating a block of this size will succeed, but may
// block.
- unsigned int GetLargestFreeOrPendingSize();
+ uint32_t GetLargestFreeOrPendingSize();
// Gets the total size of all free blocks that are available without waiting.
- unsigned int GetFreeSize();
+ uint32_t GetFreeSize();
// Checks for consistency inside the book-keeping structures. Used for
// testing.
@@ -96,7 +96,7 @@ class GPU_EXPORT FencedAllocator {
bool InUseOrFreePending();
// Return bytes of memory that is IN_USE
- size_t bytes_in_use() const { return bytes_in_use_; }
+ uint32_t bytes_in_use() const { return bytes_in_use_; }
// Gets the status of a block, as well as the corresponding token if
// FREE_PENDING_TOKEN.
@@ -107,7 +107,7 @@ class GPU_EXPORT FencedAllocator {
struct Block {
State state;
Offset offset;
- unsigned int size;
+ uint32_t size;
int32_t token; // token to wait for in the FREE_PENDING_TOKEN case.
};
@@ -120,7 +120,7 @@ class GPU_EXPORT FencedAllocator {
};
typedef std::vector<Block> Container;
- typedef unsigned int BlockIndex;
+ typedef uint32_t BlockIndex;
static const int32_t kUnusedToken = 0;
@@ -142,11 +142,11 @@ class GPU_EXPORT FencedAllocator {
// NOTE: this will invalidate block indices.
// Returns the offset of the allocated block (NOTE: this is different from
// the other functions that return a block index).
- Offset AllocInBlock(BlockIndex index, unsigned int size);
+ Offset AllocInBlock(BlockIndex index, uint32_t size);
CommandBufferHelper *helper_;
Container blocks_;
- size_t bytes_in_use_;
+ uint32_t bytes_in_use_;
DISALLOW_IMPLICIT_CONSTRUCTORS(FencedAllocator);
};
@@ -155,9 +155,7 @@ class GPU_EXPORT FencedAllocator {
// instead of offsets.
class FencedAllocatorWrapper {
public:
- FencedAllocatorWrapper(unsigned int size,
- CommandBufferHelper* helper,
- void* base)
+ FencedAllocatorWrapper(uint32_t size, CommandBufferHelper* helper, void* base)
: allocator_(size, helper), base_(base) {}
// Allocates a block of memory. If the buffer is out of directly available
@@ -170,7 +168,7 @@ class FencedAllocatorWrapper {
// Returns:
// the pointer to the allocated memory block, or NULL if out of
// memory.
- void *Alloc(unsigned int size) {
+ void* Alloc(uint32_t size) {
FencedAllocator::Offset offset = allocator_.Alloc(size);
return GetPointer(offset);
}
@@ -186,7 +184,8 @@ class FencedAllocatorWrapper {
// Returns:
// the pointer to the allocated memory block, or NULL if out of
// memory.
- template <typename T> T *AllocTyped(unsigned int count) {
+ template <typename T>
+ T* AllocTyped(uint32_t count) {
return static_cast<T *>(Alloc(count * sizeof(T)));
}
@@ -232,18 +231,16 @@ class FencedAllocatorWrapper {
}
// Gets the size of the largest free block that is available without waiting.
- unsigned int GetLargestFreeSize() {
- return allocator_.GetLargestFreeSize();
- }
+ uint32_t GetLargestFreeSize() { return allocator_.GetLargestFreeSize(); }
// Gets the size of the largest free block that can be allocated if the
// caller can wait.
- unsigned int GetLargestFreeOrPendingSize() {
+ uint32_t GetLargestFreeOrPendingSize() {
return allocator_.GetLargestFreeOrPendingSize();
}
// Gets the total size of all free blocks.
- unsigned int GetFreeSize() { return allocator_.GetFreeSize(); }
+ uint32_t GetFreeSize() { return allocator_.GetFreeSize(); }
// Checks for consistency inside the book-keeping structures. Used for
// testing.
@@ -256,7 +253,7 @@ class FencedAllocatorWrapper {
FencedAllocator &allocator() { return allocator_; }
- size_t bytes_in_use() const { return allocator_.bytes_in_use(); }
+ uint32_t bytes_in_use() const { return allocator_.bytes_in_use(); }
FencedAllocator::State GetPointerStatusForTest(void* pointer,
int32_t* token_if_pending) {
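For readers who don't know the allocator itself: as the comments in the .cc diff say, it keeps a sorted vector of blocks and allocates first-fit, splitting a free block whenever it is larger than the request. A deliberately simplified model of that bookkeeping (token handling, coalescing, and alignment omitted; the field names follow the Block struct above, everything else is illustrative):

#include <cstdint>
#include <iostream>
#include <vector>

enum State { IN_USE, FREE };  // FREE_PENDING_TOKEN omitted in this model

struct Block {
  State state;
  uint32_t offset;
  uint32_t size;
};

constexpr uint32_t kInvalidOffset = 0xffffffffu;

// First-fit: scan blocks in offset order, take the first FREE block that is
// large enough, splitting off a FREE remainder when the fit is not exact.
uint32_t FirstFitAlloc(std::vector<Block>& blocks, uint32_t size) {
  for (size_t i = 0; i < blocks.size(); ++i) {
    if (blocks[i].state != FREE || blocks[i].size < size)
      continue;
    if (blocks[i].size > size) {
      blocks.insert(blocks.begin() + i + 1,
                    {FREE, blocks[i].offset + size, blocks[i].size - size});
      blocks[i].size = size;
    }
    blocks[i].state = IN_USE;
    return blocks[i].offset;
  }
  return kInvalidOffset;
}

int main() {
  std::vector<Block> blocks = {{FREE, 0, 256}};     // one free block, as in the ctor
  std::cout << FirstFitAlloc(blocks, 64) << "\n";   // 0
  std::cout << FirstFitAlloc(blocks, 64) << "\n";   // 64
  std::cout << FirstFitAlloc(blocks, 512) << "\n";  // 4294967295 == kInvalidOffset
}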
diff --git a/chromium/gpu/command_buffer/client/mapped_memory.h b/chromium/gpu/command_buffer/client/mapped_memory.h
index f11aa36a470..ef1a9495e5d 100644
--- a/chromium/gpu/command_buffer/client/mapped_memory.h
+++ b/chromium/gpu/command_buffer/client/mapped_memory.h
@@ -101,9 +101,7 @@ class GPU_EXPORT MemoryChunk {
// Returns true of any memory in this chunk is in use or free pending token.
bool InUseOrFreePending() { return allocator_.InUseOrFreePending(); }
- size_t bytes_in_use() const {
- return allocator_.bytes_in_use();
- }
+ uint32_t bytes_in_use() const { return allocator_.bytes_in_use(); }
FencedAllocator::State GetPointerStatusForTest(void* pointer,
int32_t* token_if_pending) {
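A closing note on the bytes_in_use() changes: the accessor now returns uint32_t at every layer (FencedAllocator, FencedAllocatorWrapper, MemoryChunk), so the accounting type matches the Offset and size types of the 4GB-capped gpu::Buffer. An illustration, not from the patch, of the silent truncation that mixed-width bookkeeping invites (assumes 64-bit size_t):

#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
  // 64-bit bookkeeping can hold a total that no 32-bit offset can represent...
  size_t bytes_in_use = 0x100000010ull;  // just over 4GB
  // ...and narrowing it back to the offset type silently drops the high bits.
  uint32_t narrowed = static_cast<uint32_t>(bytes_in_use);
  std::cout << narrowed << "\n";  // prints 16, not an error
  // Keeping every size uint32_t, as the patch does, makes such states
  // unrepresentable instead of silently wrong.
}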