diff options
author | Michael Brüning <michael.bruning@qt.io> | 2019-03-21 13:20:41 +0100 |
---|---|---|
committer | Michael Brüning <michael.bruning@qt.io> | 2019-03-27 16:12:39 +0000 |
commit | 812a9e68a2cd59bb7e4353906cb68ed9dd44585b (patch) | |
tree | 7b18ecfb8ca8006ef21fbb7cb38f6f436e042791 | |
parent | 94f1317917fab94923e246016ec9f42913298065 (diff) | |
download | qtwebengine-chromium-812a9e68a2cd59bb7e4353906cb68ed9dd44585b.tar.gz |
[Backport] Security bug 905509 (3/13)
Manual backport of original patch by Antoine Labour
<piman@chromium.org>:
Change gpu::Buffer and gpu::BufferBacking to use uint32_t sizes
Because client and service may be of different bitness, offsets in
command buffers have to fit in a uint32_t, effectively limiting
transfer buffers to 4GB. This makes it clearer by enforcing it on IPC
boundaries and using uint32_t for the size, removing casts along the
way.
Bug: 905509
Change-Id: I90a539defac7cc029103b20e541798cc2698e65d
Reviewed-on: https://chromium-review.googlesource.com/c/1396861
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
10 files changed, 57 insertions, 71 deletions
diff --git a/chromium/gpu/command_buffer/client/mapped_memory.cc b/chromium/gpu/command_buffer/client/mapped_memory.cc index 34580415bce..246cbb57fc1 100644 --- a/chromium/gpu/command_buffer/client/mapped_memory.cc +++ b/chromium/gpu/command_buffer/client/mapped_memory.cc @@ -37,9 +37,7 @@ MemoryChunk::MemoryChunk(int32_t shm_id, CommandBufferHelper* helper) : shm_id_(shm_id), shm_(shm), - allocator_(base::checked_cast<unsigned int>(shm->size()), - helper, - shm->memory()) {} + allocator_(shm->size(), helper, shm->memory()) {} MemoryChunk::~MemoryChunk() = default; diff --git a/chromium/gpu/command_buffer/client/mapped_memory.h b/chromium/gpu/command_buffer/client/mapped_memory.h index ee5f50a5504..f11aa36a470 100644 --- a/chromium/gpu/command_buffer/client/mapped_memory.h +++ b/chromium/gpu/command_buffer/client/mapped_memory.h @@ -31,25 +31,21 @@ class GPU_EXPORT MemoryChunk { ~MemoryChunk(); // Gets the size of the largest free block that is available without waiting. - unsigned int GetLargestFreeSizeWithoutWaiting() { + uint32_t GetLargestFreeSizeWithoutWaiting() { return allocator_.GetLargestFreeSize(); } // Gets the size of the largest free block that can be allocated if the // caller can wait. - unsigned int GetLargestFreeSizeWithWaiting() { + uint32_t GetLargestFreeSizeWithWaiting() { return allocator_.GetLargestFreeOrPendingSize(); } // Gets the size of the chunk. - unsigned int GetSize() const { - return static_cast<unsigned int>(shm_->size()); - } + uint32_t GetSize() const { return shm_->size(); } // The shared memory id for this chunk. - int32_t shm_id() const { - return shm_id_; - } + int32_t shm_id() const { return shm_id_; } gpu::Buffer* shared_memory() const { return shm_.get(); } @@ -63,15 +59,11 @@ class GPU_EXPORT MemoryChunk { // Returns: // the pointer to the allocated memory block, or NULL if out of // memory. 
- void* Alloc(unsigned int size) { - return allocator_.Alloc(size); - } + void* Alloc(uint32_t size) { return allocator_.Alloc(size); } // Gets the offset to a memory block given the base memory and the address. // It translates NULL to FencedAllocator::kInvalidOffset. - unsigned int GetOffset(void* pointer) { - return allocator_.GetOffset(pointer); - } + uint32_t GetOffset(void* pointer) { return allocator_.GetOffset(pointer); } // Frees a block of memory. // @@ -87,7 +79,7 @@ class GPU_EXPORT MemoryChunk { // Parameters: // pointer: the pointer to the memory block to free. // token: the token value to wait for before re-using the memory. - void FreePendingToken(void* pointer, unsigned int token) { + void FreePendingToken(void* pointer, uint32_t token) { allocator_.FreePendingToken(pointer, token); } @@ -97,7 +89,7 @@ class GPU_EXPORT MemoryChunk { } // Gets the free size of the chunk. - unsigned int GetFreeSize() { return allocator_.GetFreeSize(); } + uint32_t GetFreeSize() { return allocator_.GetFreeSize(); } // Returns true if pointer is in the range of this block. bool IsInChunk(void* pointer) const { @@ -140,11 +132,9 @@ class GPU_EXPORT MappedMemoryManager { ~MappedMemoryManager(); - unsigned int chunk_size_multiple() const { - return chunk_size_multiple_; - } + uint32_t chunk_size_multiple() const { return chunk_size_multiple_; } - void set_chunk_size_multiple(unsigned int multiple) { + void set_chunk_size_multiple(uint32_t multiple) { DCHECK(base::bits::IsPowerOfTwo(multiple)); DCHECK_GE(multiple, FencedAllocator::kAllocAlignment); chunk_size_multiple_ = multiple; @@ -165,8 +155,7 @@ class GPU_EXPORT MappedMemoryManager { // shm_offset: pointer to variable to receive the shared memory offset. // Returns: // pointer to allocated block of memory. NULL if failure. - void* Alloc( - unsigned int size, int32_t* shm_id, unsigned int* shm_offset); + void* Alloc(uint32_t size, int32_t* shm_id, uint32_t* shm_offset); // Frees a block of memory. 
// @@ -216,7 +205,7 @@ class GPU_EXPORT MappedMemoryManager { typedef std::vector<std::unique_ptr<MemoryChunk>> MemoryChunkVector; // size a chunk is rounded up to. - unsigned int chunk_size_multiple_; + uint32_t chunk_size_multiple_; CommandBufferHelper* helper_; MemoryChunkVector chunks_; size_t allocated_memory_; diff --git a/chromium/gpu/command_buffer/client/ring_buffer.cc b/chromium/gpu/command_buffer/client/ring_buffer.cc index df31208ccad..a3e1767c974 100644 --- a/chromium/gpu/command_buffer/client/ring_buffer.cc +++ b/chromium/gpu/command_buffer/client/ring_buffer.cc @@ -16,14 +16,14 @@ namespace gpu { -RingBuffer::RingBuffer(unsigned int alignment, +RingBuffer::RingBuffer(uint32_t alignment, Offset base_offset, - size_t size, + uint32_t size, CommandBufferHelper* helper, void* base) : helper_(helper), base_offset_(base_offset), - size_(base::checked_cast<unsigned int>(size)), + size_(size), free_offset_(0), in_use_offset_(0), alignment_(alignment), @@ -54,7 +54,7 @@ void RingBuffer::FreeOldestBlock() { blocks_.pop_front(); } -void* RingBuffer::Alloc(unsigned int size) { +void* RingBuffer::Alloc(uint32_t size) { DCHECK_LE(size, size_) << "attempt to allocate more than maximum memory"; DCHECK(blocks_.empty() || blocks_.back().state != IN_USE) << "Attempt to alloc another block before freeing the previous."; @@ -88,7 +88,7 @@ void* RingBuffer::Alloc(unsigned int size) { } void RingBuffer::FreePendingToken(void* pointer, - unsigned int token) { + uint32_t token) { Offset offset = GetOffset(pointer); offset -= base_offset_; DCHECK(!blocks_.empty()) << "no allocations to free"; @@ -146,13 +146,13 @@ void RingBuffer::DiscardBlock(void* pointer) { NOTREACHED() << "attempt to discard non-existant block"; } -unsigned int RingBuffer::GetLargestFreeSizeNoWaiting() { - unsigned int size = GetLargestFreeSizeNoWaitingInternal(); +uint32_t RingBuffer::GetLargestFreeSizeNoWaiting() { + uint32_t size = GetLargestFreeSizeNoWaitingInternal(); DCHECK_EQ(size, 
RoundToAlignment(size)); return size; } -unsigned int RingBuffer::GetLargestFreeSizeNoWaitingInternal() { +uint32_t RingBuffer::GetLargestFreeSizeNoWaitingInternal() { while (!blocks_.empty()) { Block& block = blocks_.front(); if (!helper_->HasTokenPassed(block.token) || block.state == IN_USE) break; @@ -176,8 +176,8 @@ unsigned int RingBuffer::GetLargestFreeSizeNoWaitingInternal() { } } -unsigned int RingBuffer::GetTotalFreeSizeNoWaiting() { - unsigned int largest_free_size = GetLargestFreeSizeNoWaitingInternal(); +uint32_t RingBuffer::GetTotalFreeSizeNoWaiting() { + uint32_t largest_free_size = GetLargestFreeSizeNoWaitingInternal(); if (free_offset_ > in_use_offset_) { // It's free from free_offset_ to size_ and from 0 to in_use_offset_. return size_ - free_offset_ + in_use_offset_; @@ -186,7 +186,7 @@ unsigned int RingBuffer::GetTotalFreeSizeNoWaiting() { } } -void RingBuffer::ShrinkLastBlock(unsigned int new_size) { +void RingBuffer::ShrinkLastBlock(uint32_t new_size) { if (blocks_.empty()) return; auto& block = blocks_.back(); diff --git a/chromium/gpu/command_buffer/client/ring_buffer.h b/chromium/gpu/command_buffer/client/ring_buffer.h index 3b09125b72f..39b69f80001 100644 --- a/chromium/gpu/command_buffer/client/ring_buffer.h +++ b/chromium/gpu/command_buffer/client/ring_buffer.h @@ -22,7 +22,7 @@ class CommandBufferHelper; // allocations must not be kept past new allocations. class GPU_EXPORT RingBuffer { public: - typedef unsigned int Offset; + typedef uint32_t Offset; // Creates a RingBuffer. // Parameters: @@ -31,9 +31,9 @@ class GPU_EXPORT RingBuffer { // size: The size of the buffer in bytes. // helper: A CommandBufferHelper for dealing with tokens. // base: The physical address that corresponds to base_offset. 
- RingBuffer(unsigned int alignment, + RingBuffer(uint32_t alignment, Offset base_offset, - size_t size, + uint32_t size, CommandBufferHelper* helper, void* base); @@ -48,7 +48,7 @@ class GPU_EXPORT RingBuffer { // // Returns: // the pointer to the allocated memory block. - void* Alloc(unsigned int size); + void* Alloc(uint32_t size); // Frees a block of memory, pending the passage of a token. That memory won't // be re-allocated until the token has passed through the command stream. @@ -56,7 +56,7 @@ class GPU_EXPORT RingBuffer { // Parameters: // pointer: the pointer to the memory block to free. // token: the token value to wait for before re-using the memory. - void FreePendingToken(void* pointer, unsigned int token); + void FreePendingToken(void* pointer, uint32_t token); // Discards a block within the ring buffer. // @@ -65,15 +65,15 @@ class GPU_EXPORT RingBuffer { void DiscardBlock(void* pointer); // Gets the size of the largest free block that is available without waiting. - unsigned int GetLargestFreeSizeNoWaiting(); + uint32_t GetLargestFreeSizeNoWaiting(); // Gets the total size of all free blocks that are available without waiting. - unsigned int GetTotalFreeSizeNoWaiting(); + uint32_t GetTotalFreeSizeNoWaiting(); // Gets the size of the largest free block that can be allocated if the // caller can wait. Allocating a block of this size will succeed, but may // block. - unsigned int GetLargestFreeOrPendingSize() { + uint32_t GetLargestFreeOrPendingSize() { // If size_ is not a multiple of alignment_, then trying to allocate it will // cause us to try to allocate more than we actually can due to rounding up. // So, round down here. @@ -81,7 +81,7 @@ class GPU_EXPORT RingBuffer { } // Total size minus usable size. - unsigned int GetUsedSize() { return size_ - GetLargestFreeSizeNoWaiting(); } + uint32_t GetUsedSize() { return size_ - GetLargestFreeSizeNoWaiting(); } // Gets a pointer to a memory block given the base memory and the offset. 
void* GetPointer(RingBuffer::Offset offset) const { @@ -94,13 +94,13 @@ class GPU_EXPORT RingBuffer { } // Rounds the given size to the alignment in use. - unsigned int RoundToAlignment(unsigned int size) { + uint32_t RoundToAlignment(uint32_t size) { return (size + alignment_ - 1) & ~(alignment_ - 1); } // Shrinks the last block. new_size must be smaller than the current size // and the block must still be in use in order to shrink. - void ShrinkLastBlock(unsigned int new_size); + void ShrinkLastBlock(uint32_t new_size); private: enum State { @@ -110,23 +110,19 @@ class GPU_EXPORT RingBuffer { }; // Book-keeping sturcture that describes a block of memory. struct Block { - Block(Offset _offset, unsigned int _size, State _state) - : offset(_offset), - size(_size), - token(0), - state(_state) { - } + Block(Offset _offset, uint32_t _size, State _state) + : offset(_offset), size(_size), token(0), state(_state) {} Offset offset; - unsigned int size; - unsigned int token; // token to wait for. + uint32_t size; + uint32_t token; // token to wait for. State state; }; using Container = base::circular_deque<Block>; - using BlockIndex = unsigned int; + using BlockIndex = uint32_t; void FreeOldestBlock(); - unsigned int GetLargestFreeSizeNoWaitingInternal(); + uint32_t GetLargestFreeSizeNoWaitingInternal(); CommandBufferHelper* helper_; @@ -147,7 +143,7 @@ class GPU_EXPORT RingBuffer { Offset in_use_offset_; // Alignment for allocations. - unsigned int alignment_; + uint32_t alignment_; // The physical address that corresponds to base_offset. 
void* base_; diff --git a/chromium/gpu/command_buffer/common/buffer.cc b/chromium/gpu/command_buffer/common/buffer.cc index 1aa3bffe2bf..ffacfea6cd6 100644 --- a/chromium/gpu/command_buffer/common/buffer.cc +++ b/chromium/gpu/command_buffer/common/buffer.cc @@ -30,6 +30,7 @@ SharedMemoryBufferBacking::SharedMemoryBufferBacking( : shared_memory_region_(std::move(shared_memory_region)), shared_memory_mapping_(std::move(shared_memory_mapping)) { DCHECK_EQ(shared_memory_region_.GetGUID(), shared_memory_mapping_.guid()); + DCHECK_LE(shared_memory_mapping_.size(), static_cast<size_t>(UINT32_MAX)); } SharedMemoryBufferBacking::~SharedMemoryBufferBacking() = default; @@ -47,8 +48,8 @@ void* SharedMemoryBufferBacking::GetMemory() const { return shared_memory_mapping_.memory(); } -size_t SharedMemoryBufferBacking::GetSize() const { - return shared_memory_mapping_.size(); +uint32_t SharedMemoryBufferBacking::GetSize() const { + return static_cast<uint32_t>(shared_memory_mapping_.size()); } Buffer::Buffer(std::unique_ptr<BufferBacking> backing) diff --git a/chromium/gpu/command_buffer/common/buffer.h b/chromium/gpu/command_buffer/common/buffer.h index db89809e611..aef6755bf63 100644 --- a/chromium/gpu/command_buffer/common/buffer.h +++ b/chromium/gpu/command_buffer/common/buffer.h @@ -24,7 +24,7 @@ class GPU_EXPORT BufferBacking { virtual const base::UnsafeSharedMemoryRegion& shared_memory_region() const; virtual base::UnguessableToken GetGUID() const; virtual void* GetMemory() const = 0; - virtual size_t GetSize() const = 0; + virtual uint32_t GetSize() const = 0; }; class GPU_EXPORT SharedMemoryBufferBacking : public BufferBacking { @@ -36,7 +36,7 @@ class GPU_EXPORT SharedMemoryBufferBacking : public BufferBacking { const base::UnsafeSharedMemoryRegion& shared_memory_region() const override; base::UnguessableToken GetGUID() const override; void* GetMemory() const override; - size_t GetSize() const override; + uint32_t GetSize() const override; private: 
base::UnsafeSharedMemoryRegion shared_memory_region_; @@ -51,7 +51,7 @@ class GPU_EXPORT Buffer : public base::RefCountedThreadSafe<Buffer> { BufferBacking* backing() const { return backing_.get(); } void* memory() const { return memory_; } - size_t size() const { return size_; } + uint32_t size() const { return size_; } // Returns NULL if the address overflows the memory. void* GetDataAddress(uint32_t data_offset, uint32_t data_size) const; @@ -68,7 +68,7 @@ class GPU_EXPORT Buffer : public base::RefCountedThreadSafe<Buffer> { std::unique_ptr<BufferBacking> backing_; void* memory_; - size_t size_; + uint32_t size_; DISALLOW_COPY_AND_ASSIGN(Buffer); }; diff --git a/chromium/gpu/command_buffer/service/command_buffer_service.cc b/chromium/gpu/command_buffer/service/command_buffer_service.cc index aa2f6aae3d3..679438b1500 100644 --- a/chromium/gpu/command_buffer/service/command_buffer_service.cc +++ b/chromium/gpu/command_buffer/service/command_buffer_service.cc @@ -22,15 +22,15 @@ namespace { class MemoryBufferBacking : public BufferBacking { public: - explicit MemoryBufferBacking(size_t size) + explicit MemoryBufferBacking(uint32_t size) : memory_(new char[size]), size_(size) {} ~MemoryBufferBacking() override = default; void* GetMemory() const override { return memory_.get(); } - size_t GetSize() const override { return size_; } + uint32_t GetSize() const override { return size_; } private: std::unique_ptr<char[]> memory_; - size_t size_; + uint32_t size_; DISALLOW_COPY_AND_ASSIGN(MemoryBufferBacking); }; @@ -124,7 +124,7 @@ void CommandBufferService::SetGetBuffer(int32_t transfer_buffer_id) { // This means ring_buffer_ can be NULL. ring_buffer_ = GetTransferBuffer(transfer_buffer_id); if (ring_buffer_) { - int32_t size = ring_buffer_->size(); + uint32_t size = ring_buffer_->size(); volatile void* memory = ring_buffer_->memory(); // check proper alignments. 
DCHECK_EQ( diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc index 3685d92ddf1..96cbeae791e 100644 --- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc +++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc @@ -383,6 +383,7 @@ scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer( OnClientError(gpu::error::kOutOfBounds); return nullptr; } + DCHECK_LE(shared_memory_mapping.size(), static_cast<size_t>(UINT32_MAX)); if (last_state_.error == gpu::error::kNoError) { base::UnsafeSharedMemoryRegion region = diff --git a/chromium/gpu/ipc/service/command_buffer_stub.cc b/chromium/gpu/ipc/service/command_buffer_stub.cc index 195ea0fcf25..3b0c94a55ce 100644 --- a/chromium/gpu/ipc/service/command_buffer_stub.cc +++ b/chromium/gpu/ipc/service/command_buffer_stub.cc @@ -629,7 +629,7 @@ void CommandBufferStub::OnRegisterTransferBuffer( // Map the shared memory into this process. base::WritableSharedMemoryMapping mapping = transfer_buffer.Map(); - if (!mapping.IsValid()) { + if (!mapping.IsValid() || (mapping.size() > UINT32_MAX)) { DVLOG(0) << "Failed to map shared memory."; return; } diff --git a/chromium/ppapi/proxy/ppapi_command_buffer_proxy.cc b/chromium/ppapi/proxy/ppapi_command_buffer_proxy.cc index be9abe8def3..6737c6e7067 100644 --- a/chromium/ppapi/proxy/ppapi_command_buffer_proxy.cc +++ b/chromium/ppapi/proxy/ppapi_command_buffer_proxy.cc @@ -147,7 +147,8 @@ scoped_refptr<gpu::Buffer> PpapiCommandBufferProxy::CreateTransferBuffer( base::WritableSharedMemoryMapping shared_memory_mapping = shared_memory_region.Map(); - if (!shared_memory_mapping.IsValid()) { + if (!shared_memory_mapping.IsValid() || + (shared_memory_mapping.size() > UINT32_MAX)) { if (last_state_.error == gpu::error::kNoError) last_state_.error = gpu::error::kOutOfBounds; *id = -1; |