author     Allan Sandfeld Jensen <allan.jensen@qt.io>     2018-01-29 16:35:13 +0100
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>     2018-02-01 15:33:35 +0000
commit     c8c2d1901aec01e934adf561a9fdf0cc776cdef8 (patch)
tree       9157c3d9815e5870799e070b113813bec53e0535 /chromium/net/disk_cache
parent     abefd5095b41dac94ca451d784ab6e27372e981a (diff)
download   qtwebengine-chromium-c8c2d1901aec01e934adf561a9fdf0cc776cdef8.tar.gz
BASELINE: Update Chromium to 64.0.3282.139
Change-Id: I1cae68fe9c94ff7608b26b8382fc19862cdb293a
Reviewed-by: Alexandru Croitor <alexandru.croitor@qt.io>
Diffstat (limited to 'chromium/net/disk_cache')
-rw-r--r-- chromium/net/disk_cache/OWNERS | 3
-rw-r--r-- chromium/net/disk_cache/backend_cleanup_tracker_unittest.cc | 2
-rw-r--r-- chromium/net/disk_cache/backend_unittest.cc | 18
-rw-r--r-- chromium/net/disk_cache/blockfile/block_files.cc | 6
-rw-r--r-- chromium/net/disk_cache/blockfile/disk_format_base.h | 2
-rw-r--r-- chromium/net/disk_cache/blockfile/entry_impl.cc | 2
-rw-r--r-- chromium/net/disk_cache/blockfile/eviction.cc | 3
-rw-r--r-- chromium/net/disk_cache/blockfile/file_posix.cc | 6
-rw-r--r-- chromium/net/disk_cache/blockfile/in_flight_backend_io.cc | 5
-rw-r--r-- chromium/net/disk_cache/blockfile/in_flight_io.cc | 6
-rw-r--r-- chromium/net/disk_cache/blockfile/mapped_file_unittest.cc | 2
-rw-r--r-- chromium/net/disk_cache/blockfile/rankings.cc | 2
-rw-r--r-- chromium/net/disk_cache/blockfile/sparse_control.cc | 2
-rw-r--r-- chromium/net/disk_cache/blockfile/sparse_control.h | 2
-rw-r--r-- chromium/net/disk_cache/blockfile/stats.cc | 6
-rw-r--r-- chromium/net/disk_cache/disk_cache.cc | 6
-rw-r--r-- chromium/net/disk_cache/disk_cache_perftest.cc | 4
-rw-r--r-- chromium/net/disk_cache/disk_cache_test_base.cc | 42
-rw-r--r-- chromium/net/disk_cache/disk_cache_test_base.h | 17
-rw-r--r-- chromium/net/disk_cache/disk_cache_test_util.cc | 5
-rw-r--r-- chromium/net/disk_cache/entry_unittest.cc | 113
-rw-r--r-- chromium/net/disk_cache/simple/simple_backend_impl.cc | 60
-rw-r--r-- chromium/net/disk_cache/simple/simple_backend_impl.h | 7
-rw-r--r-- chromium/net/disk_cache/simple/simple_entry_impl.cc | 70
-rw-r--r-- chromium/net/disk_cache/simple/simple_entry_impl.h | 17
-rw-r--r-- chromium/net/disk_cache/simple/simple_entry_operation.cc | 2
-rw-r--r-- chromium/net/disk_cache/simple/simple_file_tracker.cc | 206
-rw-r--r-- chromium/net/disk_cache/simple/simple_file_tracker.h | 183
-rw-r--r-- chromium/net/disk_cache/simple/simple_file_tracker_unittest.cc | 221
-rw-r--r-- chromium/net/disk_cache/simple/simple_index_file.cc | 8
-rw-r--r-- chromium/net/disk_cache/simple/simple_index_file_unittest.cc | 5
-rw-r--r-- chromium/net/disk_cache/simple/simple_index_unittest.cc | 10
-rw-r--r-- chromium/net/disk_cache/simple/simple_synchronous_entry.cc | 410
-rw-r--r-- chromium/net/disk_cache/simple/simple_synchronous_entry.h | 77
-rw-r--r-- chromium/net/disk_cache/simple/simple_util.cc | 18
-rw-r--r-- chromium/net/disk_cache/simple/simple_util.h | 11
-rw-r--r-- chromium/net/disk_cache/simple/simple_util_win.cc | 2
37 files changed, 1259 insertions(+), 302 deletions(-)
diff --git a/chromium/net/disk_cache/OWNERS b/chromium/net/disk_cache/OWNERS
index b11ae2ed77f..854e7d8ff3d 100644
--- a/chromium/net/disk_cache/OWNERS
+++ b/chromium/net/disk_cache/OWNERS
@@ -1 +1,4 @@
+morlovich@chromium.org
+jkarlin@chromium.org
+
# COMPONENT: Internals>Network>Cache
diff --git a/chromium/net/disk_cache/backend_cleanup_tracker_unittest.cc b/chromium/net/disk_cache/backend_cleanup_tracker_unittest.cc
index 9f9a92352be..1e1aec6dc58 100644
--- a/chromium/net/disk_cache/backend_cleanup_tracker_unittest.cc
+++ b/chromium/net/disk_cache/backend_cleanup_tracker_unittest.cc
@@ -21,7 +21,7 @@ using testing::IsEmpty;
class BackendCleanupTrackerTest : public testing::Test {
protected:
- BackendCleanupTrackerTest() {}
+ BackendCleanupTrackerTest() = default;
void SetUp() override {
testing::Test::SetUp();
diff --git a/chromium/net/disk_cache/backend_unittest.cc b/chromium/net/disk_cache/backend_unittest.cc
index c6262d92f7d..80057fc25d5 100644
--- a/chromium/net/disk_cache/backend_unittest.cc
+++ b/chromium/net/disk_cache/backend_unittest.cc
@@ -14,6 +14,7 @@
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/test/mock_entropy_provider.h"
+#include "base/test/scoped_task_environment.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
@@ -42,6 +43,7 @@
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/test/gtest_util.h"
+#include "net/test/net_test_suite.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -3256,8 +3258,9 @@ TEST_F(DiskCacheBackendTest, MemoryOnlyBackendEviction) {
BackendEviction();
}
-// TODO(gavinp): Enable BackendEviction test for simple cache after performance
-// problems are addressed. See crbug.com/588184 for more information.
+// TODO(morlovich): Enable BackendEviction test for simple cache after
+// performance problems are addressed. See crbug.com/588184 for more
+// information.
// This overly specific looking test is a regression test aimed at
// crbug.com/589186.
@@ -3905,7 +3908,7 @@ TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
// Check that the |SimpleBackendImpl| does not favor this structure.
disk_cache::SimpleBackendImpl* simple_cache =
- new disk_cache::SimpleBackendImpl(cache_path_, nullptr, 0,
+ new disk_cache::SimpleBackendImpl(cache_path_, nullptr, nullptr, 0,
net::DISK_CACHE, nullptr, nullptr);
net::TestCompletionCallback cb;
int rv = simple_cache->Init(cb.callback());
@@ -4013,15 +4016,11 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
EXPECT_TRUE(keys_to_match.empty());
}
-// This test is flaky on Android Marshmallow crbug.com/638891.
-#if !defined(OS_ANDROID)
// Tests that enumerations are not affected by corrupt files.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
SetSimpleCacheMode();
InitCache();
- // Create a corrupt entry. The write/read sequence ensures that the entry will
- // have been created before corrupting the platform files, in the case of
- // optimistic operations.
+ // Create a corrupt entry.
const std::string key = "the key";
disk_cache::Entry* corrupted_entry;
@@ -4034,6 +4033,8 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
corrupted_entry->Close();
+ // Let all I/O finish so it doesn't race with corrupting the file below.
+ NetTestSuite::GetScopedTaskEnvironment()->RunUntilIdle();
std::set<std::string> key_pool;
ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
@@ -4052,7 +4053,6 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
EXPECT_EQ(key_pool.size(), count);
EXPECT_TRUE(keys_to_match.empty());
}
-#endif
// Tests that enumerations don't leak memory when the backend is destructed
// mid-enumeration.
diff --git a/chromium/net/disk_cache/blockfile/block_files.cc b/chromium/net/disk_cache/blockfile/block_files.cc
index 78d759604a8..eac68fbc25f 100644
--- a/chromium/net/disk_cache/blockfile/block_files.cc
+++ b/chromium/net/disk_cache/blockfile/block_files.cc
@@ -49,11 +49,9 @@ BlockHeader::BlockHeader(MappedFile* file)
: header_(reinterpret_cast<BlockFileHeader*>(file->buffer())) {
}
-BlockHeader::BlockHeader(const BlockHeader& other) : header_(other.header_) {
-}
+BlockHeader::BlockHeader(const BlockHeader& other) = default;
-BlockHeader::~BlockHeader() {
-}
+BlockHeader::~BlockHeader() = default;
bool BlockHeader::CreateMapBlock(int size, int* index) {
DCHECK(size > 0 && size <= kMaxNumBlocks);
diff --git a/chromium/net/disk_cache/blockfile/disk_format_base.h b/chromium/net/disk_cache/blockfile/disk_format_base.h
index 88dcfe091c0..9d70e5ad9b2 100644
--- a/chromium/net/disk_cache/blockfile/disk_format_base.h
+++ b/chromium/net/disk_cache/blockfile/disk_format_base.h
@@ -106,7 +106,7 @@ struct SparseHeader {
uint32_t magic; // Structure identifier (equal to kIndexMagic).
int32_t parent_key_len; // Key length for the parent entry.
int32_t last_block; // Index of the last written block.
- int32_t last_block_len; // Lenght of the last written block.
+ int32_t last_block_len; // Length of the last written block.
int32_t dummy[10];
};
diff --git a/chromium/net/disk_cache/blockfile/entry_impl.cc b/chromium/net/disk_cache/blockfile/entry_impl.cc
index 5cd75f007c6..16e49b9787e 100644
--- a/chromium/net/disk_cache/blockfile/entry_impl.cc
+++ b/chromium/net/disk_cache/blockfile/entry_impl.cc
@@ -52,7 +52,7 @@ class SyncCallback: public disk_cache::FileIOCallback {
end_event_type_(end_event_type) {
entry_->IncrementIoCount();
}
- ~SyncCallback() override {}
+ ~SyncCallback() override = default;
void OnFileIOComplete(int bytes_copied) override;
void Discard();
diff --git a/chromium/net/disk_cache/blockfile/eviction.cc b/chromium/net/disk_cache/blockfile/eviction.cc
index 821be2da0b3..7ff205e284d 100644
--- a/chromium/net/disk_cache/blockfile/eviction.cc
+++ b/chromium/net/disk_cache/blockfile/eviction.cc
@@ -85,8 +85,7 @@ Eviction::Eviction()
ptr_factory_(this) {
}
-Eviction::~Eviction() {
-}
+Eviction::~Eviction() = default;
void Eviction::Init(BackendImpl* backend) {
// We grab a bunch of info from the backend to make the code a little cleaner
diff --git a/chromium/net/disk_cache/blockfile/file_posix.cc b/chromium/net/disk_cache/blockfile/file_posix.cc
index 1297cf07144..69058f1f28c 100644
--- a/chromium/net/disk_cache/blockfile/file_posix.cc
+++ b/chromium/net/disk_cache/blockfile/file_posix.cc
@@ -31,7 +31,7 @@ class FileWorkerPool : public base::SequencedWorkerPool {
base::TaskPriority::USER_BLOCKING) {}
protected:
- ~FileWorkerPool() override {}
+ ~FileWorkerPool() override = default;
};
base::LazyInstance<FileWorkerPool>::Leaky s_worker_pool =
@@ -162,9 +162,7 @@ void File::WaitForPendingIO(int* num_pending_io) {
void File::DropPendingIO() {
}
-
-File::~File() {
-}
+File::~File() = default;
base::PlatformFile File::platform_file() const {
return base_file_.GetPlatformFile();
diff --git a/chromium/net/disk_cache/blockfile/in_flight_backend_io.cc b/chromium/net/disk_cache/blockfile/in_flight_backend_io.cc
index 2e7f7c6db08..2ea9b819ecf 100644
--- a/chromium/net/disk_cache/blockfile/in_flight_backend_io.cc
+++ b/chromium/net/disk_cache/blockfile/in_flight_backend_io.cc
@@ -229,7 +229,7 @@ void BackendIO::ReadyForSparseIO(EntryImpl* entry) {
entry_ = entry;
}
-BackendIO::~BackendIO() {}
+BackendIO::~BackendIO() = default;
bool BackendIO::ReturnsEntry() {
return operation_ == OP_OPEN || operation_ == OP_CREATE ||
@@ -364,8 +364,7 @@ InFlightBackendIO::InFlightBackendIO(
ptr_factory_(this) {
}
-InFlightBackendIO::~InFlightBackendIO() {
-}
+InFlightBackendIO::~InFlightBackendIO() = default;
void InFlightBackendIO::Init(const net::CompletionCallback& callback) {
scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
diff --git a/chromium/net/disk_cache/blockfile/in_flight_io.cc b/chromium/net/disk_cache/blockfile/in_flight_io.cc
index 7fdd56bb97c..b8fee8c4214 100644
--- a/chromium/net/disk_cache/blockfile/in_flight_io.cc
+++ b/chromium/net/disk_cache/blockfile/in_flight_io.cc
@@ -32,8 +32,7 @@ void BackgroundIO::Cancel() {
controller_ = NULL;
}
-BackgroundIO::~BackgroundIO() {
-}
+BackgroundIO::~BackgroundIO() = default;
// ---------------------------------------------------------------------------
@@ -41,8 +40,7 @@ InFlightIO::InFlightIO()
: callback_task_runner_(base::ThreadTaskRunnerHandle::Get()),
running_(false) {}
-InFlightIO::~InFlightIO() {
-}
+InFlightIO::~InFlightIO() = default;
// Runs on the background thread.
void BackgroundIO::NotifyController() {
diff --git a/chromium/net/disk_cache/blockfile/mapped_file_unittest.cc b/chromium/net/disk_cache/blockfile/mapped_file_unittest.cc
index 1c864513c39..f5fd0f50626 100644
--- a/chromium/net/disk_cache/blockfile/mapped_file_unittest.cc
+++ b/chromium/net/disk_cache/blockfile/mapped_file_unittest.cc
@@ -19,7 +19,7 @@ class FileCallbackTest: public disk_cache::FileIOCallback {
helper_(helper),
max_id_(max_id) {
}
- ~FileCallbackTest() override {}
+ ~FileCallbackTest() override = default;
void OnFileIOComplete(int bytes_copied) override;
diff --git a/chromium/net/disk_cache/blockfile/rankings.cc b/chromium/net/disk_cache/blockfile/rankings.cc
index c0d023c7bc1..68d2c3be92d 100644
--- a/chromium/net/disk_cache/blockfile/rankings.cc
+++ b/chromium/net/disk_cache/blockfile/rankings.cc
@@ -221,7 +221,7 @@ void Rankings::Iterator::Reset() {
Rankings::Rankings() : init_(false) {}
-Rankings::~Rankings() {}
+Rankings::~Rankings() = default;
bool Rankings::Init(BackendImpl* backend, bool count_lists) {
DCHECK(!init_);
diff --git a/chromium/net/disk_cache/blockfile/sparse_control.cc b/chromium/net/disk_cache/blockfile/sparse_control.cc
index d24e664d2d4..3391533b40c 100644
--- a/chromium/net/disk_cache/blockfile/sparse_control.cc
+++ b/chromium/net/disk_cache/blockfile/sparse_control.cc
@@ -74,7 +74,7 @@ class ChildrenDeleter
private:
friend class base::RefCounted<ChildrenDeleter>;
- ~ChildrenDeleter() override {}
+ ~ChildrenDeleter() override = default;
void DeleteChildren();
diff --git a/chromium/net/disk_cache/blockfile/sparse_control.h b/chromium/net/disk_cache/blockfile/sparse_control.h
index f6bf2a34d5f..d5c81ad71d0 100644
--- a/chromium/net/disk_cache/blockfile/sparse_control.h
+++ b/chromium/net/disk_cache/blockfile/sparse_control.h
@@ -29,7 +29,7 @@ class EntryImpl;
// This class provides support for the sparse capabilities of the disk cache.
// Basically, sparse IO is directed from EntryImpl to this class, and we split
// the operation into multiple small pieces, sending each one to the
-// appropriate entry. An instance of this class is asociated with each entry
+// appropriate entry. An instance of this class is associated with each entry
// used directly for sparse operations (the entry passed in to the constructor).
class SparseControl {
public:
diff --git a/chromium/net/disk_cache/blockfile/stats.cc b/chromium/net/disk_cache/blockfile/stats.cc
index a371d287b6a..72e15e71aef 100644
--- a/chromium/net/disk_cache/blockfile/stats.cc
+++ b/chromium/net/disk_cache/blockfile/stats.cc
@@ -92,11 +92,9 @@ bool VerifyStats(OnDiskStats* stats) {
return true;
}
-Stats::Stats() {
-}
+Stats::Stats() = default;
-Stats::~Stats() {
-}
+Stats::~Stats() = default;
bool Stats::Init(void* data, int num_bytes, Addr address) {
OnDiskStats local_stats;
diff --git a/chromium/net/disk_cache/disk_cache.cc b/chromium/net/disk_cache/disk_cache.cc
index da40ac6e48c..94551a40a4b 100644
--- a/chromium/net/disk_cache/disk_cache.cc
+++ b/chromium/net/disk_cache/disk_cache.cc
@@ -94,7 +94,7 @@ CacheCreator::CacheCreator(
net_log_(net_log) {
}
-CacheCreator::~CacheCreator() {}
+CacheCreator::~CacheCreator() = default;
int CacheCreator::Run() {
#if defined(OS_ANDROID)
@@ -107,8 +107,8 @@ int CacheCreator::Run() {
kSimpleBackendIsDefault)) {
disk_cache::SimpleBackendImpl* simple_cache =
new disk_cache::SimpleBackendImpl(
- path_, cleanup_tracker_.get(), max_bytes_, type_,
- /* cache_thread = */ nullptr, net_log_);
+ path_, cleanup_tracker_.get(), /* file_tracker = */ nullptr,
+ max_bytes_, type_, /* cache_thread = */ nullptr, net_log_);
created_cache_.reset(simple_cache);
return simple_cache->Init(
base::Bind(&CacheCreator::OnIOComplete, base::Unretained(this)));
diff --git a/chromium/net/disk_cache/disk_cache_perftest.cc b/chromium/net/disk_cache/disk_cache_perftest.cc
index 52667df95aa..d5e9431eb90 100644
--- a/chromium/net/disk_cache/disk_cache_perftest.cc
+++ b/chromium/net/disk_cache/disk_cache_perftest.cc
@@ -53,7 +53,9 @@ struct TestEntry {
class DiskCachePerfTest : public DiskCacheTestWithCache {
public:
- DiskCachePerfTest() : saved_fd_limit_(base::GetMaxFds()) {
+ DiskCachePerfTest()
+ : DiskCacheTestWithCache(&scoped_task_environment_),
+ saved_fd_limit_(base::GetMaxFds()) {
if (saved_fd_limit_ < kFdLimitForCacheTests)
MaybeSetFdLimit(kFdLimitForCacheTests);
}
diff --git a/chromium/net/disk_cache/disk_cache_test_base.cc b/chromium/net/disk_cache/disk_cache_test_base.cc
index f0b792fb6b6..727dbb8fa9f 100644
--- a/chromium/net/disk_cache/disk_cache_test_base.cc
+++ b/chromium/net/disk_cache/disk_cache_test_base.cc
@@ -11,6 +11,7 @@
#include "base/path_service.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
+#include "base/test/scoped_task_environment.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "net/base/io_buffer.h"
@@ -23,8 +24,10 @@
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/memory/mem_backend_impl.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
+#include "net/disk_cache/simple/simple_file_tracker.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/test/gtest_util.h"
+#include "net/test/net_test_suite.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -35,8 +38,7 @@ DiskCacheTest::DiskCacheTest() {
cache_path_ = temp_dir_.GetPath();
}
-DiskCacheTest::~DiskCacheTest() {
-}
+DiskCacheTest::~DiskCacheTest() = default;
bool DiskCacheTest::CopyTestCache(const std::string& name) {
base::FilePath path;
@@ -63,7 +65,7 @@ DiskCacheTestWithCache::TestIterator::TestIterator(
std::unique_ptr<disk_cache::Backend::Iterator> iterator)
: iterator_(std::move(iterator)) {}
-DiskCacheTestWithCache::TestIterator::~TestIterator() {}
+DiskCacheTestWithCache::TestIterator::~TestIterator() = default;
int DiskCacheTestWithCache::TestIterator::OpenNextEntry(
disk_cache::Entry** next_entry) {
@@ -73,6 +75,10 @@ int DiskCacheTestWithCache::TestIterator::OpenNextEntry(
}
DiskCacheTestWithCache::DiskCacheTestWithCache()
+ : DiskCacheTestWithCache(NetTestSuite::GetScopedTaskEnvironment()) {}
+
+DiskCacheTestWithCache::DiskCacheTestWithCache(
+ base::test::ScopedTaskEnvironment* scoped_task_env)
: cache_impl_(NULL),
simple_cache_impl_(NULL),
mem_cache_(NULL),
@@ -86,9 +92,10 @@ DiskCacheTestWithCache::DiskCacheTestWithCache()
new_eviction_(false),
first_cleanup_(true),
integrity_(true),
- use_current_thread_(false) {}
+ use_current_thread_(false),
+ scoped_task_env_(scoped_task_env) {}
-DiskCacheTestWithCache::~DiskCacheTestWithCache() {}
+DiskCacheTestWithCache::~DiskCacheTestWithCache() = default;
void DiskCacheTestWithCache::InitCache() {
if (memory_only_)
@@ -276,17 +283,21 @@ void DiskCacheTestWithCache::AddDelay() {
}
void DiskCacheTestWithCache::TearDown() {
- base::RunLoop().RunUntilIdle();
- disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
- base::RunLoop().RunUntilIdle();
+ scoped_task_env_->RunUntilIdle();
cache_.reset();
if (!memory_only_ && !simple_cache_mode_ && integrity_) {
EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
}
- base::RunLoop().RunUntilIdle();
- disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
+ scoped_task_env_->RunUntilIdle();
+ if (simple_cache_mode_ && simple_file_tracker_)
+ EXPECT_TRUE(simple_file_tracker_->IsEmptyForTesting());
+
DiskCacheTest::TearDown();
+
+ // We are documented as not keeping this past TearDown, and net_perftests
+ // is written under this assumption.
+ scoped_task_env_ = nullptr;
}
void DiskCacheTestWithCache::InitMemoryCache() {
@@ -316,10 +327,13 @@ void DiskCacheTestWithCache::CreateBackend(uint32_t flags) {
if (simple_cache_mode_) {
net::TestCompletionCallback cb;
- std::unique_ptr<disk_cache::SimpleBackendImpl> simple_backend(
- new disk_cache::SimpleBackendImpl(
- cache_path_, /* cleanup_tracker = */ nullptr, size_, type_, runner,
- /*net_log = */ nullptr));
+ if (!simple_file_tracker_)
+ simple_file_tracker_ = std::make_unique<disk_cache::SimpleFileTracker>();
+ std::unique_ptr<disk_cache::SimpleBackendImpl> simple_backend =
+ std::make_unique<disk_cache::SimpleBackendImpl>(
+ cache_path_, /* cleanup_tracker = */ nullptr,
+ simple_file_tracker_.get(), size_, type_, runner,
+ /*net_log = */ nullptr);
int rv = simple_backend->Init(cb.callback());
ASSERT_THAT(cb.GetResult(rv), IsOk());
simple_cache_impl_ = simple_backend.get();
diff --git a/chromium/net/disk_cache/disk_cache_test_base.h b/chromium/net/disk_cache/disk_cache_test_base.h
index d73095a9e80..cfa463cbeb1 100644
--- a/chromium/net/disk_cache/disk_cache_test_base.h
+++ b/chromium/net/disk_cache/disk_cache_test_base.h
@@ -18,6 +18,14 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
+namespace base {
+namespace test {
+
+class ScopedTaskEnvironment;
+
+} // namespace test
+} // namespace base
+
namespace net {
class IOBuffer;
@@ -31,6 +39,7 @@ class BackendImpl;
class Entry;
class MemBackendImpl;
class SimpleBackendImpl;
+class SimpleFileTracker;
} // namespace disk_cache
@@ -72,7 +81,13 @@ class DiskCacheTestWithCache : public DiskCacheTest {
std::unique_ptr<disk_cache::Backend::Iterator> iterator_;
};
+ // Assumes NetTestSuite is available.
DiskCacheTestWithCache();
+
+ // Does not take ownership of |scoped_task_env|, and will not use it past
+ // TearDown(). Does not require NetTestSuite.
+ explicit DiskCacheTestWithCache(
+ base::test::ScopedTaskEnvironment* scoped_task_env);
~DiskCacheTestWithCache() override;
void CreateBackend(uint32_t flags);
@@ -168,6 +183,7 @@ class DiskCacheTestWithCache : public DiskCacheTest {
// initialized. The implementation pointers can be NULL.
std::unique_ptr<disk_cache::Backend> cache_;
disk_cache::BackendImpl* cache_impl_;
+ std::unique_ptr<disk_cache::SimpleFileTracker> simple_file_tracker_;
disk_cache::SimpleBackendImpl* simple_cache_impl_;
disk_cache::MemBackendImpl* mem_cache_;
@@ -188,6 +204,7 @@ class DiskCacheTestWithCache : public DiskCacheTest {
private:
void InitMemoryCache();
void InitDiskCache();
+ base::test::ScopedTaskEnvironment* scoped_task_env_;
DISALLOW_COPY_AND_ASSIGN(DiskCacheTestWithCache);
};
diff --git a/chromium/net/disk_cache/disk_cache_test_util.cc b/chromium/net/disk_cache/disk_cache_test_util.cc
index 426aeab7b22..69314c5e544 100644
--- a/chromium/net/disk_cache/disk_cache_test_util.cc
+++ b/chromium/net/disk_cache/disk_cache_test_util.cc
@@ -84,7 +84,7 @@ MessageLoopHelper::MessageLoopHelper()
callback_reused_error_(false),
callbacks_called_(0) {}
-MessageLoopHelper::~MessageLoopHelper() {}
+MessageLoopHelper::~MessageLoopHelper() = default;
bool MessageLoopHelper::WaitUntilCacheIoFinished(int num_callbacks) {
if (num_callbacks == callbacks_called_)
@@ -128,8 +128,7 @@ CallbackTest::CallbackTest(MessageLoopHelper* helper,
reuse_(reuse ? 0 : 1) {
}
-CallbackTest::~CallbackTest() {
-}
+CallbackTest::~CallbackTest() = default;
// On the actual callback, increase the number of tests received and check for
// errors (an unexpected test received)
diff --git a/chromium/net/disk_cache/entry_unittest.cc b/chromium/net/disk_cache/entry_unittest.cc
index 8b7d3c27140..ae97a761cb6 100644
--- a/chromium/net/disk_cache/entry_unittest.cc
+++ b/chromium/net/disk_cache/entry_unittest.cc
@@ -3267,6 +3267,81 @@ TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateRace) {
EXPECT_THAT(doom_callback.GetResult(net::ERR_IO_PENDING), IsOk());
}
+TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateOptimistic) {
+ // Test that we optimize the doom -> create sequence when optimistic ops
+ // are on.
+ SetSimpleCacheMode();
+ InitCache();
+ const char kKey[] = "the key";
+
+ // Create entry and initiate its Doom.
+ disk_cache::Entry* entry1 = nullptr;
+ ASSERT_THAT(CreateEntry(kKey, &entry1), IsOk());
+ ASSERT_TRUE(entry1 != nullptr);
+
+ net::TestCompletionCallback doom_callback;
+ cache_->DoomEntry(kKey, doom_callback.callback());
+
+ disk_cache::Entry* entry2 = nullptr;
+ net::TestCompletionCallback create_callback;
+ // Open entry2, with same key. With optimistic ops, this should succeed
+ // immediately, hence us using cache_->CreateEntry directly rather than using
+ // the DiskCacheTestWithCache::CreateEntry wrapper which blocks when needed.
+ ASSERT_EQ(net::OK,
+ cache_->CreateEntry(kKey, &entry2, create_callback.callback()));
+
+ // Do some I/O to make sure it's alive.
+ const int kSize = 2048;
+ scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
+ scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf_1->data(), kSize, false);
+
+ EXPECT_EQ(kSize, WriteData(entry2, /* stream_index = */ 1, /* offset = */ 0,
+ buf_1.get(), kSize, /* truncate = */ false));
+ EXPECT_EQ(kSize, ReadData(entry2, /* stream_index = */ 1, /* offset = */ 0,
+ buf_2.get(), kSize));
+
+ doom_callback.WaitForResult();
+
+ entry1->Close();
+ entry2->Close();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateOptimisticMassDoom) {
+ // Test that shows that a certain DCHECK in mass doom code had to be removed
+ // once optimistic doom -> create was added.
+ SetSimpleCacheMode();
+ InitCache();
+ const char kKey[] = "the key";
+
+ // Create entry and initiate its Doom.
+ disk_cache::Entry* entry1 = nullptr;
+ ASSERT_THAT(CreateEntry(kKey, &entry1), IsOk());
+ ASSERT_TRUE(entry1 != nullptr);
+
+ net::TestCompletionCallback doom_callback;
+ cache_->DoomEntry(kKey, doom_callback.callback());
+
+ disk_cache::Entry* entry2 = nullptr;
+ net::TestCompletionCallback create_callback;
+ // Open entry2, with same key. With optimistic ops, this should succeed
+ // immediately, hence us using cache_->CreateEntry directly rather than using
+ // the DiskCacheTestWithCache::CreateEntry wrapper which blocks when needed.
+ ASSERT_EQ(net::OK,
+ cache_->CreateEntry(kKey, &entry2, create_callback.callback()));
+
+ net::TestCompletionCallback doomall_callback;
+
+ // This is what had code that had a no-longer valid DCHECK.
+ cache_->DoomAllEntries(doomall_callback.callback());
+
+ doom_callback.WaitForResult();
+ doomall_callback.WaitForResult();
+
+ entry1->Close();
+ entry2->Close();
+}
+
TEST_F(DiskCacheEntryTest, SimpleCacheDoomDoom) {
// Test sequence:
// Create, Doom, Create, Doom (1st entry), Open.
@@ -4337,11 +4412,13 @@ TEST_F(DiskCacheEntryTest, SimpleCacheSparseErrorHandling) {
std::string key("a key");
- uint64_t hash = disk_cache::simple_util::GetEntryHashKey(key);
+ disk_cache::SimpleFileTracker::EntryFileKey num_key(
+ disk_cache::simple_util::GetEntryHashKey(key));
base::FilePath path_0 = cache_path_.AppendASCII(
- disk_cache::simple_util::GetFilenameFromEntryHashAndFileIndex(hash, 0));
+ disk_cache::simple_util::GetFilenameFromEntryFileKeyAndFileIndex(num_key,
+ 0));
base::FilePath path_s = cache_path_.AppendASCII(
- disk_cache::simple_util::GetSparseFilenameFromEntryHash(hash));
+ disk_cache::simple_util::GetSparseFilenameFromEntryFileKey(num_key));
disk_cache::Entry* entry = nullptr;
ASSERT_THAT(CreateEntry(key, &entry), IsOk());
@@ -4468,6 +4545,36 @@ TEST_F(DiskCacheEntryTest, SimpleCacheConvertToSparseStream2LeftOver) {
entry->Close();
}
+TEST_F(DiskCacheEntryTest, SimpleCacheLazyStream2CreateFailure) {
+ // Testcase for what happens when lazy-creation of stream 2 fails.
+ const int kSize = 10;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+
+ // Synchronous ops, for ease of disk state;
+ SetCacheType(net::APP_CACHE);
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char kKey[] = "a key";
+ disk_cache::Entry* entry = nullptr;
+ ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
+
+ // Create _1 file for stream 2; this should inject a failure when the cache
+ // tries to create it itself.
+ base::FilePath entry_file1_path = cache_path_.AppendASCII(
+ disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(kKey, 1));
+ base::File entry_file1(entry_file1_path,
+ base::File::FLAG_WRITE | base::File::FLAG_CREATE);
+ ASSERT_TRUE(entry_file1.IsValid());
+ entry_file1.Close();
+
+ EXPECT_EQ(net::ERR_CACHE_WRITE_FAILURE,
+ WriteData(entry, /* index = */ 2, /* offset = */ 0, buffer.get(),
+ kSize, /* truncate = */ false));
+ entry->Close();
+}
+
class DiskCacheSimplePrefetchTest : public DiskCacheEntryTest {
public:
DiskCacheSimplePrefetchTest()
diff --git a/chromium/net/disk_cache/simple/simple_backend_impl.cc b/chromium/net/disk_cache/simple/simple_backend_impl.cc
index 094cf7e9137..e08ae98fb4b 100644
--- a/chromium/net/disk_cache/simple/simple_backend_impl.cc
+++ b/chromium/net/disk_cache/simple/simple_backend_impl.cc
@@ -37,6 +37,7 @@
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_entry_impl.h"
#include "net/disk_cache/simple/simple_experiment.h"
+#include "net/disk_cache/simple/simple_file_tracker.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_index_file.h"
@@ -108,6 +109,11 @@ void MaybeHistogramFdLimit(net::CacheType cache_type) {
g_fd_limit_histogram_has_been_populated = true;
}
+// Global context of all the files we have open --- this permits some to be
+// closed on demand if too many FDs are being used, to avoid running out.
+base::LazyInstance<SimpleFileTracker>::Leaky g_simple_file_tracker =
+ LAZY_INSTANCE_INITIALIZER;
+
// Detects if the files in the cache directory match the current disk cache
// backend type and version. If the directory contains no cache, occupies it
// with the fresh structure.
@@ -220,11 +226,14 @@ class SimpleBackendImpl::ActiveEntryProxy
SimpleBackendImpl::SimpleBackendImpl(
const FilePath& path,
scoped_refptr<BackendCleanupTracker> cleanup_tracker,
+ SimpleFileTracker* file_tracker,
int max_bytes,
net::CacheType cache_type,
const scoped_refptr<base::SequencedTaskRunner>& cache_runner,
net::NetLog* net_log)
: cleanup_tracker_(std::move(cleanup_tracker)),
+ file_tracker_(file_tracker ? file_tracker
+ : g_simple_file_tracker.Pointer()),
path_(path),
cache_type_(cache_type),
cache_runner_(FallbackToInternalIfNull(cache_runner)),
@@ -306,14 +315,12 @@ void SimpleBackendImpl::DoomEntries(std::vector<uint64_t>* entry_hashes,
std::vector<uint64_t> to_doom_individually_hashes;
// For each of the entry hashes, there are two cases:
- // 1. The entry is either open or pending doom, and so it should be doomed
- // individually to avoid flakes.
- // 2. The entry is not in use at all, so we can call
+ // 1. There are corresponding entries in active set, pending doom, or both
+ // sets, and so the hash should be doomed individually to avoid flakes.
+ // 2. The hash is not in active use at all, so we can call
// SimpleSynchronousEntry::DoomEntrySet and delete the files en masse.
for (int i = mass_doom_entry_hashes->size() - 1; i >= 0; --i) {
const uint64_t entry_hash = (*mass_doom_entry_hashes)[i];
- DCHECK(active_entries_.count(entry_hash) == 0 ||
- entries_pending_doom_.count(entry_hash) == 0);
if (!active_entries_.count(entry_hash) &&
!entries_pending_doom_.count(entry_hash)) {
continue;
@@ -399,12 +406,31 @@ int SimpleBackendImpl::CreateEntry(const std::string& key,
CreateOrFindActiveOrDoomedEntry(entry_hash, key, &post_doom);
if (!simple_entry) {
- Callback<int(const net::CompletionCallback&)> operation =
- base::Bind(&SimpleBackendImpl::CreateEntry,
- base::Unretained(this), key, entry);
- post_doom->push_back(
- base::Bind(&RunOperationAndCallback, operation, callback));
- return net::ERR_IO_PENDING;
+ // We would like to optimistically have create go ahead, for benefit of
+ // HTTP cache use. This can only be sanely done if we are the only op
+ // serialized after doom's completion.
+ if (post_doom->empty() &&
+ entry_operations_mode_ == SimpleEntryImpl::OPTIMISTIC_OPERATIONS) {
+ simple_entry = new SimpleEntryImpl(
+ cache_type_, path_, cleanup_tracker_.get(), entry_hash,
+ entry_operations_mode_, this, file_tracker_, net_log_);
+ simple_entry->SetKey(key);
+ simple_entry->SetActiveEntryProxy(
+ ActiveEntryProxy::Create(entry_hash, this));
+ simple_entry->SetCreatePendingDoom();
+ std::pair<EntryMap::iterator, bool> insert_result =
+ active_entries_.insert(
+ EntryMap::value_type(entry_hash, simple_entry.get()));
+ post_doom->push_back(base::Bind(
+ &SimpleEntryImpl::NotifyDoomBeforeCreateComplete, simple_entry));
+ DCHECK(insert_result.second);
+ } else {
+ Callback<int(const net::CompletionCallback&)> operation = base::Bind(
+ &SimpleBackendImpl::CreateEntry, base::Unretained(this), key, entry);
+ post_doom->push_back(
+ base::Bind(&RunOperationAndCallback, operation, callback));
+ return net::ERR_IO_PENDING;
+ }
}
return simple_entry->CreateEntry(entry, callback);
@@ -672,9 +698,9 @@ SimpleBackendImpl::CreateOrFindActiveOrDoomedEntry(
EntryMap::iterator& it = insert_result.first;
const bool did_insert = insert_result.second;
if (did_insert) {
- SimpleEntryImpl* entry = it->second =
- new SimpleEntryImpl(cache_type_, path_, cleanup_tracker_.get(),
- entry_hash, entry_operations_mode_, this, net_log_);
+ SimpleEntryImpl* entry = it->second = new SimpleEntryImpl(
+ cache_type_, path_, cleanup_tracker_.get(), entry_hash,
+ entry_operations_mode_, this, file_tracker_, net_log_);
entry->SetKey(key);
entry->SetActiveEntryProxy(ActiveEntryProxy::Create(entry_hash, this));
}
@@ -710,9 +736,9 @@ int SimpleBackendImpl::OpenEntryFromHash(uint64_t entry_hash,
return OpenEntry(has_active->second->key(), entry, callback);
}
- scoped_refptr<SimpleEntryImpl> simple_entry =
- new SimpleEntryImpl(cache_type_, path_, cleanup_tracker_.get(),
- entry_hash, entry_operations_mode_, this, net_log_);
+ scoped_refptr<SimpleEntryImpl> simple_entry = new SimpleEntryImpl(
+ cache_type_, path_, cleanup_tracker_.get(), entry_hash,
+ entry_operations_mode_, this, file_tracker_, net_log_);
CompletionCallback backend_callback =
base::Bind(&SimpleBackendImpl::OnEntryOpenedFromHash,
AsWeakPtr(), entry_hash, entry, simple_entry, callback);
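
Note on the DoomEntries() hunk above: with optimistic doom -> create in place, a hash can now legitimately sit in both active_entries_ and entries_pending_doom_ at once, which is why the old DCHECK was dropped. A simplified, self-contained restatement of the partition (names like |requested| and |PartitionForDoom| are illustrative, not from the patch):

    #include <cstdint>
    #include <set>
    #include <vector>

    // Sketch only: |active| and |pending_doom| stand in for the backend's
    // active_entries_ and entries_pending_doom_ maps.
    void PartitionForDoom(const std::vector<uint64_t>& requested,
                          const std::set<uint64_t>& active,
                          const std::set<uint64_t>& pending_doom,
                          std::vector<uint64_t>* doom_individually,
                          std::vector<uint64_t>* doom_in_batch) {
      for (uint64_t hash : requested) {
        // Case 1: in use (open, pending doom, or -- now possible -- both);
        // doom it through its live SimpleEntryImpl to avoid races.
        if (active.count(hash) || pending_doom.count(hash))
          doom_individually->push_back(hash);
        else
          // Case 2: idle; SimpleSynchronousEntry::DoomEntrySet unlinks the
          // files for all such hashes in one batch.
          doom_in_batch->push_back(hash);
      }
    }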
diff --git a/chromium/net/disk_cache/simple/simple_backend_impl.h b/chromium/net/disk_cache/simple/simple_backend_impl.h
index 14c114169fb..0da30fb6fca 100644
--- a/chromium/net/disk_cache/simple/simple_backend_impl.h
+++ b/chromium/net/disk_cache/simple/simple_backend_impl.h
@@ -49,15 +49,20 @@ namespace disk_cache {
class BackendCleanupTracker;
class SimpleEntryImpl;
+class SimpleFileTracker;
class SimpleIndex;
class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend,
public SimpleIndexDelegate,
public base::SupportsWeakPtr<SimpleBackendImpl> {
public:
+ // Note: only pass non-nullptr for |file_tracker| if you don't want the global
+ // one (which things other than tests would want). |file_tracker| must outlive
+ // the backend and all the entries, including their asynchronous close.
SimpleBackendImpl(
const base::FilePath& path,
scoped_refptr<BackendCleanupTracker> cleanup_tracker,
+ SimpleFileTracker* file_tracker,
int max_bytes,
net::CacheType cache_type,
const scoped_refptr<base::SequencedTaskRunner>& cache_runner,
@@ -225,6 +230,8 @@ class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend,
// We want this destroyed after every other field.
scoped_refptr<BackendCleanupTracker> cleanup_tracker_;
+ SimpleFileTracker* const file_tracker_;
+
const base::FilePath path_;
const net::CacheType cache_type_;
std::unique_ptr<SimpleIndex> index_;
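
For orientation, the constructor contract documented above boils down to two usage patterns. A minimal sketch, assuming a base::FilePath is already available; the surrounding setup is illustrative and Init() is still required before use:

    #include <memory>
    #include "base/files/file_path.h"
    #include "net/base/cache_type.h"
    #include "net/disk_cache/simple/simple_backend_impl.h"
    #include "net/disk_cache/simple/simple_file_tracker.h"

    void MakeBackends(const base::FilePath& cache_path) {
      // Production-style: nullptr picks the process-global, leaky tracker.
      auto backend = std::make_unique<disk_cache::SimpleBackendImpl>(
          cache_path, /* cleanup_tracker = */ nullptr,
          /* file_tracker = */ nullptr, /* max_bytes = */ 0, net::DISK_CACHE,
          /* cache_thread = */ nullptr, /* net_log = */ nullptr);

      // Test-style: inject a tracker you own; it must outlive the backend and
      // every entry, including their asynchronous close.
      auto file_tracker = std::make_unique<disk_cache::SimpleFileTracker>();
      auto test_backend = std::make_unique<disk_cache::SimpleBackendImpl>(
          cache_path, nullptr, file_tracker.get(), 0, net::DISK_CACHE, nullptr,
          nullptr);
    }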
diff --git a/chromium/net/disk_cache/simple/simple_entry_impl.cc b/chromium/net/disk_cache/simple/simple_entry_impl.cc
index 29c07251391..7123aac04ab 100644
--- a/chromium/net/disk_cache/simple/simple_entry_impl.cc
+++ b/chromium/net/disk_cache/simple/simple_entry_impl.cc
@@ -74,7 +74,7 @@ void RecordWriteResult(net::CacheType cache_type, WriteResult result) {
"WriteResult2", cache_type, result, WRITE_RESULT_MAX);
}
-// TODO(juliatuttle): Consider removing this once we have a good handle on
+// TODO(morlovich): Consider removing this once we have a good handle on
// header size changes.
void RecordHeaderSizeChange(net::CacheType cache_type,
int old_size, int new_size) {
@@ -176,7 +176,7 @@ class SimpleEntryImpl::ScopedOperationRunner {
SimpleEntryImpl* const entry_;
};
-SimpleEntryImpl::ActiveEntryProxy::~ActiveEntryProxy() {}
+SimpleEntryImpl::ActiveEntryProxy::~ActiveEntryProxy() = default;
SimpleEntryImpl::SimpleEntryImpl(
net::CacheType cache_type,
@@ -185,9 +185,11 @@ SimpleEntryImpl::SimpleEntryImpl(
const uint64_t entry_hash,
OperationsMode operations_mode,
SimpleBackendImpl* backend,
+ SimpleFileTracker* file_tracker,
net::NetLog* net_log)
: cleanup_tracker_(std::move(cleanup_tracker)),
backend_(backend->AsWeakPtr()),
+ file_tracker_(file_tracker),
cache_type_(cache_type),
worker_pool_(backend->worker_pool()),
path_(path),
@@ -199,6 +201,7 @@ SimpleEntryImpl::SimpleEntryImpl(
sparse_data_size_(0),
open_count_(0),
doomed_(false),
+ optimistic_create_pending_doom_state_(CREATE_NORMAL),
state_(STATE_UNINITIALIZED),
synchronous_entry_(NULL),
net_log_(
@@ -280,6 +283,14 @@ int SimpleEntryImpl::CreateEntry(Entry** out_entry,
pending_operations_.push(SimpleEntryOperation::CreateOperation(
this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
ret_value = net::OK;
+
+ // If we are optimistically returning before a preceeding doom, we need to
+ // wait for that IO, about which we will be notified externally.
+ if (optimistic_create_pending_doom_state_ != CREATE_NORMAL) {
+ DCHECK_EQ(CREATE_OPTIMISTIC_PENDING_DOOM,
+ optimistic_create_pending_doom_state_);
+ state_ = STATE_IO_PENDING;
+ }
} else {
pending_operations_.push(SimpleEntryOperation::CreateOperation(
this, have_index, callback, out_entry));
@@ -304,13 +315,48 @@ int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_DOOM_BEGIN);
MarkAsDoomed();
- if (backend_.get())
- backend_->OnDoomStart(entry_hash_);
+ if (backend_.get()) {
+ if (optimistic_create_pending_doom_state_ == CREATE_NORMAL) {
+ backend_->OnDoomStart(entry_hash_);
+ } else {
+ DCHECK_EQ(STATE_IO_PENDING, state_);
+ DCHECK_EQ(CREATE_OPTIMISTIC_PENDING_DOOM,
+ optimistic_create_pending_doom_state_);
+ // If we are in this state, we went ahead with making the entry even
+ // though the backend was already keeping track of a doom, so it can't
+ // keep track of ours. So we delay notifying it until
+ // NotifyDoomBeforeCreateComplete is called. Since this path is invoked
+ // only when the queue of post-doom callbacks was previously empty, while
+ // the CompletionCallback for the op is posted,
+ // NotifyDoomBeforeCreateComplete() will be the first thing running after
+ // the previous doom completes, so at that point we can immediately grab
+ // a spot in entries_pending_doom_.
+ optimistic_create_pending_doom_state_ =
+ CREATE_OPTIMISTIC_PENDING_DOOM_FOLLOWED_BY_DOOM;
+ }
+ }
pending_operations_.push(SimpleEntryOperation::DoomOperation(this, callback));
RunNextOperationIfNeeded();
return net::ERR_IO_PENDING;
}
+void SimpleEntryImpl::SetCreatePendingDoom() {
+ DCHECK_EQ(CREATE_NORMAL, optimistic_create_pending_doom_state_);
+ optimistic_create_pending_doom_state_ = CREATE_OPTIMISTIC_PENDING_DOOM;
+}
+
+void SimpleEntryImpl::NotifyDoomBeforeCreateComplete() {
+ DCHECK_EQ(STATE_IO_PENDING, state_);
+ DCHECK_NE(CREATE_NORMAL, optimistic_create_pending_doom_state_);
+ if (backend_.get() && optimistic_create_pending_doom_state_ ==
+ CREATE_OPTIMISTIC_PENDING_DOOM_FOLLOWED_BY_DOOM)
+ backend_->OnDoomStart(entry_hash_);
+
+ state_ = STATE_UNINITIALIZED;
+ optimistic_create_pending_doom_state_ = CREATE_NORMAL;
+ RunNextOperationIfNeeded();
+}
+
void SimpleEntryImpl::SetKey(const std::string& key) {
key_ = key;
net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_SET_KEY,
@@ -458,7 +504,7 @@ int SimpleEntryImpl::WriteData(int stream_index,
op_callback = callback;
ret_value = net::ERR_IO_PENDING;
} else {
- // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
+ // TODO(morlovich,pasko): For performance, don't use a copy of an IOBuffer
// here to avoid paying the price of the RefCountedThreadSafe atomic
// operations.
if (buf) {
@@ -534,7 +580,7 @@ int SimpleEntryImpl::GetAvailableRange(int64_t offset,
bool SimpleEntryImpl::CouldBeSparse() const {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(juliatuttle): Actually check.
+ // TODO(morlovich): Actually check.
return true;
}
@@ -716,9 +762,9 @@ void SimpleEntryImpl::OpenEntryInternal(bool have_index,
std::unique_ptr<SimpleEntryCreationResults> results(
new SimpleEntryCreationResults(SimpleEntryStat(
last_used_, last_modified_, data_size_, sparse_data_size_)));
- Closure task =
- base::Bind(&SimpleSynchronousEntry::OpenEntry, cache_type_, path_, key_,
- entry_hash_, have_index, start_time, results.get());
+ Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry, cache_type_,
+ path_, key_, entry_hash_, have_index, start_time,
+ file_tracker_, results.get());
Closure reply =
base::Bind(&SimpleEntryImpl::CreationOperationComplete, this, callback,
start_time, base::Passed(&results), out_entry,
@@ -758,9 +804,9 @@ void SimpleEntryImpl::CreateEntryInternal(bool have_index,
std::unique_ptr<SimpleEntryCreationResults> results(
new SimpleEntryCreationResults(SimpleEntryStat(
last_used_, last_modified_, data_size_, sparse_data_size_)));
- Closure task =
- base::Bind(&SimpleSynchronousEntry::CreateEntry, cache_type_, path_, key_,
- entry_hash_, have_index, start_time, results.get());
+ Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, cache_type_,
+ path_, key_, entry_hash_, have_index, start_time,
+ file_tracker_, results.get());
Closure reply =
base::Bind(&SimpleEntryImpl::CreationOperationComplete, this, callback,
start_time, base::Passed(&results), out_entry,
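
Putting the two new hooks in context: the backend's optimistic-create path (earlier in this patch) drives the entry roughly in the order below. This is a condensed sketch of the interplay, not a verbatim excerpt; error handling and the operation queue are omitted:

    // In SimpleBackendImpl::CreateEntry(), when a doom for the same key is
    // still in flight, the post-doom queue is empty, and the backend runs
    // with OPTIMISTIC_OPERATIONS:
    entry->SetCreatePendingDoom();  // CREATE_NORMAL -> CREATE_OPTIMISTIC_PENDING_DOOM
    post_doom->push_back(base::Bind(
        &SimpleEntryImpl::NotifyDoomBeforeCreateComplete, entry));

    // entry->CreateEntry(...) then returns net::OK to the caller right away,
    // but parks the entry in STATE_IO_PENDING so no disk I/O starts yet. A
    // DoomEntry() issued meanwhile only advances the state to
    // CREATE_OPTIMISTIC_PENDING_DOOM_FOLLOWED_BY_DOOM instead of calling
    // OnDoomStart(), since the backend is still tracking the earlier doom.
    //
    // When the earlier doom completes, the queued callback runs:
    //   NotifyDoomBeforeCreateComplete()
    //     - calls backend_->OnDoomStart() if a follow-up doom was recorded,
    //     - resets the state to STATE_UNINITIALIZED / CREATE_NORMAL,
    //     - lets RunNextOperationIfNeeded() start the real create.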
diff --git a/chromium/net/disk_cache/simple/simple_entry_impl.h b/chromium/net/disk_cache/simple/simple_entry_impl.h
index d79bc1ebc35..e2fddd5b74f 100644
--- a/chromium/net/disk_cache/simple/simple_entry_impl.h
+++ b/chromium/net/disk_cache/simple/simple_entry_impl.h
@@ -37,8 +37,9 @@ namespace disk_cache {
class BackendCleanupTracker;
class SimpleBackendImpl;
-class SimpleSynchronousEntry;
class SimpleEntryStat;
+class SimpleFileTracker;
+class SimpleSynchronousEntry;
struct SimpleEntryCreationResults;
// SimpleEntryImpl is the IO thread interface to an entry in the very simple
@@ -67,6 +68,7 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
uint64_t entry_hash,
OperationsMode operations_mode,
SimpleBackendImpl* backend,
+ SimpleFileTracker* file_tracker,
net::NetLog* net_log);
void SetActiveEntryProxy(
@@ -91,6 +93,12 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
// and it will be set.
void SetKey(const std::string& key);
+ // SetCreatePendingDoom() should be called before CreateEntry() if the
+ // creation should suceed optimistically but not do any I/O until
+ // NotifyDoomBeforeCreateComplete() is called.
+ void SetCreatePendingDoom();
+ void NotifyDoomBeforeCreateComplete();
+
// From Entry:
void Doom() override;
void Close() override;
@@ -338,6 +346,7 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
base::ThreadChecker io_thread_checker_;
const base::WeakPtr<SimpleBackendImpl> backend_;
+ SimpleFileTracker* const file_tracker_;
const net::CacheType cache_type_;
const scoped_refptr<base::TaskRunner> worker_pool_;
const base::FilePath path_;
@@ -361,6 +370,12 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
bool doomed_;
+ enum {
+ CREATE_NORMAL,
+ CREATE_OPTIMISTIC_PENDING_DOOM,
+ CREATE_OPTIMISTIC_PENDING_DOOM_FOLLOWED_BY_DOOM,
+ } optimistic_create_pending_doom_state_;
+
State state_;
// When possible, we compute a crc32, for the data in each entry as we read or
diff --git a/chromium/net/disk_cache/simple/simple_entry_operation.cc b/chromium/net/disk_cache/simple/simple_entry_operation.cc
index 7e2f7894a43..36ebde64269 100644
--- a/chromium/net/disk_cache/simple/simple_entry_operation.cc
+++ b/chromium/net/disk_cache/simple/simple_entry_operation.cc
@@ -51,7 +51,7 @@ SimpleEntryOperation::SimpleEntryOperation(const SimpleEntryOperation& other)
alone_in_queue_(other.alone_in_queue_) {
}
-SimpleEntryOperation::~SimpleEntryOperation() {}
+SimpleEntryOperation::~SimpleEntryOperation() = default;
// static
SimpleEntryOperation SimpleEntryOperation::OpenOperation(
diff --git a/chromium/net/disk_cache/simple/simple_file_tracker.cc b/chromium/net/disk_cache/simple/simple_file_tracker.cc
new file mode 100644
index 00000000000..77b650b53b1
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_file_tracker.cc
@@ -0,0 +1,206 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/simple/simple_file_tracker.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/files/file.h"
+#include "base/synchronization/lock.h"
+#include "net/disk_cache/simple/simple_synchronous_entry.h"
+
+namespace disk_cache {
+
+SimpleFileTracker::SimpleFileTracker() = default;
+
+SimpleFileTracker::~SimpleFileTracker() {
+ DCHECK(tracked_files_.empty());
+}
+
+void SimpleFileTracker::Register(const SimpleSynchronousEntry* owner,
+ SubFile subfile,
+ std::unique_ptr<base::File> file) {
+ base::AutoLock hold_lock(lock_);
+
+ // Make sure the list exists.
+ auto insert_status = tracked_files_.insert(std::make_pair(
+ owner->entry_file_key().entry_hash, std::vector<TrackedFiles>()));
+
+ std::vector<TrackedFiles>& candidates = insert_status.first->second;
+
+ // See if entry already exists, if not append.
+ TrackedFiles* owners_files = nullptr;
+ for (TrackedFiles& candidate : candidates) {
+ if (candidate.owner == owner) {
+ owners_files = &candidate;
+ break;
+ }
+ }
+
+ if (!owners_files) {
+ candidates.emplace_back();
+ owners_files = &candidates.back();
+ owners_files->owner = owner;
+ owners_files->key = owner->entry_file_key();
+ }
+
+ int file_index = static_cast<int>(subfile);
+ DCHECK_EQ(TrackedFiles::TF_NO_REGISTRATION, owners_files->state[file_index]);
+ owners_files->files[file_index] = std::move(file);
+ owners_files->state[file_index] = TrackedFiles::TF_REGISTERED;
+}
+
+SimpleFileTracker::FileHandle SimpleFileTracker::Acquire(
+ const SimpleSynchronousEntry* owner,
+ SubFile subfile) {
+ base::AutoLock hold_lock(lock_);
+ std::vector<TrackedFiles>::iterator owners_files = Find(owner);
+ int file_index = static_cast<int>(subfile);
+
+ DCHECK_EQ(TrackedFiles::TF_REGISTERED, owners_files->state[file_index]);
+ owners_files->state[file_index] = TrackedFiles::TF_ACQUIRED;
+ return FileHandle(this, owner, subfile,
+ owners_files->files[file_index].get());
+}
+
+bool SimpleFileTracker::TrackedFiles::Empty() const {
+ for (State s : state)
+ if (s != TF_NO_REGISTRATION)
+ return false;
+ return true;
+}
+
+void SimpleFileTracker::Release(const SimpleSynchronousEntry* owner,
+ SubFile subfile) {
+ std::unique_ptr<base::File> file_to_close;
+
+ {
+ base::AutoLock hold_lock(lock_);
+ std::vector<TrackedFiles>::iterator owners_files = Find(owner);
+ int file_index = static_cast<int>(subfile);
+
+ DCHECK(owners_files->state[file_index] == TrackedFiles::TF_ACQUIRED ||
+ owners_files->state[file_index] ==
+ TrackedFiles::TF_ACQUIRED_PENDING_CLOSE);
+
+ // Prepare to executed deferred close, if any.
+ if (owners_files->state[file_index] ==
+ TrackedFiles::TF_ACQUIRED_PENDING_CLOSE) {
+ file_to_close = PrepareClose(owners_files, file_index);
+ } else {
+ owners_files->state[file_index] = TrackedFiles::TF_REGISTERED;
+ }
+ }
+
+ // The destructor of file_to_close will close it if needed.
+}
+
+void SimpleFileTracker::Close(const SimpleSynchronousEntry* owner,
+ SubFile subfile) {
+ std::unique_ptr<base::File> file_to_close;
+
+ {
+ base::AutoLock hold_lock(lock_);
+ std::vector<TrackedFiles>::iterator owners_files = Find(owner);
+ int file_index = static_cast<int>(subfile);
+
+ DCHECK(owners_files->state[file_index] == TrackedFiles::TF_ACQUIRED ||
+ owners_files->state[file_index] == TrackedFiles::TF_REGISTERED);
+
+ if (owners_files->state[file_index] == TrackedFiles::TF_ACQUIRED) {
+ // The FD is currently acquired, so we can't clean up the TrackedFiles,
+ // just yet; even if this is the last close, so delay the close until it
+ // gets released.
+ owners_files->state[file_index] = TrackedFiles::TF_ACQUIRED_PENDING_CLOSE;
+ } else {
+ file_to_close = PrepareClose(owners_files, file_index);
+ }
+ }
+
+ // The destructor of file_to_close will close it if needed. Thing to watch
+ // for impl with stealing: race between bookkeeping above and actual
+ // close --- the FD is still alive for it.
+}
+
+bool SimpleFileTracker::IsEmptyForTesting() {
+ base::AutoLock hold_lock(lock_);
+ return tracked_files_.empty();
+}
+
+std::vector<SimpleFileTracker::TrackedFiles>::iterator SimpleFileTracker::Find(
+ const SimpleSynchronousEntry* owner) {
+ auto candidates = tracked_files_.find(owner->entry_file_key().entry_hash);
+ DCHECK(candidates != tracked_files_.end());
+ for (std::vector<TrackedFiles>::iterator i = candidates->second.begin();
+ i != candidates->second.end(); ++i) {
+ if (i->owner == owner) {
+ return i;
+ }
+ }
+ LOG(DFATAL) << "SimpleFileTracker operation on non-found entry";
+ return candidates->second.end();
+}
+
+std::unique_ptr<base::File> SimpleFileTracker::PrepareClose(
+ std::vector<TrackedFiles>::iterator owners_files,
+ int file_index) {
+ std::unique_ptr<base::File> file_out =
+ std::move(owners_files->files[file_index]);
+ owners_files->state[file_index] = TrackedFiles::TF_NO_REGISTRATION;
+ if (owners_files->Empty()) {
+ auto iter = tracked_files_.find(owners_files->key.entry_hash);
+ iter->second.erase(owners_files);
+ if (iter->second.empty())
+ tracked_files_.erase(iter);
+ }
+ return file_out;
+}
+
+SimpleFileTracker::FileHandle::FileHandle()
+ : file_tracker_(nullptr), entry_(nullptr), file_(nullptr) {}
+
+SimpleFileTracker::FileHandle::FileHandle(SimpleFileTracker* file_tracker,
+ const SimpleSynchronousEntry* entry,
+ SimpleFileTracker::SubFile subfile,
+ base::File* file)
+ : file_tracker_(file_tracker),
+ entry_(entry),
+ subfile_(subfile),
+ file_(file) {}
+
+SimpleFileTracker::FileHandle::FileHandle(FileHandle&& other) {
+ *this = std::move(other);
+}
+
+SimpleFileTracker::FileHandle::~FileHandle() {
+ if (entry_)
+ file_tracker_->Release(entry_, subfile_);
+}
+
+SimpleFileTracker::FileHandle& SimpleFileTracker::FileHandle::operator=(
+ FileHandle&& other) {
+ file_tracker_ = other.file_tracker_;
+ entry_ = other.entry_;
+ subfile_ = other.subfile_;
+ file_ = other.file_;
+ other.file_tracker_ = nullptr;
+ other.entry_ = nullptr;
+ other.file_ = nullptr;
+ return *this;
+}
+
+base::File* SimpleFileTracker::FileHandle::operator->() const {
+ return file_;
+}
+
+base::File* SimpleFileTracker::FileHandle::get() const {
+ return file_;
+}
+
+bool SimpleFileTracker::FileHandle::IsOK() const {
+ return file_ && file_->IsValid();
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_file_tracker.h b/chromium/net/disk_cache/simple/simple_file_tracker.h
new file mode 100644
index 00000000000..a438825ed20
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_file_tracker.h
@@ -0,0 +1,183 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SIMPLE_SIMPLE_FILE_TRACKER_H_
+#define NET_DISK_CACHE_SIMPLE_SIMPLE_FILE_TRACKER_H_
+
+#include <stdint.h>
+#include <algorithm>
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+#include "base/files/file.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "net/base/net_export.h"
+#include "net/disk_cache/simple/simple_entry_format.h"
+
+namespace disk_cache {
+
+class SimpleSynchronousEntry;
+
+// This keeps track of all the files SimpleCache has open, across all the
+// backend instancess, in order to prevent us from running out of file
+// descriptors.
+// TODO(morlovich): Actually implement closing and re-opening of things if we
+// run out.
+//
+// This class is thread-safe.
+class NET_EXPORT_PRIVATE SimpleFileTracker {
+ public:
+ enum class SubFile { FILE_0, FILE_1, FILE_SPARSE };
+
+ // A RAII helper that guards access to a file grabbed for use from
+ // SimpleFileTracker::Acquire(). While it's still alive, if IsOK() is true,
+ // then using the underlying base::File via get() or the -> operator will be
+ // safe.
+ //
+ // This class is movable but not copyable. It should only be used from a
+ // single logical sequence of execution, and should not outlive the
+ // corresponding SimpleSynchronousEntry.
+ class NET_EXPORT_PRIVATE FileHandle {
+ public:
+ FileHandle();
+ FileHandle(FileHandle&& other);
+ ~FileHandle();
+ FileHandle& operator=(FileHandle&& other);
+ base::File* operator->() const;
+ base::File* get() const;
+ // Returns true if this handle points to a valid file. This should normally
+ // be the first thing called on the object, after getting it from
+ // SimpleFileTracker::Acquire.
+ bool IsOK() const;
+
+ private:
+ friend class SimpleFileTracker;
+ FileHandle(SimpleFileTracker* file_tracker,
+ const SimpleSynchronousEntry* entry,
+ SimpleFileTracker::SubFile subfile,
+ base::File* file);
+
+ // All the pointer fields are nullptr in the default/moved away from form.
+ SimpleFileTracker* file_tracker_;
+ const SimpleSynchronousEntry* entry_;
+ SimpleFileTracker::SubFile subfile_;
+ base::File* file_;
+ DISALLOW_COPY_AND_ASSIGN(FileHandle);
+ };
+
+ struct EntryFileKey {
+ EntryFileKey() : entry_hash(0), doom_generation(0) {}
+ explicit EntryFileKey(uint64_t hash)
+ : entry_hash(hash), doom_generation(0) {}
+
+ uint64_t entry_hash;
+
+ // In case of a hash collision, there may be multiple SimpleEntryImpl's
+ // around which have the same entry_hash but different key. In that case,
+ // we doom all but the most recent one and this number will eventually be
+ // used to name the files for the doomed ones.
+ // 0 here means the entry is the active one, and is the only value that's
+ // presently in use here.
+ uint32_t doom_generation;
+ };
+
+ SimpleFileTracker();
+ ~SimpleFileTracker();
+
+ // Established |file| as what's backing |subfile| for |owner|. This is
+ // intended to be called when SimpleSynchronousEntry first sets up the file to
+ // transfer its ownership to SimpleFileTracker. Any Register() call must be
+ // eventually followed by a corresponding Close() call before the |owner| is
+ // destroyed.
+ void Register(const SimpleSynchronousEntry* owner,
+ SubFile subfile,
+ std::unique_ptr<base::File> file);
+
+ // Lends out a file to SimpleSynchronousEntry for use. SimpleFileTracker
+ // will ensure that it doesn't close the file until the handle is destroyed.
+ // The caller should check .IsOK() on the returned value before using it, as
+ // it's possible that the file had to be closed and re-opened due to FD
+ // pressure, and that open may have failed. This should not be called twice
+ // with the exact same arguments until the handle returned from the previous
+ // such call is destroyed.
+ FileHandle Acquire(const SimpleSynchronousEntry* owner, SubFile subfile);
+
+ // Tells SimpleFileTracker that SimpleSynchronousEntry will not be interested
+ // in the file further, so it can be closed and forgotten about. It's OK to
+ // call this while a handle to the file is alive, in which case the effect
+ // takes place after the handle is destroyed.
+ // If Close() has been called and the handle to the file is no longer alive,
+ // a new backing file can be established by calling Register() again.
+ void Close(const SimpleSynchronousEntry* owner, SubFile file);
+
+ // Returns true if there is no in-memory state around, e.g. everything got
+ // cleaned up. This is a test-only method since this object is expected to be
+ // shared between multiple threads, in which case its return value may be
+ // outdated the moment it's returned.
+ bool IsEmptyForTesting();
+
+ private:
+ struct TrackedFiles {
+ // We can potentially run through this state machine multiple times for
+ // FILE_1, as that's often missing, so SimpleSynchronousEntry can sometimes
+ // close and remove the file for an empty stream, then re-open it on actual
+ // data.
+ enum State {
+ TF_NO_REGISTRATION = 0,
+ TF_REGISTERED = 1,
+ TF_ACQUIRED = 2,
+ TF_ACQUIRED_PENDING_CLOSE = 3,
+ };
+
+ TrackedFiles() {
+ std::fill(state, state + kSimpleEntryTotalFileCount, TF_NO_REGISTRATION);
+ }
+
+ bool Empty() const;
+
+ // We use pointers to SimpleSynchronousEntry two ways:
+ // 1) As opaque keys. This is handy as it avoids having to compare paths in
+ // case multiple backends use the same key. Since we access the
+ // bookkeeping under |lock_|
+ //
+ // 2) To get info on the caller of our operation.
+ // Accessing |owner| from any other TrackedFiles would be unsafe (as it
+ // may be doing its own thing in a different thread).
+ const SimpleSynchronousEntry* owner;
+ EntryFileKey key;
+
+ // Some of these may be !IsValid(), if they are not open.
+ // Note that these are stored indirectly, since we hand out pointers to these,
+ // and we don't want those to become invalid if some other thread appends
+ // things here.
+ std::unique_ptr<base::File> files[kSimpleEntryTotalFileCount];
+
+ State state[kSimpleEntryTotalFileCount];
+ };
+
+ // Marks the file that was previously returned by Acquire as eligible for
+ // closing again. Called by ~FileHandle.
+ void Release(const SimpleSynchronousEntry* owner, SubFile subfile);
+
+ // Finds the TrackedFiles entry for |owner| in |tracked_files_|, if any.
+ std::vector<TrackedFiles>::iterator Find(const SimpleSynchronousEntry* owner);
+
+ // Handles the state transition of closing a file (when we are not deferring
+ // it), and moves the file out. Note that this may invalidate |owners_files|.
+ std::unique_ptr<base::File> PrepareClose(
+ std::vector<TrackedFiles>::iterator owners_files,
+ int file_index);
+
+ base::Lock lock_;
+ std::unordered_map<uint64_t, std::vector<TrackedFiles>> tracked_files_;
+
+ DISALLOW_COPY_AND_ASSIGN(SimpleFileTracker);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_FILE_TRACKER_H_
diff --git a/chromium/net/disk_cache/simple/simple_file_tracker_unittest.cc b/chromium/net/disk_cache/simple/simple_file_tracker_unittest.cc
new file mode 100644
index 00000000000..b09c5a125dd
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_file_tracker_unittest.cc
@@ -0,0 +1,221 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <string>
+
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "net/base/cache_type.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/simple/simple_file_tracker.h"
+#include "net/disk_cache/simple/simple_synchronous_entry.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace disk_cache {
+
+class SimpleFileTrackerTest : public DiskCacheTest {
+ public:
+ void DeleteSyncEntry(SimpleSynchronousEntry* entry) { delete entry; }
+
+ protected:
+ // A bit of messiness: we rely on the fixture being a friend of
+ // SimpleSynchronousEntry to be able to create/delete its instances.
+ class SyncEntryDeleter {
+ public:
+ SyncEntryDeleter(SimpleFileTrackerTest* fixture) : fixture_(fixture) {}
+ void operator()(SimpleSynchronousEntry* entry) {
+ fixture_->DeleteSyncEntry(entry);
+ }
+
+ private:
+ SimpleFileTrackerTest* fixture_;
+ };
+
+ using SyncEntryPointer =
+ std::unique_ptr<SimpleSynchronousEntry, SyncEntryDeleter>;
+
+ SyncEntryPointer MakeSyncEntry(uint64_t hash) {
+ return SyncEntryPointer(
+ new SimpleSynchronousEntry(net::DISK_CACHE, cache_path_, "dummy", hash,
+ /* had_index=*/true, &file_tracker_),
+ SyncEntryDeleter(this));
+ }
+
+ SimpleFileTracker file_tracker_;
+};
+
+TEST_F(SimpleFileTrackerTest, Basic) {
+ SyncEntryPointer entry = MakeSyncEntry(1);
+
+ // Just transfer some files to the tracker, and then do some I/O on getting
+ // them back.
+ base::FilePath path_0 = cache_path_.AppendASCII("file_0");
+ base::FilePath path_1 = cache_path_.AppendASCII("file_1");
+
+ std::unique_ptr<base::File> file_0 = std::make_unique<base::File>(
+ path_0, base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ std::unique_ptr<base::File> file_1 = std::make_unique<base::File>(
+ path_1, base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ ASSERT_TRUE(file_0->IsValid());
+ ASSERT_TRUE(file_1->IsValid());
+
+ file_tracker_.Register(entry.get(), SimpleFileTracker::SubFile::FILE_0,
+ std::move(file_0));
+ file_tracker_.Register(entry.get(), SimpleFileTracker::SubFile::FILE_1,
+ std::move(file_1));
+
+ base::StringPiece msg_0 = "Hello";
+ base::StringPiece msg_1 = "Worldish Place";
+
+ {
+ SimpleFileTracker::FileHandle borrow_0 =
+ file_tracker_.Acquire(entry.get(), SimpleFileTracker::SubFile::FILE_0);
+ SimpleFileTracker::FileHandle borrow_1 =
+ file_tracker_.Acquire(entry.get(), SimpleFileTracker::SubFile::FILE_1);
+
+ EXPECT_EQ(static_cast<int>(msg_0.size()),
+ borrow_0->Write(0, msg_0.data(), msg_0.size()));
+ EXPECT_EQ(static_cast<int>(msg_1.size()),
+ borrow_1->Write(0, msg_1.data(), msg_1.size()));
+
+ // For file 0 do release/close, for file 1 do close/release --- where
+ // release happens when borrow_{0,1} go out of scope.
+ file_tracker_.Close(entry.get(), SimpleFileTracker::SubFile::FILE_1);
+ }
+ file_tracker_.Close(entry.get(), SimpleFileTracker::SubFile::FILE_0);
+
+ // Verify contents.
+ std::string verify_0, verify_1;
+ EXPECT_TRUE(ReadFileToString(path_0, &verify_0));
+ EXPECT_TRUE(ReadFileToString(path_1, &verify_1));
+ EXPECT_EQ(msg_0, verify_0);
+ EXPECT_EQ(msg_1, verify_1);
+ EXPECT_TRUE(file_tracker_.IsEmptyForTesting());
+}
+
+TEST_F(SimpleFileTrackerTest, Collision) {
+ // Two entries with same key.
+ SyncEntryPointer entry = MakeSyncEntry(1);
+ SyncEntryPointer entry2 = MakeSyncEntry(1);
+
+ base::FilePath path = cache_path_.AppendASCII("file");
+ base::FilePath path2 = cache_path_.AppendASCII("file2");
+
+ std::unique_ptr<base::File> file = std::make_unique<base::File>(
+ path, base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ std::unique_ptr<base::File> file2 = std::make_unique<base::File>(
+ path2, base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ ASSERT_TRUE(file->IsValid());
+ ASSERT_TRUE(file2->IsValid());
+
+ file_tracker_.Register(entry.get(), SimpleFileTracker::SubFile::FILE_0,
+ std::move(file));
+ file_tracker_.Register(entry2.get(), SimpleFileTracker::SubFile::FILE_0,
+ std::move(file2));
+
+ base::StringPiece msg = "Alpha";
+ base::StringPiece msg2 = "Beta";
+
+ {
+ SimpleFileTracker::FileHandle borrow =
+ file_tracker_.Acquire(entry.get(), SimpleFileTracker::SubFile::FILE_0);
+ SimpleFileTracker::FileHandle borrow2 =
+ file_tracker_.Acquire(entry2.get(), SimpleFileTracker::SubFile::FILE_0);
+
+ EXPECT_EQ(static_cast<int>(msg.size()),
+ borrow->Write(0, msg.data(), msg.size()));
+ EXPECT_EQ(static_cast<int>(msg2.size()),
+ borrow2->Write(0, msg2.data(), msg2.size()));
+ }
+ file_tracker_.Close(entry.get(), SimpleFileTracker::SubFile::FILE_0);
+ file_tracker_.Close(entry2.get(), SimpleFileTracker::SubFile::FILE_0);
+
+ // Verify contents.
+ std::string verify, verify2;
+ EXPECT_TRUE(ReadFileToString(path, &verify));
+ EXPECT_TRUE(ReadFileToString(path2, &verify2));
+ EXPECT_EQ(msg, verify);
+ EXPECT_EQ(msg2, verify2);
+ EXPECT_TRUE(file_tracker_.IsEmptyForTesting());
+}
+
+TEST_F(SimpleFileTrackerTest, Reopen) {
+ // We may sometimes go Register -> Close -> Register, with the tracker's
+ // bookkeeping for the entry still alive.
+ SyncEntryPointer entry = MakeSyncEntry(1);
+
+ base::FilePath path_0 = cache_path_.AppendASCII("file_0");
+ base::FilePath path_1 = cache_path_.AppendASCII("file_1");
+
+ std::unique_ptr<base::File> file_0 = std::make_unique<base::File>(
+ path_0, base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ std::unique_ptr<base::File> file_1 = std::make_unique<base::File>(
+ path_1, base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ ASSERT_TRUE(file_0->IsValid());
+ ASSERT_TRUE(file_1->IsValid());
+
+ file_tracker_.Register(entry.get(), SimpleFileTracker::SubFile::FILE_0,
+ std::move(file_0));
+ file_tracker_.Register(entry.get(), SimpleFileTracker::SubFile::FILE_1,
+ std::move(file_1));
+
+ file_tracker_.Close(entry.get(), SimpleFileTracker::SubFile::FILE_1);
+ std::unique_ptr<base::File> file_1b = std::make_unique<base::File>(
+ path_1, base::File::FLAG_OPEN | base::File::FLAG_WRITE);
+ ASSERT_TRUE(file_1b->IsValid());
+ file_tracker_.Register(entry.get(), SimpleFileTracker::SubFile::FILE_1,
+ std::move(file_1b));
+ file_tracker_.Close(entry.get(), SimpleFileTracker::SubFile::FILE_0);
+ file_tracker_.Close(entry.get(), SimpleFileTracker::SubFile::FILE_1);
+ EXPECT_TRUE(file_tracker_.IsEmptyForTesting());
+}
+
+TEST_F(SimpleFileTrackerTest, PointerStability) {
+ // Make sure the FileHandle lent out doesn't get screwed up as we update
+ // the state (and potentially move the underlying base::File object around).
+ const int kEntries = 8;
+ SyncEntryPointer entries[kEntries] = {
+ MakeSyncEntry(1), MakeSyncEntry(1), MakeSyncEntry(1), MakeSyncEntry(1),
+ MakeSyncEntry(1), MakeSyncEntry(1), MakeSyncEntry(1), MakeSyncEntry(1),
+ };
+ std::unique_ptr<base::File> file_0 = std::make_unique<base::File>(
+ cache_path_.AppendASCII("0"),
+ base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ ASSERT_TRUE(file_0->IsValid());
+ file_tracker_.Register(entries[0].get(), SimpleFileTracker::SubFile::FILE_0,
+ std::move(file_0));
+
+ base::StringPiece msg = "Message to write";
+ {
+ SimpleFileTracker::FileHandle borrow = file_tracker_.Acquire(
+ entries[0].get(), SimpleFileTracker::SubFile::FILE_0);
+ for (int i = 1; i < kEntries; ++i) {
+ std::unique_ptr<base::File> file_n = std::make_unique<base::File>(
+ cache_path_.AppendASCII(base::IntToString(i)),
+ base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ ASSERT_TRUE(file_n->IsValid());
+ file_tracker_.Register(entries[i].get(),
+ SimpleFileTracker::SubFile::FILE_0,
+ std::move(file_n));
+ }
+
+ EXPECT_EQ(static_cast<int>(msg.size()),
+ borrow->Write(0, msg.data(), msg.size()));
+ }
+
+ for (int i = 0; i < kEntries; ++i)
+ file_tracker_.Close(entries[i].get(), SimpleFileTracker::SubFile::FILE_0);
+
+ // Verify the file.
+ std::string verify;
+ EXPECT_TRUE(ReadFileToString(cache_path_.AppendASCII("0"), &verify));
+ EXPECT_EQ(msg, verify);
+ EXPECT_TRUE(file_tracker_.IsEmptyForTesting());
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_index_file.cc b/chromium/net/disk_cache/simple/simple_index_file.cc
index 972422c1f63..d5aa642a9d0 100644
--- a/chromium/net/disk_cache/simple/simple_index_file.cc
+++ b/chromium/net/disk_cache/simple/simple_index_file.cc
@@ -210,8 +210,7 @@ SimpleIndexLoadResult::SimpleIndexLoadResult()
index_write_reason(SimpleIndex::INDEX_WRITE_REASON_MAX),
flush_required(false) {}
-SimpleIndexLoadResult::~SimpleIndexLoadResult() {
-}
+SimpleIndexLoadResult::~SimpleIndexLoadResult() = default;
void SimpleIndexLoadResult::Reset() {
did_load = false;
@@ -311,9 +310,6 @@ void SimpleIndexFile::SyncWriteToDisk(net::CacheType cache_type,
}
// Atomically rename the temporary index file to become the real one.
- // TODO(gavinp): DCHECK when not shutting down, since that is very strange.
- // The rename failing during shutdown is legal because it's legal to begin
- // erasing a cache as soon as the destructor has been called.
if (!base::ReplaceFile(temp_index_filename, index_filename, NULL))
return;
@@ -356,7 +352,7 @@ SimpleIndexFile::SimpleIndexFile(
temp_index_file_(cache_directory_.AppendASCII(kIndexDirectory)
.AppendASCII(kTempIndexFileName)) {}
-SimpleIndexFile::~SimpleIndexFile() {}
+SimpleIndexFile::~SimpleIndexFile() = default;
void SimpleIndexFile::LoadIndexEntries(base::Time cache_last_modified,
const base::Closure& callback,
diff --git a/chromium/net/disk_cache/simple/simple_index_file_unittest.cc b/chromium/net/disk_cache/simple/simple_index_file_unittest.cc
index 16e420e3a77..1e0254d457d 100644
--- a/chromium/net/disk_cache/simple/simple_index_file_unittest.cc
+++ b/chromium/net/disk_cache/simple/simple_index_file_unittest.cc
@@ -155,7 +155,7 @@ class WrappedSimpleIndexFile : public SimpleIndexFile {
base::ThreadTaskRunnerHandle::Get(),
net::DISK_CACHE,
index_file_directory) {}
- ~WrappedSimpleIndexFile() override {}
+ ~WrappedSimpleIndexFile() override = default;
const base::FilePath& GetIndexFilePath() const {
return index_file_;
@@ -402,7 +402,8 @@ TEST_F(SimpleIndexFileTest, SimpleCacheUpgrade) {
base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
disk_cache::SimpleBackendImpl* simple_cache =
new disk_cache::SimpleBackendImpl(
- cache_path, /* cleanup_tracker = */ nullptr, 0, net::DISK_CACHE,
+ cache_path, /* cleanup_tracker = */ nullptr,
+ /* file_tracker = */ nullptr, 0, net::DISK_CACHE,
cache_thread.task_runner(), /* net_log = */ nullptr);
net::TestCompletionCallback cb;
int rv = simple_cache->Init(cb.callback());
diff --git a/chromium/net/disk_cache/simple/simple_index_unittest.cc b/chromium/net/disk_cache/simple/simple_index_unittest.cc
index a2b4b4a00f4..03a6bedcdcb 100644
--- a/chromium/net/disk_cache/simple/simple_index_unittest.cc
+++ b/chromium/net/disk_cache/simple/simple_index_unittest.cc
@@ -312,7 +312,7 @@ TEST_F(SimpleIndexTest, Has) {
index()->Insert(kHash1);
EXPECT_TRUE(index()->Has(kHash1));
index()->Remove(kHash1);
- // TODO(rdsmith): Maybe return false on explicitly removed entries?
+ // TODO(morlovich): Maybe return false on explicitly removed entries?
EXPECT_TRUE(index()->Has(kHash1));
ReturnIndexFile();
@@ -587,7 +587,7 @@ TEST_F(SimpleIndexTest, BasicEviction) {
EXPECT_TRUE(index()->Has(hashes_.at<3>()));
// Trigger an eviction, and make sure the right things are tossed.
- // TODO(rdsmith): This is dependent on the innards of the implementation
+ // TODO(morlovich): This is dependent on the innards of the implementation
// as to at exactly what point we trigger eviction. Not sure how to fix
// that.
index()->UpdateEntrySize(hashes_.at<3>(), 475u);
@@ -621,7 +621,7 @@ TEST_F(SimpleIndexTest, EvictByLRU) {
EXPECT_TRUE(index()->Has(hashes_.at<3>()));
// Trigger an eviction, and make sure the right things are tossed.
- // TODO(rdsmith): This is dependent on the innards of the implementation
+ // TODO(morlovich): This is dependent on the innards of the implementation
// as to at exactly what point we trigger eviction. Not sure how to fix
// that.
index()->UpdateEntrySize(hashes_.at<3>(), 40000u);
@@ -652,7 +652,7 @@ TEST_F(SimpleIndexTest, EvictBySize) {
EXPECT_TRUE(index()->Has(hashes_.at<3>()));
// Trigger an eviction, and make sure the right things are tossed.
- // TODO(rdsmith): This is dependent on the innards of the implementation
+ // TODO(morlovich): This is dependent on the innards of the implementation
// as to at exactly what point we trigger eviction. Not sure how to fix
// that.
index()->UpdateEntrySize(hashes_.at<3>(), 40000u);
@@ -685,7 +685,7 @@ TEST_F(SimpleIndexTest, EvictBySize2) {
EXPECT_TRUE(index()->Has(hashes_.at<3>()));
// Trigger an eviction, and make sure the right things are tossed.
- // TODO(rdsmith): This is dependent on the innards of the implementation
+ // TODO(morlovich): This is dependent on the innards of the implementation
// as to at exactly what point we trigger eviction. Not sure how to fix
// that.
index()->UpdateEntrySize(hashes_.at<3>(), 40000u);
diff --git a/chromium/net/disk_cache/simple/simple_synchronous_entry.cc b/chromium/net/disk_cache/simple/simple_synchronous_entry.cc
index 455f00d96b1..74b07f632aa 100644
--- a/chromium/net/disk_cache/simple/simple_synchronous_entry.cc
+++ b/chromium/net/disk_cache/simple/simple_synchronous_entry.cc
@@ -107,11 +107,17 @@ void CalculateSHA256OfKey(const std::string& key,
hash->Finish(out_hash_value, sizeof(*out_hash_value));
}
+SimpleFileTracker::SubFile SubFileForFileIndex(int file_index) {
+ DCHECK_GT(kSimpleEntryNormalFileCount, file_index);
+ return file_index == 0 ? SimpleFileTracker::SubFile::FILE_0
+ : SimpleFileTracker::SubFile::FILE_1;
+}
+
} // namespace
using simple_util::GetEntryHashKey;
-using simple_util::GetFilenameFromEntryHashAndFileIndex;
-using simple_util::GetSparseFilenameFromEntryHash;
+using simple_util::GetFilenameFromEntryFileKeyAndFileIndex;
+using simple_util::GetSparseFilenameFromEntryFileKey;
using simple_util::GetHeaderSize;
using simple_util::GetDataSizeFromFileSize;
using simple_util::GetFileSizeFromDataSize;
@@ -181,14 +187,13 @@ int64_t SimpleEntryStat::GetFileSize(size_t key_length, int file_index) const {
SimpleStreamPrefetchData::SimpleStreamPrefetchData()
: stream_crc32(crc32(0, Z_NULL, 0)) {}
-SimpleStreamPrefetchData::~SimpleStreamPrefetchData() {}
+SimpleStreamPrefetchData::~SimpleStreamPrefetchData() = default;
SimpleEntryCreationResults::SimpleEntryCreationResults(
SimpleEntryStat entry_stat)
: sync_entry(NULL), entry_stat(entry_stat), result(net::OK) {}
-SimpleEntryCreationResults::~SimpleEntryCreationResults() {
-}
+SimpleEntryCreationResults::~SimpleEntryCreationResults() = default;
SimpleSynchronousEntry::CRCRecord::CRCRecord() : index(-1),
has_crc32(false),
@@ -231,13 +236,14 @@ void SimpleSynchronousEntry::OpenEntry(
const uint64_t entry_hash,
const bool had_index,
const base::TimeTicks& time_enqueued,
+ SimpleFileTracker* file_tracker,
SimpleEntryCreationResults* out_results) {
base::TimeTicks start_sync_open_entry = base::TimeTicks::Now();
SIMPLE_CACHE_UMA(TIMES, "QueueLatency.OpenEntry", cache_type,
(start_sync_open_entry - time_enqueued));
- SimpleSynchronousEntry* sync_entry =
- new SimpleSynchronousEntry(cache_type, path, key, entry_hash, had_index);
+ SimpleSynchronousEntry* sync_entry = new SimpleSynchronousEntry(
+ cache_type, path, key, entry_hash, had_index, file_tracker);
out_results->result = sync_entry->InitializeForOpen(
&out_results->entry_stat, out_results->stream_prefetch_data);
if (out_results->result != net::OK) {
@@ -261,14 +267,15 @@ void SimpleSynchronousEntry::CreateEntry(
const uint64_t entry_hash,
const bool had_index,
const base::TimeTicks& time_enqueued,
+ SimpleFileTracker* file_tracker,
SimpleEntryCreationResults* out_results) {
DCHECK_EQ(entry_hash, GetEntryHashKey(key));
base::TimeTicks start_sync_create_entry = base::TimeTicks::Now();
SIMPLE_CACHE_UMA(TIMES, "QueueLatency.CreateEntry", cache_type,
(start_sync_create_entry - time_enqueued));
- SimpleSynchronousEntry* sync_entry =
- new SimpleSynchronousEntry(cache_type, path, key, entry_hash, had_index);
+ SimpleSynchronousEntry* sync_entry = new SimpleSynchronousEntry(
+ cache_type, path, key, entry_hash, had_index, file_tracker);
out_results->result =
sync_entry->InitializeForCreate(&out_results->entry_stat);
if (out_results->result != net::OK) {
@@ -317,8 +324,11 @@ void SimpleSynchronousEntry::ReadData(const EntryOperationData& in_entry_op,
DCHECK(initialized_);
DCHECK_NE(0, in_entry_op.index);
int file_index = GetFileIndexFromStreamIndex(in_entry_op.index);
- if (header_and_key_check_needed_[file_index] &&
- !CheckHeaderAndKey(file_index)) {
+ SimpleFileTracker::FileHandle file =
+ file_tracker_->Acquire(this, SubFileForFileIndex(file_index));
+
+ if (!file.IsOK() || (header_and_key_check_needed_[file_index] &&
+ !CheckHeaderAndKey(file.get(), file_index))) {
*out_result = net::ERR_FAILED;
Doom();
return;
@@ -329,8 +339,8 @@ void SimpleSynchronousEntry::ReadData(const EntryOperationData& in_entry_op,
// be handled in the SimpleEntryImpl.
DCHECK_GT(in_entry_op.buf_len, 0);
DCHECK(!empty_file_omitted_[file_index]);
- int bytes_read = files_[file_index].Read(file_offset, out_buf->data(),
- in_entry_op.buf_len);
+ int bytes_read =
+ file->Read(file_offset, out_buf->data(), in_entry_op.buf_len);
if (bytes_read > 0) {
entry_stat->set_last_used(Time::Now());
if (crc_request != nullptr) {
@@ -341,8 +351,9 @@ void SimpleSynchronousEntry::ReadData(const EntryOperationData& in_entry_op,
in_entry_op.offset + bytes_read ==
entry_stat->data_size(in_entry_op.index)) {
crc_request->performed_verify = true;
- int checksum_result = CheckEOFRecord(in_entry_op.index, *entry_stat,
- crc_request->data_crc32);
+ int checksum_result =
+ CheckEOFRecord(file.get(), in_entry_op.index, *entry_stat,
+ crc_request->data_crc32);
if (checksum_result < 0) {
crc_request->verify_ok = false;
*out_result = checksum_result;
@@ -371,10 +382,14 @@ void SimpleSynchronousEntry::WriteData(const EntryOperationData& in_entry_op,
int index = in_entry_op.index;
int file_index = GetFileIndexFromStreamIndex(index);
if (header_and_key_check_needed_[file_index] &&
- !empty_file_omitted_[file_index] && !CheckHeaderAndKey(file_index)) {
- *out_result = net::ERR_FAILED;
- Doom();
- return;
+ !empty_file_omitted_[file_index]) {
+ SimpleFileTracker::FileHandle file =
+ file_tracker_->Acquire(this, SubFileForFileIndex(file_index));
+ if (!file.IsOK() || !CheckHeaderAndKey(file.get(), file_index)) {
+ *out_result = net::ERR_FAILED;
+ Doom();
+ return;
+ }
}
int offset = in_entry_op.offset;
int buf_len = in_entry_op.buf_len;
@@ -412,11 +427,21 @@ void SimpleSynchronousEntry::WriteData(const EntryOperationData& in_entry_op,
}
DCHECK(!empty_file_omitted_[file_index]);
+ // This needs to be grabbed after the above block, since that's what may
+ // create the file (for stream 2/file 1).
+ SimpleFileTracker::FileHandle file =
+ file_tracker_->Acquire(this, SubFileForFileIndex(file_index));
+ if (!file.IsOK()) {
+ *out_result = net::ERR_FAILED;
+ Doom();
+ return;
+ }
+
if (extending_by_write) {
// The EOF record and the eventual stream afterward need to be zeroed out.
const int64_t file_eof_offset =
out_entry_stat->GetEOFOffsetInFile(key_.size(), index);
- if (!files_[file_index].SetLength(file_eof_offset)) {
+ if (!file->SetLength(file_eof_offset)) {
RecordWriteResult(cache_type_, SYNC_WRITE_RESULT_PRETRUNCATE_FAILURE);
Doom();
*out_result = net::ERR_CACHE_WRITE_FAILURE;
@@ -424,8 +449,7 @@ void SimpleSynchronousEntry::WriteData(const EntryOperationData& in_entry_op,
}
}
if (buf_len > 0) {
- if (files_[file_index].Write(file_offset, in_buf->data(), buf_len) !=
- buf_len) {
+ if (file->Write(file_offset, in_buf->data(), buf_len) != buf_len) {
RecordWriteResult(cache_type_, SYNC_WRITE_RESULT_WRITE_FAILURE);
Doom();
*out_result = net::ERR_CACHE_WRITE_FAILURE;
@@ -439,7 +463,7 @@ void SimpleSynchronousEntry::WriteData(const EntryOperationData& in_entry_op,
out_entry_stat->set_data_size(index, offset + buf_len);
int file_eof_offset =
out_entry_stat->GetLastEOFOffsetInFile(key_.size(), index);
- if (!files_[file_index].SetLength(file_eof_offset)) {
+ if (!file->SetLength(file_eof_offset)) {
RecordWriteResult(cache_type_, SYNC_WRITE_RESULT_TRUNCATE_FAILURE);
Doom();
*out_result = net::ERR_CACHE_WRITE_FAILURE;
@@ -468,6 +492,19 @@ void SimpleSynchronousEntry::ReadSparseData(
char* buf = out_buf->data();
int read_so_far = 0;
+ if (!sparse_file_open()) {
+ *out_result = 0;
+ return;
+ }
+
+ SimpleFileTracker::FileHandle sparse_file =
+ file_tracker_->Acquire(this, SimpleFileTracker::SubFile::FILE_SPARSE);
+ if (!sparse_file.IsOK()) {
+ Doom();
+ *out_result = net::ERR_CACHE_READ_FAILURE;
+ return;
+ }
+
// Find the first sparse range at or after the requested offset.
SparseRangeIterator it = sparse_ranges_.lower_bound(offset);
@@ -488,7 +525,8 @@ void SimpleSynchronousEntry::ReadSparseData(
DCHECK_GE(range_len_after_offset, 0);
int len_to_read = std::min(buf_len, range_len_after_offset);
- if (!ReadSparseRange(found_range, net_offset, len_to_read, buf)) {
+ if (!ReadSparseRange(sparse_file.get(), found_range, net_offset,
+ len_to_read, buf)) {
Doom();
*out_result = net::ERR_CACHE_READ_FAILURE;
return;
@@ -507,7 +545,8 @@ void SimpleSynchronousEntry::ReadSparseData(
DCHECK_EQ(it->first, found_range->offset);
int range_len = base::saturated_cast<int>(found_range->length);
int len_to_read = std::min(buf_len - read_so_far, range_len);
- if (!ReadSparseRange(found_range, 0, len_to_read, buf + read_so_far)) {
+ if (!ReadSparseRange(sparse_file.get(), found_range, 0, len_to_read,
+ buf + read_so_far)) {
Doom();
*out_result = net::ERR_CACHE_READ_FAILURE;
return;
@@ -538,6 +577,13 @@ void SimpleSynchronousEntry::WriteSparseData(
*out_result = net::ERR_CACHE_WRITE_FAILURE;
return;
}
+ SimpleFileTracker::FileHandle sparse_file =
+ file_tracker_->Acquire(this, SimpleFileTracker::SubFile::FILE_SPARSE);
+ if (!sparse_file.IsOK()) {
+ Doom();
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
uint64_t sparse_data_size = out_entry_stat->sparse_data_size();
// This is a pessimistic estimate; it assumes the entire buffer is going to
@@ -545,7 +591,7 @@ void SimpleSynchronousEntry::WriteSparseData(
if (sparse_data_size + buf_len > max_sparse_data_size) {
DVLOG(1) << "Truncating sparse data file (" << sparse_data_size << " + "
<< buf_len << " > " << max_sparse_data_size << ")";
- TruncateSparseFile();
+ TruncateSparseFile(sparse_file.get());
out_entry_stat->set_sparse_data_size(0);
}
@@ -566,7 +612,8 @@ void SimpleSynchronousEntry::WriteSparseData(
DCHECK_GE(range_len_after_offset, 0);
int len_to_write = std::min(buf_len, range_len_after_offset);
- if (!WriteSparseRange(found_range, net_offset, len_to_write, buf)) {
+ if (!WriteSparseRange(sparse_file.get(), found_range, net_offset,
+ len_to_write, buf)) {
Doom();
*out_result = net::ERR_CACHE_WRITE_FAILURE;
return;
@@ -583,9 +630,8 @@ void SimpleSynchronousEntry::WriteSparseData(
if (offset + written_so_far < found_range->offset) {
int len_to_append =
static_cast<int>(found_range->offset - (offset + written_so_far));
- if (!AppendSparseRange(offset + written_so_far,
- len_to_append,
- buf + written_so_far)) {
+ if (!AppendSparseRange(sparse_file.get(), offset + written_so_far,
+ len_to_append, buf + written_so_far)) {
Doom();
*out_result = net::ERR_CACHE_WRITE_FAILURE;
return;
@@ -595,9 +641,7 @@ void SimpleSynchronousEntry::WriteSparseData(
}
int range_len = base::saturated_cast<int>(found_range->length);
int len_to_write = std::min(buf_len - written_so_far, range_len);
- if (!WriteSparseRange(found_range,
- 0,
- len_to_write,
+ if (!WriteSparseRange(sparse_file.get(), found_range, 0, len_to_write,
buf + written_so_far)) {
Doom();
*out_result = net::ERR_CACHE_WRITE_FAILURE;
@@ -609,9 +653,8 @@ void SimpleSynchronousEntry::WriteSparseData(
if (written_so_far < buf_len) {
int len_to_append = buf_len - written_so_far;
- if (!AppendSparseRange(offset + written_so_far,
- len_to_append,
- buf + written_so_far)) {
+ if (!AppendSparseRange(sparse_file.get(), offset + written_so_far,
+ len_to_append, buf + written_so_far)) {
Doom();
*out_result = net::ERR_CACHE_WRITE_FAILURE;
return;
@@ -668,14 +711,15 @@ void SimpleSynchronousEntry::GetAvailableRange(
*out_result = static_cast<int>(std::min(avail_so_far, len_from_start));
}
-int SimpleSynchronousEntry::CheckEOFRecord(int stream_index,
+int SimpleSynchronousEntry::CheckEOFRecord(base::File* file,
+ int stream_index,
const SimpleEntryStat& entry_stat,
uint32_t expected_crc32) {
DCHECK(initialized_);
SimpleFileEOF eof_record;
int file_offset = entry_stat.GetEOFOffsetInFile(key_.size(), stream_index);
int file_index = GetFileIndexFromStreamIndex(stream_index);
- int rv = GetEOFRecordData(base::StringPiece(), file_index, file_offset,
+ int rv = GetEOFRecordData(file, base::StringPiece(), file_index, file_offset,
&eof_record);
if (rv != net::OK) {
@@ -694,6 +738,7 @@ int SimpleSynchronousEntry::CheckEOFRecord(int stream_index,
}
int SimpleSynchronousEntry::PreReadStreamPayload(
+ base::File* file,
base::StringPiece file_0_prefetch,
int stream_index,
int extra_size,
@@ -707,8 +752,8 @@ int SimpleSynchronousEntry::PreReadStreamPayload(
out->data = new net::GrowableIOBuffer();
out->data->SetCapacity(read_size);
int file_offset = entry_stat.GetOffsetInFile(key_.size(), 0, stream_index);
- if (!ReadFromFileOrPrefetched(file_0_prefetch, 0, file_offset, read_size,
- out->data->data()))
+ if (!ReadFromFileOrPrefetched(file, file_0_prefetch, 0, file_offset,
+ read_size, out->data->data()))
return net::ERR_FAILED;
// Check the CRC32.
@@ -738,20 +783,28 @@ void SimpleSynchronousEntry::Close(
if (empty_file_omitted_[file_index])
continue;
+ SimpleFileTracker::FileHandle file =
+ file_tracker_->Acquire(this, SubFileForFileIndex(file_index));
+ if (!file.IsOK()) {
+ RecordCloseResult(cache_type_, CLOSE_RESULT_WRITE_FAILURE);
+ Doom();
+ break;
+ }
+
if (stream_index == 0) {
// Write stream 0 data.
int stream_0_offset = entry_stat.GetOffsetInFile(key_.size(), 0, 0);
- if (files_[0].Write(stream_0_offset, stream_0_data->data(),
- entry_stat.data_size(0)) != entry_stat.data_size(0)) {
+ if (file->Write(stream_0_offset, stream_0_data->data(),
+ entry_stat.data_size(0)) != entry_stat.data_size(0)) {
RecordCloseResult(cache_type_, CLOSE_RESULT_WRITE_FAILURE);
DVLOG(1) << "Could not write stream 0 data.";
Doom();
}
net::SHA256HashValue hash_value;
CalculateSHA256OfKey(key_, &hash_value);
- if (files_[0].Write(stream_0_offset + entry_stat.data_size(0),
- reinterpret_cast<char*>(hash_value.data),
- sizeof(hash_value)) != sizeof(hash_value)) {
+ if (file->Write(stream_0_offset + entry_stat.data_size(0),
+ reinterpret_cast<char*>(hash_value.data),
+ sizeof(hash_value)) != sizeof(hash_value)) {
RecordCloseResult(cache_type_, CLOSE_RESULT_WRITE_FAILURE);
DVLOG(1) << "Could not write stream 0 data.";
Doom();
@@ -771,17 +824,14 @@ void SimpleSynchronousEntry::Close(
// If stream 0 changed size, the file needs to be resized, otherwise the
// next open will yield wrong stream sizes. On stream 1 and stream 2 proper
// resizing of the file is handled in SimpleSynchronousEntry::WriteData().
- if (stream_index == 0 &&
- !files_[file_index].SetLength(eof_offset)) {
+ if (stream_index == 0 && !file->SetLength(eof_offset)) {
RecordCloseResult(cache_type_, CLOSE_RESULT_WRITE_FAILURE);
DVLOG(1) << "Could not truncate stream 0 file.";
Doom();
break;
}
- if (files_[file_index].Write(eof_offset,
- reinterpret_cast<const char*>(&eof_record),
- sizeof(eof_record)) !=
- sizeof(eof_record)) {
+ if (file->Write(eof_offset, reinterpret_cast<const char*>(&eof_record),
+ sizeof(eof_record)) != sizeof(eof_record)) {
RecordCloseResult(cache_type_, CLOSE_RESULT_WRITE_FAILURE);
DVLOG(1) << "Could not write eof record.";
Doom();
@@ -792,10 +842,13 @@ void SimpleSynchronousEntry::Close(
if (empty_file_omitted_[i])
continue;
- if (header_and_key_check_needed_[i] && !CheckHeaderAndKey(i)) {
- Doom();
+ if (header_and_key_check_needed_[i]) {
+ SimpleFileTracker::FileHandle file =
+ file_tracker_->Acquire(this, SubFileForFileIndex(i));
+ if (!file.IsOK() || !CheckHeaderAndKey(file.get(), i))
+ Doom();
}
- files_[i].Close();
+ file_tracker_->Close(this, SubFileForFileIndex(i));
const int64_t file_size = entry_stat.GetFileSize(key_.size(), i);
SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
"LastClusterSize", cache_type_,
@@ -807,8 +860,9 @@ void SimpleSynchronousEntry::Close(
cluster_loss * 100 / (cluster_loss + file_size)));
}
- if (sparse_file_open())
- sparse_file_.Close();
+ if (sparse_file_open()) {
+ CloseSparseFile();
+ }
if (files_created_) {
const int stream2_file_index = GetFileIndexFromStreamIndex(2);
@@ -826,14 +880,17 @@ SimpleSynchronousEntry::SimpleSynchronousEntry(net::CacheType cache_type,
const FilePath& path,
const std::string& key,
const uint64_t entry_hash,
- const bool had_index)
+ const bool had_index,
+ SimpleFileTracker* file_tracker)
: cache_type_(cache_type),
path_(path),
- entry_hash_(entry_hash),
+ entry_file_key_(entry_hash),
had_index_(had_index),
key_(key),
have_open_files_(false),
- initialized_(false) {
+ initialized_(false),
+ file_tracker_(file_tracker),
+ sparse_file_open_(false) {
for (int i = 0; i < kSimpleEntryNormalFileCount; ++i)
empty_file_omitted_[i] = false;
}
@@ -852,16 +909,22 @@ bool SimpleSynchronousEntry::MaybeOpenFile(
FilePath filename = GetFilenameFromFileIndex(file_index);
int flags = File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE |
File::FLAG_SHARE_DELETE;
- files_[file_index].Initialize(filename, flags);
- *out_error = files_[file_index].error_details();
+ std::unique_ptr<base::File> file =
+ std::make_unique<base::File>(filename, flags);
+ *out_error = file->error_details();
- if (CanOmitEmptyFile(file_index) && !files_[file_index].IsValid() &&
+ if (CanOmitEmptyFile(file_index) && !file->IsValid() &&
*out_error == File::FILE_ERROR_NOT_FOUND) {
empty_file_omitted_[file_index] = true;
return true;
}
- return files_[file_index].IsValid();
+ if (file->IsValid()) {
+ file_tracker_->Register(this, SubFileForFileIndex(file_index),
+ std::move(file));
+ return true;
+ }
+ return false;
}
bool SimpleSynchronousEntry::MaybeCreateFile(
@@ -878,32 +941,35 @@ bool SimpleSynchronousEntry::MaybeCreateFile(
FilePath filename = GetFilenameFromFileIndex(file_index);
int flags = File::FLAG_CREATE | File::FLAG_READ | File::FLAG_WRITE |
File::FLAG_SHARE_DELETE;
- files_[file_index].Initialize(filename, flags);
+ std::unique_ptr<base::File> file =
+ std::make_unique<base::File>(filename, flags);
// It's possible that the creation failed because someone deleted the
// directory (e.g. because someone pressed "clear cache" on Android).
// If so, we would keep failing for a while until periodic index snapshot
// re-creates the cache dir, so try to recover from it quickly here.
- if (!files_[file_index].IsValid() &&
- files_[file_index].error_details() == File::FILE_ERROR_NOT_FOUND &&
+ if (!file->IsValid() && file->error_details() == File::FILE_ERROR_NOT_FOUND &&
!base::DirectoryExists(path_)) {
if (base::CreateDirectory(path_))
- files_[file_index].Initialize(filename, flags);
+ file->Initialize(filename, flags);
}
- *out_error = files_[file_index].error_details();
- empty_file_omitted_[file_index] = false;
-
- return files_[file_index].IsValid();
+ *out_error = file->error_details();
+ if (file->IsValid()) {
+ file_tracker_->Register(this, SubFileForFileIndex(file_index),
+ std::move(file));
+ empty_file_omitted_[file_index] = false;
+ return true;
+ }
+ return false;
}
bool SimpleSynchronousEntry::OpenFiles(SimpleEntryStat* out_entry_stat) {
for (int i = 0; i < kSimpleEntryNormalFileCount; ++i) {
File::Error error;
if (!MaybeOpenFile(i, &error)) {
- // TODO(juliatuttle,gavinp): Remove one each of these triplets of
- // histograms. We can calculate the third as the sum or difference of the
- // other two.
+ // TODO(morlovich): Remove one each of these triplets of histograms. We
+ // can calculate the third as the sum or difference of the other two.
RecordSyncOpenResult(cache_type_, OPEN_ENTRY_PLATFORM_FILE_ERROR,
had_index_);
SIMPLE_CACHE_UMA(ENUMERATION,
@@ -935,7 +1001,9 @@ bool SimpleSynchronousEntry::OpenFiles(SimpleEntryStat* out_entry_stat) {
}
File::Info file_info;
- bool success = files_[i].GetInfo(&file_info);
+ SimpleFileTracker::FileHandle file =
+ file_tracker_->Acquire(this, SubFileForFileIndex(i));
+ bool success = file.IsOK() && file->GetInfo(&file_info);
base::Time file_last_modified;
if (!success) {
DLOG(WARNING) << "Could not get platform file info.";
@@ -982,9 +1050,8 @@ bool SimpleSynchronousEntry::CreateFiles(SimpleEntryStat* out_entry_stat) {
for (int i = 0; i < kSimpleEntryNormalFileCount; ++i) {
File::Error error;
if (!MaybeCreateFile(i, FILE_NOT_REQUIRED, &error)) {
- // TODO(juliatuttle,gavinp): Remove one each of these triplets of
- // histograms. We can calculate the third as the sum or difference of the
- // other two.
+ // TODO(morlovich): Remove one each of these triplets of histograms. We
+ // can calculate the third as the sum or difference of the other two.
RecordSyncCreateResult(CREATE_ENTRY_PLATFORM_FILE_ERROR, had_index_);
SIMPLE_CACHE_UMA(ENUMERATION,
"SyncCreatePlatformFileError", cache_type_,
@@ -1022,8 +1089,7 @@ void SimpleSynchronousEntry::CloseFile(int index) {
if (empty_file_omitted_[index]) {
empty_file_omitted_[index] = false;
} else {
- DCHECK(files_[index].IsValid());
- files_[index].Close();
+ file_tracker_->Close(this, SubFileForFileIndex(index));
}
}
@@ -1034,17 +1100,11 @@ void SimpleSynchronousEntry::CloseFiles() {
CloseSparseFile();
}
-bool SimpleSynchronousEntry::CheckHeaderAndKey(int file_index) {
- // TODO(gavinp): Frequently we are doing this at the same time as we read from
- // the beginning of an entry. It might improve performance to make a single
- // read(2) call rather than two separate reads. On the other hand, it would
- // mean an extra memory to memory copy. In the case where we are opening an
- // entry without a key, the kInitialHeaderRead setting means that we are
- // actually already reading stream 1 data here, and tossing it out.
+bool SimpleSynchronousEntry::CheckHeaderAndKey(base::File* file,
+ int file_index) {
std::vector<char> header_data(key_.empty() ? kInitialHeaderRead
: GetHeaderSize(key_.size()));
- int bytes_read =
- files_[file_index].Read(0, header_data.data(), header_data.size());
+ int bytes_read = file->Read(0, header_data.data(), header_data.size());
const SimpleFileHeader* header =
reinterpret_cast<const SimpleFileHeader*>(header_data.data());
@@ -1073,8 +1133,8 @@ bool SimpleSynchronousEntry::CheckHeaderAndKey(int file_index) {
int bytes_to_read = expected_header_size - old_size;
// This resize will invalidate iterators, since it is enlarging header_data.
header_data.resize(expected_header_size);
- int bytes_read = files_[file_index].Read(
- old_size, header_data.data() + old_size, bytes_to_read);
+ int bytes_read =
+ file->Read(old_size, header_data.data() + old_size, bytes_to_read);
if (bytes_read != bytes_to_read) {
RecordSyncOpenResult(cache_type_, OPEN_ENTRY_CANT_READ_KEY, had_index_);
return false;
@@ -1115,11 +1175,13 @@ int SimpleSynchronousEntry::InitializeForOpen(
continue;
if (key_.empty()) {
+ SimpleFileTracker::FileHandle file =
+ file_tracker_->Acquire(this, SubFileForFileIndex(i));
// If |key_| is empty, we were opened via the iterator interface, without
// knowing what our key is. We must therefore read the header immediately
// to discover it, so SimpleEntryImpl can make it available to
// disk_cache::Entry::GetKey().
- if (!CheckHeaderAndKey(i))
+ if (!file.IsOK() || !CheckHeaderAndKey(file.get(), i))
return net::ERR_FAILED;
} else {
// If we do know which key we are looking for, we still need to
@@ -1162,7 +1224,8 @@ int SimpleSynchronousEntry::InitializeForOpen(
out_entry_stat->data_size(2) == 0) {
DVLOG(1) << "Removing empty stream 2 file.";
CloseFile(stream2_file_index);
- DeleteFileForEntryHash(path_, entry_hash_, stream2_file_index);
+ DeleteFileForEntryHash(path_, entry_file_key_.entry_hash,
+ stream2_file_index);
empty_file_omitted_[stream2_file_index] = true;
removed_stream2 = true;
}
@@ -1178,6 +1241,13 @@ int SimpleSynchronousEntry::InitializeForOpen(
bool SimpleSynchronousEntry::InitializeCreatedFile(
int file_index,
CreateEntryResult* out_result) {
+ SimpleFileTracker::FileHandle file =
+ file_tracker_->Acquire(this, SubFileForFileIndex(file_index));
+ if (!file.IsOK()) {
+ *out_result = CREATE_ENTRY_CANT_WRITE_HEADER;
+ return false;
+ }
+
SimpleFileHeader header;
header.initial_magic_number = kSimpleInitialMagicNumber;
header.version = kSimpleEntryVersionOnDisk;
@@ -1185,15 +1255,14 @@ bool SimpleSynchronousEntry::InitializeCreatedFile(
header.key_length = key_.size();
header.key_hash = base::Hash(key_);
- int bytes_written = files_[file_index].Write(
- 0, reinterpret_cast<char*>(&header), sizeof(header));
+ int bytes_written =
+ file->Write(0, reinterpret_cast<char*>(&header), sizeof(header));
if (bytes_written != sizeof(header)) {
*out_result = CREATE_ENTRY_CANT_WRITE_HEADER;
return false;
}
- bytes_written = files_[file_index].Write(sizeof(header), key_.data(),
- key_.size());
+ bytes_written = file->Write(sizeof(header), key_.data(), key_.size());
if (bytes_written != base::checked_cast<int>(key_.size())) {
*out_result = CREATE_ENTRY_CANT_WRITE_KEY;
return false;
@@ -1228,6 +1297,11 @@ int SimpleSynchronousEntry::ReadAndValidateStream0AndMaybe1(
int file_size,
SimpleEntryStat* out_entry_stat,
SimpleStreamPrefetchData stream_prefetch_data[2]) {
+ SimpleFileTracker::FileHandle file =
+ file_tracker_->Acquire(this, SubFileForFileIndex(0));
+ if (!file.IsOK())
+ return net::ERR_FAILED;
+
// If the file is sufficiently small, we will prefetch everything --
// in which case |prefetch_buf| will be non-null, and we should look at it
// rather than call ::Read for the bits.
@@ -1239,7 +1313,7 @@ int SimpleSynchronousEntry::ReadAndValidateStream0AndMaybe1(
} else {
RecordWhetherOpenDidPrefetch(cache_type_, true);
prefetch_buf = std::make_unique<char[]>(file_size);
- if (files_[0].Read(0, prefetch_buf.get(), file_size) != file_size)
+ if (file->Read(0, prefetch_buf.get(), file_size) != file_size)
return net::ERR_FAILED;
file_0_prefetch.set(prefetch_buf.get(), file_size);
}
@@ -1248,7 +1322,7 @@ int SimpleSynchronousEntry::ReadAndValidateStream0AndMaybe1(
// out file 0's layout.
SimpleFileEOF stream_0_eof;
int rv = GetEOFRecordData(
- file_0_prefetch, /* file_index = */ 0,
+ file.get(), file_0_prefetch, /* file_index = */ 0,
/* file_offset = */ file_size - sizeof(SimpleFileEOF), &stream_0_eof);
if (rv != net::OK)
return rv;
@@ -1276,7 +1350,7 @@ int SimpleSynchronousEntry::ReadAndValidateStream0AndMaybe1(
out_entry_stat->set_data_size(1, stream1_size);
// Put stream 0 data in memory --- plus maybe the sha256(key) footer.
- rv = PreReadStreamPayload(file_0_prefetch, /* stream_index = */ 0,
+ rv = PreReadStreamPayload(file.get(), file_0_prefetch, /* stream_index = */ 0,
extra_post_stream_0_read, *out_entry_stat,
stream_0_eof, &stream_prefetch_data[0]);
if (rv != net::OK)
@@ -1287,13 +1361,14 @@ int SimpleSynchronousEntry::ReadAndValidateStream0AndMaybe1(
if (prefetch_buf && has_key_sha256) {
SimpleFileEOF stream_1_eof;
rv = GetEOFRecordData(
- file_0_prefetch, /* file_index = */ 0,
+ file.get(), file_0_prefetch, /* file_index = */ 0,
out_entry_stat->GetEOFOffsetInFile(key_.size(), /* stream_index = */ 1),
&stream_1_eof);
if (rv != net::OK)
return rv;
- rv = PreReadStreamPayload(file_0_prefetch, /* stream_index = */ 1,
+ rv = PreReadStreamPayload(file.get(), file_0_prefetch,
+ /* stream_index = */ 1,
/* extra_size = */ 0, *out_entry_stat,
stream_1_eof, &stream_prefetch_data[1]);
if (rv != net::OK)
@@ -1321,19 +1396,20 @@ int SimpleSynchronousEntry::ReadAndValidateStream0AndMaybe1(
// Ensure the key is validated before completion.
if (!has_key_sha256 && header_and_key_check_needed_[0])
- CheckHeaderAndKey(0);
+ CheckHeaderAndKey(file.get(), 0);
return net::OK;
}
bool SimpleSynchronousEntry::ReadFromFileOrPrefetched(
+ base::File* file,
base::StringPiece file_0_prefetch,
int file_index,
int offset,
int size,
char* dest) {
if (file_0_prefetch.empty() || file_index != 0) {
- return files_[file_index].Read(offset, dest, size) == size;
+ return file->Read(offset, dest, size) == size;
} else {
if (offset < 0 || size < 0)
return false;
@@ -1357,11 +1433,12 @@ bool SimpleSynchronousEntry::ReadFromFileOrPrefetched(
}
}
-int SimpleSynchronousEntry::GetEOFRecordData(base::StringPiece file_0_prefetch,
+int SimpleSynchronousEntry::GetEOFRecordData(base::File* file,
+ base::StringPiece file_0_prefetch,
int file_index,
int file_offset,
SimpleFileEOF* eof_record) {
- if (!ReadFromFileOrPrefetched(file_0_prefetch, file_index, file_offset,
+ if (!ReadFromFileOrPrefetched(file, file_0_prefetch, file_index, file_offset,
sizeof(SimpleFileEOF),
reinterpret_cast<char*>(eof_record))) {
RecordCheckEOFResult(cache_type_, CHECK_EOF_RESULT_READ_FAILURE);
@@ -1383,15 +1460,16 @@ int SimpleSynchronousEntry::GetEOFRecordData(base::StringPiece file_0_prefetch,
}
void SimpleSynchronousEntry::Doom() const {
- DeleteFilesForEntryHash(path_, entry_hash_);
+ DCHECK_EQ(0u, entry_file_key_.doom_generation);
+ DeleteFilesForEntryHash(path_, entry_file_key_.entry_hash);
}
// static
bool SimpleSynchronousEntry::DeleteFileForEntryHash(const FilePath& path,
const uint64_t entry_hash,
const int file_index) {
- FilePath to_delete = path.AppendASCII(
- GetFilenameFromEntryHashAndFileIndex(entry_hash, file_index));
+ FilePath to_delete = path.AppendASCII(GetFilenameFromEntryFileKeyAndFileIndex(
+ SimpleFileTracker::EntryFileKey(entry_hash), file_index));
return simple_util::SimpleCacheDeleteFile(to_delete);
}
@@ -1404,8 +1482,8 @@ bool SimpleSynchronousEntry::DeleteFilesForEntryHash(
if (!DeleteFileForEntryHash(path, entry_hash, i) && !CanOmitEmptyFile(i))
result = false;
}
- FilePath to_delete = path.AppendASCII(
- GetSparseFilenameFromEntryHash(entry_hash));
+ FilePath to_delete = path.AppendASCII(GetSparseFilenameFromEntryFileKey(
+ SimpleFileTracker::EntryFileKey(entry_hash)));
simple_util::SimpleCacheDeleteFile(to_delete);
return result;
}
@@ -1414,15 +1492,16 @@ bool SimpleSynchronousEntry::DeleteFilesForEntryHash(
bool SimpleSynchronousEntry::TruncateFilesForEntryHash(
const FilePath& path,
const uint64_t entry_hash) {
+ SimpleFileTracker::EntryFileKey file_key(entry_hash);
bool result = true;
for (int i = 0; i < kSimpleEntryNormalFileCount; ++i) {
FilePath filename_to_truncate =
- path.AppendASCII(GetFilenameFromEntryHashAndFileIndex(entry_hash, i));
+ path.AppendASCII(GetFilenameFromEntryFileKeyAndFileIndex(file_key, i));
if (!TruncatePath(filename_to_truncate))
result = false;
}
FilePath to_delete =
- path.AppendASCII(GetSparseFilenameFromEntryHash(entry_hash));
+ path.AppendASCII(GetSparseFilenameFromEntryFileKey(file_key));
TruncatePath(to_delete);
return result;
}
@@ -1445,48 +1524,62 @@ void SimpleSynchronousEntry::RecordSyncCreateResult(CreateEntryResult result,
FilePath SimpleSynchronousEntry::GetFilenameFromFileIndex(int file_index) {
return path_.AppendASCII(
- GetFilenameFromEntryHashAndFileIndex(entry_hash_, file_index));
+ GetFilenameFromEntryFileKeyAndFileIndex(entry_file_key_, file_index));
}
bool SimpleSynchronousEntry::OpenSparseFileIfExists(
int32_t* out_sparse_data_size) {
DCHECK(!sparse_file_open());
- FilePath filename = path_.AppendASCII(
- GetSparseFilenameFromEntryHash(entry_hash_));
+ FilePath filename =
+ path_.AppendASCII(GetSparseFilenameFromEntryFileKey(entry_file_key_));
int flags = File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE |
File::FLAG_SHARE_DELETE;
- sparse_file_.Initialize(filename, flags);
- if (sparse_file_.IsValid())
- return ScanSparseFile(out_sparse_data_size);
+ std::unique_ptr<base::File> sparse_file =
+ std::make_unique<base::File>(filename, flags);
+ if (!sparse_file->IsValid())
+ // No file -> OK, file open error -> trouble.
+ return sparse_file->error_details() == File::FILE_ERROR_NOT_FOUND;
+
+ if (!ScanSparseFile(sparse_file.get(), out_sparse_data_size))
+ return false;
- return sparse_file_.error_details() == File::FILE_ERROR_NOT_FOUND;
+ file_tracker_->Register(this, SimpleFileTracker::SubFile::FILE_SPARSE,
+ std::move(sparse_file));
+ sparse_file_open_ = true;
+ return true;
}
bool SimpleSynchronousEntry::CreateSparseFile() {
DCHECK(!sparse_file_open());
- FilePath filename = path_.AppendASCII(
- GetSparseFilenameFromEntryHash(entry_hash_));
+ FilePath filename =
+ path_.AppendASCII(GetSparseFilenameFromEntryFileKey(entry_file_key_));
int flags = File::FLAG_CREATE | File::FLAG_READ | File::FLAG_WRITE |
File::FLAG_SHARE_DELETE;
- sparse_file_.Initialize(filename, flags);
- if (!sparse_file_.IsValid())
+ std::unique_ptr<base::File> sparse_file =
+ std::make_unique<base::File>(filename, flags);
+ if (!sparse_file->IsValid())
return false;
-
- return InitializeSparseFile();
+ if (!InitializeSparseFile(sparse_file.get()))
+ return false;
+ file_tracker_->Register(this, SimpleFileTracker::SubFile::FILE_SPARSE,
+ std::move(sparse_file));
+ sparse_file_open_ = true;
+ return true;
}
void SimpleSynchronousEntry::CloseSparseFile() {
DCHECK(sparse_file_open());
- sparse_file_.Close();
+ file_tracker_->Close(this, SimpleFileTracker::SubFile::FILE_SPARSE);
+ sparse_file_open_ = false;
}
-bool SimpleSynchronousEntry::TruncateSparseFile() {
+bool SimpleSynchronousEntry::TruncateSparseFile(base::File* sparse_file) {
DCHECK(sparse_file_open());
int64_t header_and_key_length = sizeof(SimpleFileHeader) + key_.size();
- if (!sparse_file_.SetLength(header_and_key_length)) {
+ if (!sparse_file->SetLength(header_and_key_length)) {
DLOG(WARNING) << "Could not truncate sparse file";
return false;
}
@@ -1497,9 +1590,7 @@ bool SimpleSynchronousEntry::TruncateSparseFile() {
return true;
}
-bool SimpleSynchronousEntry::InitializeSparseFile() {
- DCHECK(sparse_file_open());
-
+bool SimpleSynchronousEntry::InitializeSparseFile(base::File* sparse_file) {
SimpleFileHeader header;
header.initial_magic_number = kSimpleInitialMagicNumber;
header.version = kSimpleVersion;
@@ -1507,14 +1598,14 @@ bool SimpleSynchronousEntry::InitializeSparseFile() {
header.key_hash = base::Hash(key_);
int header_write_result =
- sparse_file_.Write(0, reinterpret_cast<char*>(&header), sizeof(header));
+ sparse_file->Write(0, reinterpret_cast<char*>(&header), sizeof(header));
if (header_write_result != sizeof(header)) {
DLOG(WARNING) << "Could not write sparse file header";
return false;
}
- int key_write_result = sparse_file_.Write(sizeof(header), key_.data(),
- key_.size());
+ int key_write_result =
+ sparse_file->Write(sizeof(header), key_.data(), key_.size());
if (key_write_result != base::checked_cast<int>(key_.size())) {
DLOG(WARNING) << "Could not write sparse file key";
return false;
@@ -1526,14 +1617,13 @@ bool SimpleSynchronousEntry::InitializeSparseFile() {
return true;
}
-bool SimpleSynchronousEntry::ScanSparseFile(int32_t* out_sparse_data_size) {
- DCHECK(sparse_file_open());
-
+bool SimpleSynchronousEntry::ScanSparseFile(base::File* sparse_file,
+ int32_t* out_sparse_data_size) {
int64_t sparse_data_size = 0;
SimpleFileHeader header;
int header_read_result =
- sparse_file_.Read(0, reinterpret_cast<char*>(&header), sizeof(header));
+ sparse_file->Read(0, reinterpret_cast<char*>(&header), sizeof(header));
if (header_read_result != sizeof(header)) {
DLOG(WARNING) << "Could not read header from sparse file.";
return false;
@@ -1555,10 +1645,9 @@ bool SimpleSynchronousEntry::ScanSparseFile(int32_t* out_sparse_data_size) {
int64_t range_header_offset = sizeof(header) + key_.size();
while (1) {
SimpleFileSparseRangeHeader range_header;
- int range_header_read_result =
- sparse_file_.Read(range_header_offset,
- reinterpret_cast<char*>(&range_header),
- sizeof(range_header));
+ int range_header_read_result = sparse_file->Read(
+ range_header_offset, reinterpret_cast<char*>(&range_header),
+ sizeof(range_header));
if (range_header_read_result == 0)
break;
if (range_header_read_result != sizeof(range_header)) {
@@ -1591,14 +1680,17 @@ bool SimpleSynchronousEntry::ScanSparseFile(int32_t* out_sparse_data_size) {
return true;
}
-bool SimpleSynchronousEntry::ReadSparseRange(const SparseRange* range,
- int offset, int len, char* buf) {
+bool SimpleSynchronousEntry::ReadSparseRange(base::File* sparse_file,
+ const SparseRange* range,
+ int offset,
+ int len,
+ char* buf) {
DCHECK(range);
DCHECK(buf);
DCHECK_LE(offset, range->length);
DCHECK_LE(offset + len, range->length);
- int bytes_read = sparse_file_.Read(range->file_offset + offset, buf, len);
+ int bytes_read = sparse_file->Read(range->file_offset + offset, buf, len);
if (bytes_read < len) {
DLOG(WARNING) << "Could not read sparse range.";
return false;
@@ -1611,13 +1703,15 @@ bool SimpleSynchronousEntry::ReadSparseRange(const SparseRange* range,
return false;
}
}
- // TODO(juliatuttle): Incremental crc32 calculation?
+ // TODO(morlovich): Incremental crc32 calculation?
return true;
}
-bool SimpleSynchronousEntry::WriteSparseRange(SparseRange* range,
- int offset, int len,
+bool SimpleSynchronousEntry::WriteSparseRange(base::File* sparse_file,
+ SparseRange* range,
+ int offset,
+ int len,
const char* buf) {
DCHECK(range);
DCHECK(buf);
@@ -1638,16 +1732,16 @@ bool SimpleSynchronousEntry::WriteSparseRange(SparseRange* range,
header.length = range->length;
header.data_crc32 = range->data_crc32;
- int bytes_written = sparse_file_.Write(range->file_offset - sizeof(header),
- reinterpret_cast<char*>(&header),
- sizeof(header));
+ int bytes_written =
+ sparse_file->Write(range->file_offset - sizeof(header),
+ reinterpret_cast<char*>(&header), sizeof(header));
if (bytes_written != base::checked_cast<int>(sizeof(header))) {
DLOG(WARNING) << "Could not rewrite sparse range header.";
return false;
}
}
- int bytes_written = sparse_file_.Write(range->file_offset + offset, buf, len);
+ int bytes_written = sparse_file->Write(range->file_offset + offset, buf, len);
if (bytes_written < len) {
DLOG(WARNING) << "Could not write sparse range.";
return false;
@@ -1656,7 +1750,8 @@ bool SimpleSynchronousEntry::WriteSparseRange(SparseRange* range,
return true;
}
-bool SimpleSynchronousEntry::AppendSparseRange(int64_t offset,
+bool SimpleSynchronousEntry::AppendSparseRange(base::File* sparse_file,
+ int64_t offset,
int len,
const char* buf) {
DCHECK_GE(offset, 0);
@@ -1671,16 +1766,15 @@ bool SimpleSynchronousEntry::AppendSparseRange(int64_t offset,
header.length = len;
header.data_crc32 = data_crc32;
- int bytes_written = sparse_file_.Write(sparse_tail_offset_,
- reinterpret_cast<char*>(&header),
- sizeof(header));
+ int bytes_written = sparse_file->Write(
+ sparse_tail_offset_, reinterpret_cast<char*>(&header), sizeof(header));
if (bytes_written != base::checked_cast<int>(sizeof(header))) {
DLOG(WARNING) << "Could not append sparse range header.";
return false;
}
sparse_tail_offset_ += bytes_written;
- bytes_written = sparse_file_.Write(sparse_tail_offset_, buf, len);
+ bytes_written = sparse_file->Write(sparse_tail_offset_, buf, len);
if (bytes_written < len) {
DLOG(WARNING) << "Could not append sparse range data.";
return false;
diff --git a/chromium/net/disk_cache/simple/simple_synchronous_entry.h b/chromium/net/disk_cache/simple/simple_synchronous_entry.h
index 1ddda90be1e..38779a7ef7e 100644
--- a/chromium/net/disk_cache/simple/simple_synchronous_entry.h
+++ b/chromium/net/disk_cache/simple/simple_synchronous_entry.h
@@ -24,6 +24,7 @@
#include "net/base/cache_type.h"
#include "net/base/net_export.h"
#include "net/disk_cache/simple/simple_entry_format.h"
+#include "net/disk_cache/simple/simple_file_tracker.h"
namespace net {
class GrowableIOBuffer;
@@ -163,6 +164,7 @@ class SimpleSynchronousEntry {
uint64_t entry_hash,
bool had_index,
const base::TimeTicks& time_enqueued,
+ SimpleFileTracker* file_tracker,
SimpleEntryCreationResults* out_results);
static void CreateEntry(net::CacheType cache_type,
@@ -171,6 +173,7 @@ class SimpleSynchronousEntry {
uint64_t entry_hash,
bool had_index,
const base::TimeTicks& time_enqueued,
+ SimpleFileTracker* file_tracker,
SimpleEntryCreationResults* out_results);
// Deletes an entry from the file system without affecting the state of the
@@ -207,7 +210,8 @@ class SimpleSynchronousEntry {
net::IOBuffer* in_buf,
SimpleEntryStat* out_entry_stat,
int* out_result);
- int CheckEOFRecord(int stream_index,
+ int CheckEOFRecord(base::File* file,
+ int stream_index,
const SimpleEntryStat& entry_stat,
uint32_t expected_crc32);
@@ -232,10 +236,14 @@ class SimpleSynchronousEntry {
const base::FilePath& path() const { return path_; }
std::string key() const { return key_; }
+ const SimpleFileTracker::EntryFileKey& entry_file_key() const {
+ return entry_file_key_;
+ }
private:
FRIEND_TEST_ALL_PREFIXES(::DiskCacheBackendTest,
SimpleCacheEnumerationLongKeys);
+ friend class SimpleFileTrackerTest;
enum CreateEntryResult {
CREATE_ENTRY_SUCCESS = 0,
@@ -266,15 +274,17 @@ class SimpleSynchronousEntry {
// make it likely the entire key is read.
static const size_t kInitialHeaderRead = 64 * 1024;
- SimpleSynchronousEntry(net::CacheType cache_type,
- const base::FilePath& path,
- const std::string& key,
- uint64_t entry_hash,
- bool had_index);
+ NET_EXPORT_PRIVATE SimpleSynchronousEntry(
+ net::CacheType cache_type,
+ const base::FilePath& path,
+ const std::string& key,
+ uint64_t entry_hash,
+ bool had_index,
+ SimpleFileTracker* simple_file_tracker);
// Like Entry, the SimpleSynchronousEntry self releases when Close() is
// called.
- ~SimpleSynchronousEntry();
+ NET_EXPORT_PRIVATE ~SimpleSynchronousEntry();
// Tries to open one of the cache entry files. Succeeds if the open succeeds
// or if the file was not found and is allowed to be omitted if the
@@ -296,7 +306,7 @@ class SimpleSynchronousEntry {
// they are correct. If this entry was opened with a key, the key is checked
// for a match. If not, then the |key_| member is set based on the value in
// this header. Records histograms if any check is failed.
- bool CheckHeaderAndKey(int file_index);
+ bool CheckHeaderAndKey(base::File* file, int file_index);
// Returns a net error, i.e. net::OK on success.
int InitializeForOpen(SimpleEntryStat* out_entry_stat,
@@ -320,31 +330,34 @@ class SimpleSynchronousEntry {
SimpleStreamPrefetchData stream_prefetch_data[2]);
// Reads the EOF record located at |file_offset| in file |file_index|,
- // with |file_0_prefetch| ptentially having prefetched file 0 content.
+ // with |file_0_prefetch| potentially having prefetched file 0 content.
// Puts the result into |*eof_record| and sanity-checks it.
// Returns net status, and records any failures to UMA.
- int GetEOFRecordData(base::StringPiece file_0_prefetch,
+ int GetEOFRecordData(base::File* file,
+ base::StringPiece file_0_prefetch,
int file_index,
int file_offset,
SimpleFileEOF* eof_record);
- // Reads either from |file_0_prefetch| or files_[file_index].
+ // Reads either from |file_0_prefetch| or |file|.
// Range-checks all the in-memory reads.
- bool ReadFromFileOrPrefetched(base::StringPiece file_0_prefetch,
+ bool ReadFromFileOrPrefetched(base::File* file,
+ base::StringPiece file_0_prefetch,
int file_index,
int offset,
int size,
char* dest);
// Extracts out the payload of stream |stream_index|, reading either from
- // |file_0_prefetch|, if available, or the file. |entry_stat| will be used to
+ // |file_0_prefetch|, if available, or |file|. |entry_stat| will be used to
// determine file layout, though |extra_size| additional bytes will be read
// past the stream payload end.
//
// |*stream_data| will be pointed to a fresh buffer with the results,
// and |*out_crc32| will get the checksum, which will be verified against
// |eof_record|.
- int PreReadStreamPayload(base::StringPiece file_0_prefetch,
+ int PreReadStreamPayload(base::File* file,
+ base::StringPiece file_0_prefetch,
int stream_index,
int extra_size,
const SimpleEntryStat& entry_stat,
@@ -363,28 +376,37 @@ class SimpleSynchronousEntry {
void CloseSparseFile();
// Writes the header to the (newly-created) sparse file.
- bool InitializeSparseFile();
+ bool InitializeSparseFile(base::File* file);
// Removes all but the header of the sparse file.
- bool TruncateSparseFile();
+ bool TruncateSparseFile(base::File* sparse_file);
// Scans the existing ranges in the sparse file. Populates |sparse_ranges_|
// and sets |*out_sparse_data_size| to the total size of all the ranges (not
// including headers).
- bool ScanSparseFile(int32_t* out_sparse_data_size);
+ bool ScanSparseFile(base::File* sparse_file, int32_t* out_sparse_data_size);
// Reads from a single sparse range. If asked to read the entire range, also
// verifies the CRC32.
- bool ReadSparseRange(const SparseRange* range,
- int offset, int len, char* buf);
+ bool ReadSparseRange(base::File* sparse_file,
+ const SparseRange* range,
+ int offset,
+ int len,
+ char* buf);
// Writes to a single (existing) sparse range. If asked to write the entire
// range, also updates the CRC32; otherwise, invalidates it.
- bool WriteSparseRange(SparseRange* range,
- int offset, int len, const char* buf);
+ bool WriteSparseRange(base::File* sparse_file,
+ SparseRange* range,
+ int offset,
+ int len,
+ const char* buf);
// Appends a new sparse range to the sparse data file.
- bool AppendSparseRange(int64_t offset, int len, const char* buf);
+ bool AppendSparseRange(base::File* sparse_file,
+ int64_t offset,
+ int len,
+ const char* buf);
static bool DeleteFileForEntryHash(const base::FilePath& path,
uint64_t entry_hash,
@@ -398,13 +420,11 @@ class SimpleSynchronousEntry {
base::FilePath GetFilenameFromFileIndex(int file_index);
- bool sparse_file_open() const {
- return sparse_file_.IsValid();
- }
+ bool sparse_file_open() const { return sparse_file_open_; }
const net::CacheType cache_type_;
const base::FilePath path_;
- const uint64_t entry_hash_;
+ SimpleFileTracker::EntryFileKey entry_file_key_;
const bool had_index_;
std::string key_;
@@ -418,7 +438,7 @@ class SimpleSynchronousEntry {
false,
};
- base::File files_[kSimpleEntryNormalFileCount];
+ SimpleFileTracker* file_tracker_;
// True if the corresponding stream is empty and therefore no on-disk file
// was created to store it.
@@ -427,7 +447,8 @@ class SimpleSynchronousEntry {
typedef std::map<int64_t, SparseRange> SparseRangeOffsetMap;
typedef SparseRangeOffsetMap::iterator SparseRangeIterator;
SparseRangeOffsetMap sparse_ranges_;
- base::File sparse_file_;
+ bool sparse_file_open_;
+
// Offset of the end of the sparse file (where the next sparse range will be
// written).
int64_t sparse_tail_offset_;
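
Taken together, these header changes mean SimpleSynchronousEntry no longer owns its base::File objects (the files_[] array and sparse_file_ members are removed); it instead keeps an EntryFileKey plus a pointer to the shared SimpleFileTracker, and every I/O helper now borrows a base::File* for the duration of a single call. A minimal sketch of the resulting call pattern follows; FileHandle, Acquire() and SubFileForStream() are assumed names modelled on the new tracker header, not quoted from it. The indirection is what lets the backend bound how many cache files are held open at once.

// Sketch only: shows how entry code is expected to borrow a file from the
// tracker instead of touching a long-lived base::File member.
// FileHandle, Acquire() and SubFileForStream() are assumed names.
int SimpleSynchronousEntry::CheckEOFRecordForStream(
    int stream_index,
    const SimpleEntryStat& entry_stat,
    uint32_t expected_crc32) {
  SimpleFileTracker::FileHandle handle =
      file_tracker_->Acquire(this, SubFileForStream(stream_index));
  if (!handle.IsOK())
    return net::ERR_FAILED;
  // The borrowed base::File* is only valid while |handle| is alive.
  return CheckEOFRecord(handle.file(), stream_index, entry_stat,
                        expected_crc32);
}
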
diff --git a/chromium/net/disk_cache/simple/simple_util.cc b/chromium/net/disk_cache/simple/simple_util.cc
index 4e0869f43e1..ea56edfade2 100644
--- a/chromium/net/disk_cache/simple/simple_util.cc
+++ b/chromium/net/disk_cache/simple/simple_util.cc
@@ -60,13 +60,21 @@ uint64_t GetEntryHashKey(const std::string& key) {
return u.key_hash;
}
-std::string GetFilenameFromEntryHashAndFileIndex(uint64_t entry_hash,
- int file_index) {
- return base::StringPrintf("%016" PRIx64 "_%1d", entry_hash, file_index);
+std::string GetFilenameFromEntryFileKeyAndFileIndex(
+ const SimpleFileTracker::EntryFileKey& key,
+ int file_index) {
+ // TODO(morlovich): create a todelete_..._ file for a non-zero
+ // doom_generation.
+ DCHECK_EQ(0u, key.doom_generation);
+ return base::StringPrintf("%016" PRIx64 "_%1d", key.entry_hash, file_index);
}
-std::string GetSparseFilenameFromEntryHash(uint64_t entry_hash) {
- return base::StringPrintf("%016" PRIx64 "_s", entry_hash);
+std::string GetSparseFilenameFromEntryFileKey(
+ const SimpleFileTracker::EntryFileKey& key) {
+ // TODO(morlovich): create a todelete_..._ file for a non-zero
+ // doom_generation.
+ DCHECK_EQ(0u, key.doom_generation);
+ return base::StringPrintf("%016" PRIx64 "_s", key.entry_hash);
}
std::string GetFilenameFromKeyAndFileIndex(const std::string& key,
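
As long as doom_generation stays zero, the renamed helpers produce exactly the same on-disk names as the old hash-based ones, so existing caches remain readable. A small illustration, directly implied by the StringPrintf formats above (hash value arbitrary, one-argument EntryFileKey constructor assumed):

// Illustration of the file names implied by the formats above.
SimpleFileTracker::EntryFileKey key(0x3df0b9ab2e2587aaULL);  // assumed ctor
GetFilenameFromEntryFileKeyAndFileIndex(key, 0);  // "3df0b9ab2e2587aa_0"
GetFilenameFromEntryFileKeyAndFileIndex(key, 1);  // "3df0b9ab2e2587aa_1"
GetSparseFilenameFromEntryFileKey(key);           // "3df0b9ab2e2587aa_s"
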
diff --git a/chromium/net/disk_cache/simple/simple_util.h b/chromium/net/disk_cache/simple/simple_util.h
index 9ea7548c80a..e40dd262c5d 100644
--- a/chromium/net/disk_cache/simple/simple_util.h
+++ b/chromium/net/disk_cache/simple/simple_util.h
@@ -11,6 +11,7 @@
#include "base/strings/string_piece.h"
#include "net/base/net_export.h"
+#include "net/disk_cache/simple/simple_file_tracker.h"
namespace base {
class FilePath;
@@ -46,14 +47,14 @@ NET_EXPORT_PRIVATE std::string GetFilenameFromKeyAndFileIndex(
const std::string& key,
int file_index);
-// Same as |GetFilenameFromKeyAndIndex| above, but using a hex string.
-NET_EXPORT_PRIVATE std::string GetFilenameFromEntryHashAndFileIndex(
- uint64_t entry_hash,
+// Same as |GetFilenameFromKeyAndIndex| above, but using a numeric key.
+NET_EXPORT_PRIVATE std::string GetFilenameFromEntryFileKeyAndFileIndex(
+ const SimpleFileTracker::EntryFileKey& key,
int file_index);
// Given a |key| for an entry, returns the name of the sparse data file.
-NET_EXPORT_PRIVATE std::string GetSparseFilenameFromEntryHash(
- uint64_t entry_hash);
+NET_EXPORT_PRIVATE std::string GetSparseFilenameFromEntryFileKey(
+ const SimpleFileTracker::EntryFileKey& key);
// Given the size of a key, the size in bytes of the header at the beginning
// of a simple cache file.
diff --git a/chromium/net/disk_cache/simple/simple_util_win.cc b/chromium/net/disk_cache/simple/simple_util_win.cc
index f2adcd04b89..d296cb3f1d7 100644
--- a/chromium/net/disk_cache/simple/simple_util_win.cc
+++ b/chromium/net/disk_cache/simple/simple_util_win.cc
@@ -24,7 +24,7 @@ bool SimpleCacheDeleteFile(const base::FilePath& path) {
// Why a random name? Because if the name was derived from our original name,
// then churn on a particular cache entry could cause flakey behaviour.
- // TODO(gavinp): Ensure these "todelete_" files are cleaned up on periodic
+ // TODO(morlovich): Ensure these "todelete_" files are cleaned up on periodic
// directory sweeps.
const base::FilePath rename_target =
path.DirName().AppendASCII(base::StringPrintf("todelete_%016" PRIx64,
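
For context, the trick in SimpleCacheDeleteFile is to rename the doomed file to a randomly named todelete_ sibling and only then delete it, so an entry that is recreated right away never collides with a half-deleted file of the same name. A stripped-down sketch of the idea, assuming the body cut off above completes the StringPrintf with a random 64-bit suffix; this is not the verbatim Chromium function:

// Sketch of the rename-then-delete pattern used on Windows.
#include "base/files/file_util.h"
#include "base/format_macros.h"
#include "base/rand_util.h"
#include "base/strings/stringprintf.h"

bool DeleteViaRandomRename(const base::FilePath& path) {
  const base::FilePath doomed = path.DirName().AppendASCII(
      base::StringPrintf("todelete_%016" PRIx64, base::RandUint64()));
  base::File::Error error;
  // ReplaceFile is atomic within a volume; if it fails, fall back to
  // deleting the original name directly.
  if (!base::ReplaceFile(path, doomed, &error))
    return base::DeleteFile(path, /*recursive=*/false);
  return base::DeleteFile(doomed, /*recursive=*/false);
}
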