author     Allan Sandfeld Jensen <allan.jensen@theqtcompany.com>  2016-07-14 17:41:05 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2016-08-04 12:37:36 +0000
commit     399c965b6064c440ddcf4015f5f8e9d131c7a0a6 (patch)
tree       6b06b60ff365abef0e13b3503d593a0df48d20e8 /chromium/net/disk_cache
parent     7366110654eec46f21b6824f302356426f48cd74 (diff)
download   qtwebengine-chromium-399c965b6064c440ddcf4015f5f8e9d131c7a0a6.tar.gz
BASELINE: Update Chromium to 52.0.2743.76 and Ninja to 1.7.1
Change-Id: I382f51b959689505a60f8b707255ecb344f7d8b4
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/net/disk_cache')
-rw-r--r--  chromium/net/disk_cache/backend_unittest.cc  114
-rw-r--r--  chromium/net/disk_cache/blockfile/addr.cc  38
-rw-r--r--  chromium/net/disk_cache/blockfile/addr.h  7
-rw-r--r--  chromium/net/disk_cache/blockfile/addr_unittest.cc  22
-rw-r--r--  chromium/net/disk_cache/blockfile/backend_impl.cc  28
-rw-r--r--  chromium/net/disk_cache/blockfile/backend_impl.h  11
-rw-r--r--  chromium/net/disk_cache/blockfile/backend_impl_v3.cc  1544
-rw-r--r--  chromium/net/disk_cache/blockfile/backend_impl_v3.h  287
-rw-r--r--  chromium/net/disk_cache/blockfile/backend_worker_v3.cc  471
-rw-r--r--  chromium/net/disk_cache/blockfile/backend_worker_v3.h  57
-rw-r--r--  chromium/net/disk_cache/blockfile/block_bitmaps_v3.cc  179
-rw-r--r--  chromium/net/disk_cache/blockfile/block_bitmaps_v3.h  66
-rw-r--r--  chromium/net/disk_cache/blockfile/block_bitmaps_v3_unittest.cc  70
-rw-r--r--  chromium/net/disk_cache/blockfile/block_files.cc  2
-rw-r--r--  chromium/net/disk_cache/blockfile/block_files.h  5
-rw-r--r--  chromium/net/disk_cache/blockfile/disk_cache_perftest.cc  260
-rw-r--r--  chromium/net/disk_cache/blockfile/disk_format_v3.h  248
-rw-r--r--  chromium/net/disk_cache/blockfile/entry_impl.cc  12
-rw-r--r--  chromium/net/disk_cache/blockfile/entry_impl.h  7
-rw-r--r--  chromium/net/disk_cache/blockfile/entry_impl_v3.cc  1483
-rw-r--r--  chromium/net/disk_cache/blockfile/entry_impl_v3.h  223
-rw-r--r--  chromium/net/disk_cache/blockfile/eviction.cc  2
-rw-r--r--  chromium/net/disk_cache/blockfile/eviction_v3.cc  516
-rw-r--r--  chromium/net/disk_cache/blockfile/eviction_v3.h  78
-rw-r--r--  chromium/net/disk_cache/blockfile/file_win.cc  2
-rw-r--r--  chromium/net/disk_cache/blockfile/histogram_macros_v3.h  111
-rw-r--r--  chromium/net/disk_cache/blockfile/in_flight_backend_io.cc  4
-rw-r--r--  chromium/net/disk_cache/blockfile/in_flight_backend_io.h  6
-rw-r--r--  chromium/net/disk_cache/blockfile/in_flight_io.cc  2
-rw-r--r--  chromium/net/disk_cache/blockfile/index_table_v3.cc  1152
-rw-r--r--  chromium/net/disk_cache/blockfile/index_table_v3.h  286
-rw-r--r--  chromium/net/disk_cache/blockfile/index_table_v3_unittest.cc  711
-rw-r--r--  chromium/net/disk_cache/blockfile/mapped_file.cc  5
-rw-r--r--  chromium/net/disk_cache/blockfile/mapped_file_posix.cc  2
-rw-r--r--  chromium/net/disk_cache/blockfile/mapped_file_win.cc  5
-rw-r--r--  chromium/net/disk_cache/blockfile/rankings.cc  12
-rw-r--r--  chromium/net/disk_cache/blockfile/rankings.h  6
-rw-r--r--  chromium/net/disk_cache/blockfile/sparse_control.cc  4
-rw-r--r--  chromium/net/disk_cache/blockfile/sparse_control_v3.cc  873
-rw-r--r--  chromium/net/disk_cache/blockfile/sparse_control_v3.h  180
-rw-r--r--  chromium/net/disk_cache/blockfile/stats_unittest.cc  11
-rw-r--r--  chromium/net/disk_cache/cache_creator.cc  10
-rw-r--r--  chromium/net/disk_cache/disk_cache.h  8
-rw-r--r--  chromium/net/disk_cache/disk_cache_perftest.cc  304
-rw-r--r--  chromium/net/disk_cache/disk_cache_test_base.cc  17
-rw-r--r--  chromium/net/disk_cache/disk_cache_test_base.h  14
-rw-r--r--  chromium/net/disk_cache/disk_cache_test_util.cc  4
-rw-r--r--  chromium/net/disk_cache/entry_unittest.cc  121
-rw-r--r--  chromium/net/disk_cache/memory/mem_backend_impl.cc  10
-rw-r--r--  chromium/net/disk_cache/memory/mem_backend_impl.h  9
-rw-r--r--  chromium/net/disk_cache/memory/mem_entry_impl.cc  4
-rw-r--r--  chromium/net/disk_cache/memory/mem_entry_impl.h  8
-rw-r--r--  chromium/net/disk_cache/net_log_parameters.cc  24
-rw-r--r--  chromium/net/disk_cache/simple/simple_backend_impl.cc  78
-rw-r--r--  chromium/net/disk_cache/simple/simple_backend_impl.h  19
-rw-r--r--  chromium/net/disk_cache/simple/simple_backend_version.h  2
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_format.h  9
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_impl.cc  116
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_impl.h  46
-rw-r--r--  chromium/net/disk_cache/simple/simple_index.cc  32
-rw-r--r--  chromium/net/disk_cache/simple/simple_index.h  34
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_file.cc  134
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_file.h  36
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_file_posix.cc  4
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_file_unittest.cc  74
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_unittest.cc  13
-rw-r--r--  chromium/net/disk_cache/simple/simple_net_log_parameters.cc  8
-rw-r--r--  chromium/net/disk_cache/simple/simple_synchronous_entry.cc  410
-rw-r--r--  chromium/net/disk_cache/simple/simple_synchronous_entry.h  72
-rw-r--r--  chromium/net/disk_cache/simple/simple_test_util.cc  81
-rw-r--r--  chromium/net/disk_cache/simple/simple_test_util.h  8
-rw-r--r--  chromium/net/disk_cache/simple/simple_util.cc  16
-rw-r--r--  chromium/net/disk_cache/simple/simple_util.h  12
-rw-r--r--  chromium/net/disk_cache/simple/simple_version_upgrade.cc  55
75 files changed, 1440 insertions, 9465 deletions
diff --git a/chromium/net/disk_cache/backend_unittest.cc b/chromium/net/disk_cache/backend_unittest.cc
index 0c57a5bc169..dd15fae2bdc 100644
--- a/chromium/net/disk_cache/backend_unittest.cc
+++ b/chromium/net/disk_cache/backend_unittest.cc
@@ -14,9 +14,9 @@
#include "base/strings/stringprintf.h"
#include "base/test/mock_entropy_provider.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
-#include "base/thread_task_runner_handle.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "net/base/cache_type.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
@@ -33,6 +33,7 @@
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_index.h"
+#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -50,21 +51,21 @@ namespace {
const char kExistingEntryKey[] = "existing entry key";
-scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
+std::unique_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
const base::Thread& cache_thread,
base::FilePath& cache_path) {
net::TestCompletionCallback cb;
- scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
+ std::unique_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
cache_path, cache_thread.task_runner(), NULL));
int rv = cache->Init(cb.callback());
if (cb.GetResult(rv) != net::OK)
- return scoped_ptr<disk_cache::BackendImpl>();
+ return std::unique_ptr<disk_cache::BackendImpl>();
disk_cache::Entry* entry = NULL;
rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
if (cb.GetResult(rv) != net::OK)
- return scoped_ptr<disk_cache::BackendImpl>();
+ return std::unique_ptr<disk_cache::BackendImpl>();
entry->Close();
return cache;
@@ -253,16 +254,19 @@ void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
bool DiskCacheBackendTest::CreateSetOfRandomEntries(
std::set<std::string>* key_pool) {
const int kNumEntries = 10;
+ const int initial_entry_count = cache_->GetEntryCount();
for (int i = 0; i < kNumEntries; ++i) {
std::string key = GenerateKey(true);
disk_cache::Entry* entry;
- if (CreateEntry(key, &entry) != net::OK)
+ if (CreateEntry(key, &entry) != net::OK) {
return false;
+ }
key_pool->insert(key);
entry->Close();
}
- return key_pool->size() == static_cast<size_t>(cache_->GetEntryCount());
+ return key_pool->size() ==
+ static_cast<size_t>(cache_->GetEntryCount() - initial_entry_count);
}
// Performs iteration over the backend and checks that the keys of entries
@@ -448,7 +452,7 @@ TEST_F(DiskCacheTest, CreateBackend) {
base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
// Test the private factory method(s).
- scoped_ptr<disk_cache::Backend> cache;
+ std::unique_ptr<disk_cache::Backend> cache;
cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
ASSERT_TRUE(cache.get());
cache.reset();
@@ -491,7 +495,7 @@ TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
net::TestCompletionCallback cb;
bool prev = base::ThreadRestrictions::SetIOAllowed(false);
- scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
+ std::unique_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
cache_path_, cache_thread.task_runner(), NULL));
int rv = cache->Init(cb.callback());
EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
@@ -584,7 +588,7 @@ TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
ASSERT_TRUE(store.CreateUniqueTempDir());
net::TestCompletionCallback cb;
- scoped_ptr<disk_cache::Backend> extra_cache;
+ std::unique_ptr<disk_cache::Backend> extra_cache;
int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
net::CACHE_BACKEND_DEFAULT,
store.path(),
@@ -747,7 +751,7 @@ TEST_F(DiskCacheTest, TruncatedIndex) {
base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
net::TestCompletionCallback cb;
- scoped_ptr<disk_cache::Backend> backend;
+ std::unique_ptr<disk_cache::Backend> backend;
int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
net::CACHE_BACKEND_BLOCKFILE,
cache_path_,
@@ -1300,7 +1304,7 @@ void DiskCacheBackendTest::BackendEnumerations() {
Time final = Time::Now();
disk_cache::Entry* entry;
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
int count = 0;
Time last_modified[kNumEntries];
Time last_used[kNumEntries];
@@ -1372,7 +1376,7 @@ void DiskCacheBackendTest::BackendEnumerations2() {
// Make sure that the timestamp is not the same.
AddDelay();
ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
EXPECT_EQ(entry2->GetKey(), second);
@@ -1453,7 +1457,7 @@ TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
entry1->Close();
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
EXPECT_EQ(entry2->GetKey(), second);
entry2->Close();
@@ -1485,7 +1489,7 @@ void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
SimulateCrash();
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
int count = 0;
while (iter->OpenNextEntry(&entry) == net::OK) {
ASSERT_TRUE(NULL != entry);
@@ -1527,7 +1531,8 @@ void DiskCacheBackendTest::BackendFixEnumerators() {
EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
disk_cache::Entry *entry1, *entry2;
- scoped_ptr<TestIterator> iter1 = CreateIterator(), iter2 = CreateIterator();
+ std::unique_ptr<TestIterator> iter1 = CreateIterator(),
+ iter2 = CreateIterator();
ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1));
ASSERT_TRUE(NULL != entry1);
entry1->Close();
@@ -1970,7 +1975,7 @@ TEST_F(DiskCacheTest, WrongVersion) {
base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
net::TestCompletionCallback cb;
- scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
+ std::unique_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
cache_path_, cache_thread.task_runner(), NULL));
int rv = cache->Init(cb.callback());
ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
@@ -1986,7 +1991,7 @@ TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
ASSERT_TRUE(cache_thread.StartWithOptions(
base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
- scoped_ptr<disk_cache::BackendImpl> cache =
+ std::unique_ptr<disk_cache::BackendImpl> cache =
CreateExistingEntryCache(cache_thread, cache_path_);
ASSERT_TRUE(cache.get());
cache.reset();
@@ -1997,7 +2002,7 @@ TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
"ExperimentControl");
net::TestCompletionCallback cb;
- scoped_ptr<disk_cache::Backend> base_cache;
+ std::unique_ptr<disk_cache::Backend> base_cache;
int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
net::CACHE_BACKEND_BLOCKFILE,
cache_path_,
@@ -2025,7 +2030,7 @@ TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
ASSERT_TRUE(cache_thread.StartWithOptions(
base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
- scoped_ptr<disk_cache::BackendImpl> cache =
+ std::unique_ptr<disk_cache::BackendImpl> cache =
CreateExistingEntryCache(cache_thread, cache_path_);
ASSERT_TRUE(cache.get());
@@ -2061,7 +2066,7 @@ TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
"ExperimentControl");
- scoped_ptr<disk_cache::BackendImpl> cache =
+ std::unique_ptr<disk_cache::BackendImpl> cache =
CreateExistingEntryCache(cache_thread, cache_path_);
ASSERT_TRUE(cache.get());
}
@@ -2074,7 +2079,7 @@ TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
const int kRestartCount = 5;
for (int i = 0; i < kRestartCount; ++i) {
- scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
+ std::unique_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
cache_path_, cache_thread.task_runner(), NULL));
int rv = cache->Init(cb.callback());
ASSERT_EQ(net::OK, cb.GetResult(rv));
@@ -2151,7 +2156,7 @@ void DiskCacheBackendTest::BackendInvalidEntry3() {
InitCache();
disk_cache::Entry* entry;
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
while (iter->OpenNextEntry(&entry) == net::OK) {
entry->Close();
}
@@ -2296,7 +2301,7 @@ void DiskCacheBackendTest::BackendInvalidEntry7() {
EXPECT_EQ(1, cache_->GetEntryCount());
// We should delete the cache. The list still has a corrupt node.
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
FlushQueueForTest();
EXPECT_EQ(0, cache_->GetEntryCount());
@@ -2340,7 +2345,7 @@ void DiskCacheBackendTest::BackendInvalidEntry8() {
EXPECT_EQ(1, cache_->GetEntryCount());
// We should not delete the cache.
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
entry->Close();
EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
@@ -2390,7 +2395,7 @@ void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
} else {
// We should detect the problem through the list, but we should not delete
// the entry, just fail the iteration.
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
// Now a full iteration will work, and return one entry.
@@ -2467,7 +2472,7 @@ void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
// Detection order: third -> second -> first.
// We should detect the problem through the list, but we should not delete
// the entry.
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
entry->Close();
ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
@@ -2534,7 +2539,7 @@ void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
// Detection order: third -> second.
// We should detect the problem through the list, but we should not delete
// the entry, just fail the iteration.
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
entry->Close();
EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
@@ -2622,7 +2627,7 @@ TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
// If the LRU is corrupt, we delete the cache.
void DiskCacheBackendTest::BackendInvalidRankings() {
disk_cache::Entry* entry;
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
entry->Close();
EXPECT_EQ(2, cache_->GetEntryCount());
@@ -2667,7 +2672,7 @@ TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
// If the LRU is corrupt and we have open entries, we disable the cache.
void DiskCacheBackendTest::BackendDisable() {
disk_cache::Entry *entry1, *entry2;
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
@@ -2718,7 +2723,7 @@ void DiskCacheBackendTest::BackendDisable2() {
EXPECT_EQ(8, cache_->GetEntryCount());
disk_cache::Entry* entry;
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
int count = 0;
while (iter->OpenNextEntry(&entry) == net::OK) {
ASSERT_TRUE(NULL != entry);
@@ -2766,7 +2771,7 @@ TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
// If the index size changes when we disable the cache, we should not crash.
void DiskCacheBackendTest::BackendDisable3() {
disk_cache::Entry *entry1, *entry2;
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
EXPECT_EQ(2, cache_->GetEntryCount());
ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
entry1->Close();
@@ -2800,7 +2805,7 @@ TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
// If we disable the cache, already open entries should work as far as possible.
void DiskCacheBackendTest::BackendDisable4() {
disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
char key2[2000];
@@ -2866,7 +2871,7 @@ void DiskCacheBackendTest::BackendDisabledAPI() {
cache_impl_->SetUnitTestMode(); // Simulate failure restarting the cache.
disk_cache::Entry* entry1, *entry2;
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
EXPECT_EQ(2, cache_->GetEntryCount());
ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
entry1->Close();
@@ -2991,7 +2996,7 @@ TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
MessageLoopHelper helper;
ASSERT_TRUE(CleanupCacheDir());
- scoped_ptr<disk_cache::BackendImpl> cache;
+ std::unique_ptr<disk_cache::BackendImpl> cache;
cache.reset(new disk_cache::BackendImpl(
cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
ASSERT_TRUE(NULL != cache.get());
@@ -3006,7 +3011,7 @@ TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
TEST_F(DiskCacheBackendTest, TimerNotCreated) {
ASSERT_TRUE(CopyTestCache("wrong_version"));
- scoped_ptr<disk_cache::BackendImpl> cache;
+ std::unique_ptr<disk_cache::BackendImpl> cache;
cache.reset(new disk_cache::BackendImpl(
cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
ASSERT_TRUE(NULL != cache.get());
@@ -3158,7 +3163,7 @@ TEST_F(DiskCacheTest, MultipleInstances) {
net::TestCompletionCallback cb;
const int kNumberOfCaches = 2;
- scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
+ std::unique_ptr<disk_cache::Backend> cache[kNumberOfCaches];
int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
net::CACHE_BACKEND_DEFAULT,
@@ -3625,7 +3630,7 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
// Check that enumeration returns all entries.
std::set<std::string> keys_to_match(key_pool);
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
size_t count = 0;
ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
iter.reset();
@@ -3665,7 +3670,7 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
// Check that enumeration returns all entries but the doomed one.
std::set<std::string> keys_to_match(key_pool);
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
size_t count = 0;
ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
iter.get(),
@@ -3687,9 +3692,6 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
SetSimpleCacheMode();
InitCache();
- std::set<std::string> key_pool;
- ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
-
// Create a corrupt entry. The write/read sequence ensures that the entry will
// have been created before corrupting the platform files, in the case of
// optimistic operations.
@@ -3706,13 +3708,16 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
corrupted_entry->Close();
+ std::set<std::string> key_pool;
+ ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
+
EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
key, cache_path_));
EXPECT_EQ(key_pool.size() + 1, static_cast<size_t>(cache_->GetEntryCount()));
// Check that enumeration returns all entries but the corrupt one.
std::set<std::string> keys_to_match(key_pool);
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
size_t count = 0;
ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
iter.reset();
@@ -3729,7 +3734,7 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
std::set<std::string> key_pool;
ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
disk_cache::Entry* entry = NULL;
ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
EXPECT_TRUE(entry);
@@ -3739,6 +3744,27 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
// This test passes if we don't leak memory.
}
+// Tests that enumerations include entries with long keys.
+TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationLongKeys) {
+ SetSimpleCacheMode();
+ InitCache();
+ std::set<std::string> key_pool;
+ ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
+
+ const size_t long_key_length =
+ disk_cache::SimpleSynchronousEntry::kInitialHeaderRead + 10;
+ std::string long_key(long_key_length, 'X');
+ key_pool.insert(long_key);
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK, CreateEntry(long_key.c_str(), &entry));
+ entry->Close();
+
+ std::unique_ptr<TestIterator> iter = CreateIterator();
+ size_t count = 0;
+ EXPECT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &key_pool, &count));
+ EXPECT_TRUE(key_pool.empty());
+}
+
// Tests that a SimpleCache doesn't crash when files are deleted very quickly
// after closing.
// NOTE: IF THIS TEST IS FLAKY THEN IT IS FAILING. See https://crbug.com/416940
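The CreateSetOfRandomEntries() change above makes the final consistency check robust when the cache is not empty beforehand, which is why SimpleCacheEnumerationCorruption can now create its corrupt entry before filling the key pool. A minimal self-contained sketch of the counting logic, with a hypothetical FakeCache standing in for the real backend:

#include <cassert>
#include <set>
#include <string>

// Hypothetical stand-in for the backend; only the entry count matters here.
struct FakeCache {
  int entry_count = 0;
  void CreateEntry(const std::string& key) { ++entry_count; }
};

int main() {
  FakeCache cache;
  cache.CreateEntry("existing entry key");  // a pre-existing entry

  const int kNumEntries = 10;
  const int initial_entry_count = cache.entry_count;
  std::set<std::string> key_pool;
  for (int i = 0; i < kNumEntries; ++i) {
    std::string key = "key" + std::to_string(i);
    cache.CreateEntry(key);
    key_pool.insert(key);
  }

  // Old check: fails whenever the cache was non-empty beforehand.
  bool old_check = key_pool.size() == static_cast<size_t>(cache.entry_count);
  // New check: compares against the number of entries actually added.
  bool new_check = key_pool.size() ==
                   static_cast<size_t>(cache.entry_count - initial_entry_count);
  assert(!old_check && new_check);
  return 0;
}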
diff --git a/chromium/net/disk_cache/blockfile/addr.cc b/chromium/net/disk_cache/blockfile/addr.cc
index ab418c4dc03..cbe42f7f5fa 100644
--- a/chromium/net/disk_cache/blockfile/addr.cc
+++ b/chromium/net/disk_cache/blockfile/addr.cc
@@ -26,7 +26,7 @@ bool Addr::SetFileNumber(int file_number) {
return true;
}
-bool Addr::SanityCheckV2() const {
+bool Addr::SanityCheck() const {
if (!is_initialized())
return !value_;
@@ -39,22 +39,8 @@ bool Addr::SanityCheckV2() const {
return !reserved_bits();
}
-bool Addr::SanityCheckV3() const {
- if (!is_initialized())
- return !value_;
-
- // For actual entries, SanityCheckForEntryV3 should be used.
- if (file_type() > BLOCK_FILES)
- return false;
-
- if (is_separate_file())
- return true;
-
- return !reserved_bits();
-}
-
-bool Addr::SanityCheckForEntryV2() const {
- if (!SanityCheckV2() || !is_initialized())
+bool Addr::SanityCheckForEntry() const {
+ if (!SanityCheck() || !is_initialized())
return false;
if (is_separate_file() || file_type() != BLOCK_256)
@@ -63,24 +49,8 @@ bool Addr::SanityCheckForEntryV2() const {
return true;
}
-bool Addr::SanityCheckForEntryV3() const {
- if (!is_initialized())
- return false;
-
- if (reserved_bits())
- return false;
-
- if (file_type() != BLOCK_ENTRIES && file_type() != BLOCK_EVICTED)
- return false;
-
- if (num_blocks() != 1)
- return false;
-
- return true;
-}
-
bool Addr::SanityCheckForRankings() const {
- if (!SanityCheckV2() || !is_initialized())
+ if (!SanityCheck() || !is_initialized())
return false;
if (is_separate_file() || file_type() != RANKINGS || num_blocks() != 1)
diff --git a/chromium/net/disk_cache/blockfile/addr.h b/chromium/net/disk_cache/blockfile/addr.h
index cf6071ed9de..f21fbf2aa14 100644
--- a/chromium/net/disk_cache/blockfile/addr.h
+++ b/chromium/net/disk_cache/blockfile/addr.h
@@ -31,7 +31,6 @@ const int kMaxBlockSize = 4096 * 4;
const int16_t kMaxBlockFile = 255;
const int kMaxNumBlocks = 4;
const int16_t kFirstAdditionalBlockFile = 4;
-const size_t kFirstAdditionalBlockFileV3 = 7;
// Defines a storage address for a cache record
//
@@ -161,10 +160,8 @@ class NET_EXPORT_PRIVATE Addr {
}
// Returns true if this address looks like a valid one.
- bool SanityCheckV2() const;
- bool SanityCheckV3() const;
- bool SanityCheckForEntryV2() const;
- bool SanityCheckForEntryV3() const;
+ bool SanityCheck() const;
+ bool SanityCheckForEntry() const;
bool SanityCheckForRankings() const;
private:
diff --git a/chromium/net/disk_cache/blockfile/addr_unittest.cc b/chromium/net/disk_cache/blockfile/addr_unittest.cc
index b62e31f7925..dbb2dbc04e1 100644
--- a/chromium/net/disk_cache/blockfile/addr_unittest.cc
+++ b/chromium/net/disk_cache/blockfile/addr_unittest.cc
@@ -36,24 +36,22 @@ TEST_F(DiskCacheTest, CacheAddr_InvalidValues) {
TEST_F(DiskCacheTest, CacheAddr_SanityCheck) {
// First a few valid values.
- EXPECT_TRUE(Addr(0).SanityCheckV2());
- EXPECT_TRUE(Addr(0x80001000).SanityCheckV2());
- EXPECT_TRUE(Addr(0xC3FFFFFF).SanityCheckV2());
- EXPECT_TRUE(Addr(0xC0FFFFFF).SanityCheckV2());
- EXPECT_TRUE(Addr(0xD0001000).SanityCheckV3());
+ EXPECT_TRUE(Addr(0).SanityCheck());
+ EXPECT_TRUE(Addr(0x80001000).SanityCheck());
+ EXPECT_TRUE(Addr(0xC3FFFFFF).SanityCheck());
+ EXPECT_TRUE(Addr(0xC0FFFFFF).SanityCheck());
// Not initialized.
- EXPECT_FALSE(Addr(0x20).SanityCheckV2());
- EXPECT_FALSE(Addr(0x10001000).SanityCheckV2());
+ EXPECT_FALSE(Addr(0x20).SanityCheck());
+ EXPECT_FALSE(Addr(0x10001000).SanityCheck());
// Invalid file type.
- EXPECT_FALSE(Addr(0xD0001000).SanityCheckV2());
- EXPECT_FALSE(Addr(0xE0001000).SanityCheckV3());
- EXPECT_FALSE(Addr(0xF0000000).SanityCheckV2());
+ EXPECT_FALSE(Addr(0xD0001000).SanityCheck());
+ EXPECT_FALSE(Addr(0xF0000000).SanityCheck());
// Reserved bits.
- EXPECT_FALSE(Addr(0x14000000).SanityCheckV2());
- EXPECT_FALSE(Addr(0x18000000).SanityCheckV2());
+ EXPECT_FALSE(Addr(0x14000000).SanityCheck());
+ EXPECT_FALSE(Addr(0x18000000).SanityCheck());
}
} // namespace disk_cache
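The V2/V3 merge above leaves a single SanityCheck()/SanityCheckForEntry() pair. A rough sketch of the address decoding these checks rely on; the mask constants are not shown in this diff and are reproduced from addr.h for illustration only, though they are consistent with the unit-test values above:

#include <cstdint>
#include <cstdio>

// Illustrative bit layout of a blockfile cache address (assumed from addr.h).
constexpr uint32_t kInitializedMask  = 0x80000000;  // bit 31
constexpr uint32_t kFileTypeMask     = 0x70000000;  // bits 28-30
constexpr uint32_t kReservedBitsMask = 0x0C000000;  // bits 26-27

void Decode(uint32_t value) {
  const int initialized = (value & kInitializedMask) ? 1 : 0;
  const unsigned file_type = (value & kFileTypeMask) >> 28;
  const int reserved = (value & kReservedBitsMask) ? 1 : 0;
  std::printf("0x%08X: initialized=%d file_type=%u reserved_bits=%d\n",
              static_cast<unsigned>(value), initialized, file_type, reserved);
}

int main() {
  Decode(0x80001000);  // valid: initialized, type 0 (separate file)
  Decode(0xC0FFFFFF);  // valid: initialized, type 4 (BLOCK_4K)
  Decode(0xD0001000);  // type 5: a V3-only type, so the merged check rejects it
  Decode(0x14000000);  // not initialized, and a reserved bit is set
  return 0;
}

With the V3 variants gone, file types beyond the V2 set no longer pass, which is exactly why 0xD0001000 moved from the "valid values" group to "invalid file type" in the test above.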
diff --git a/chromium/net/disk_cache/blockfile/backend_impl.cc b/chromium/net/disk_cache/blockfile/backend_impl.cc
index 980ddc73a53..fe05bac3d61 100644
--- a/chromium/net/disk_cache/blockfile/backend_impl.cc
+++ b/chromium/net/disk_cache/blockfile/backend_impl.cc
@@ -22,8 +22,8 @@
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
-#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
@@ -387,7 +387,7 @@ int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
return net::ERR_FAILED;
EntryImpl* node;
- scoped_ptr<Rankings::Iterator> iterator(new Rankings::Iterator());
+ std::unique_ptr<Rankings::Iterator> iterator(new Rankings::Iterator());
EntryImpl* next = OpenNextEntryImpl(iterator.get());
if (!next)
return net::OK;
@@ -429,7 +429,7 @@ int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
stats_.OnEvent(Stats::DOOM_RECENT);
for (;;) {
- scoped_ptr<Rankings::Iterator> iterator(new Rankings::Iterator());
+ std::unique_ptr<Rankings::Iterator> iterator(new Rankings::Iterator());
EntryImpl* entry = OpenNextEntryImpl(iterator.get());
if (!entry)
return net::OK;
@@ -453,7 +453,8 @@ int BackendImpl::SyncOpenNextEntry(Rankings::Iterator* iterator,
return (*next_entry) ? net::OK : net::ERR_FAILED;
}
-void BackendImpl::SyncEndEnumeration(scoped_ptr<Rankings::Iterator> iterator) {
+void BackendImpl::SyncEndEnumeration(
+ std::unique_ptr<Rankings::Iterator> iterator) {
iterator->Reset();
}
@@ -497,12 +498,6 @@ EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
int64_t use_hours = total_hours - no_use_hours;
if (!cache_entry) {
- CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
- CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
- CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0,
- static_cast<base::HistogramBase::Sample>(total_hours));
- CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0,
- static_cast<base::HistogramBase::Sample>(use_hours));
stats_.OnEvent(Stats::OPEN_MISS);
return NULL;
}
@@ -1296,11 +1291,12 @@ class BackendImpl::IteratorImpl : public Backend::Iterator {
private:
const base::WeakPtr<InFlightBackendIO> background_queue_;
- scoped_ptr<Rankings::Iterator> iterator_;
+ std::unique_ptr<Rankings::Iterator> iterator_;
};
-scoped_ptr<Backend::Iterator> BackendImpl::CreateIterator() {
- return scoped_ptr<Backend::Iterator>(new IteratorImpl(GetBackgroundQueue()));
+std::unique_ptr<Backend::Iterator> BackendImpl::CreateIterator() {
+ return std::unique_ptr<Backend::Iterator>(
+ new IteratorImpl(GetBackgroundQueue()));
}
void BackendImpl::GetStats(StatsItems* stats) {
@@ -1456,7 +1452,7 @@ bool BackendImpl::InitStats() {
if (!file)
return false;
- scoped_ptr<char[]> data(new char[size]);
+ std::unique_ptr<char[]> data(new char[size]);
size_t offset = address.start_block() * address.BlockSize() +
kBlockHeaderSize;
if (!file->Read(data.get(), size, offset))
@@ -1471,7 +1467,7 @@ bool BackendImpl::InitStats() {
void BackendImpl::StoreStats() {
int size = stats_.StorageSize();
- scoped_ptr<char[]> data(new char[size]);
+ std::unique_ptr<char[]> data(new char[size]);
Addr address;
size = stats_.SerializeStats(data.get(), size, &address);
DCHECK(size);
@@ -1545,7 +1541,7 @@ int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
STRESS_DCHECK(block_files_.IsValid(address));
- if (!address.SanityCheckForEntryV2()) {
+ if (!address.SanityCheckForEntry()) {
LOG(WARNING) << "Wrong entry address.";
STRESS_NOTREACHED();
return ERR_INVALID_ADDRESS;
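Throughout this commit, CreateIterator() now hands ownership to the caller as std::unique_ptr instead of scoped_ptr. A self-contained sketch of the enumeration pattern the tests use, with toy stand-ins for TestIterator and Entry (net::OK and the stored entries are stubbed for illustration):

#include <cstdio>
#include <memory>
#include <string>
#include <vector>

namespace net { constexpr int OK = 0; constexpr int ERR_FAILED = -2; }

// Toy stand-in for disk_cache::Entry.
struct Entry {
  std::string key;
  const std::string& GetKey() const { return key; }
  void Close() {}  // real entries must be closed after use
};

// Toy stand-in for the synchronous TestIterator wrapper.
class TestIterator {
 public:
  explicit TestIterator(std::vector<Entry> entries)
      : entries_(std::move(entries)) {}
  int OpenNextEntry(Entry** entry) {
    if (next_ >= entries_.size())
      return net::ERR_FAILED;
    *entry = &entries_[next_++];
    return net::OK;
  }

 private:
  std::vector<Entry> entries_;
  size_t next_ = 0;
};

std::unique_ptr<TestIterator> CreateIterator() {
  return std::make_unique<TestIterator>(std::vector<Entry>{{"a"}, {"b"}});
}

int main() {
  std::unique_ptr<TestIterator> iter = CreateIterator();
  Entry* entry = nullptr;
  while (iter->OpenNextEntry(&entry) == net::OK) {
    std::printf("%s\n", entry->GetKey().c_str());
    entry->Close();
  }
  // Destroying the iterator ends the enumeration, mirroring how
  // SyncEndEnumeration() takes the Rankings::Iterator by unique_ptr.
  iter.reset();
  return 0;
}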
diff --git a/chromium/net/disk_cache/blockfile/backend_impl.h b/chromium/net/disk_cache/blockfile/backend_impl.h
index 89edef2eb58..5ecd3072ae7 100644
--- a/chromium/net/disk_cache/blockfile/backend_impl.h
+++ b/chromium/net/disk_cache/blockfile/backend_impl.h
@@ -9,7 +9,8 @@
#include <stdint.h>
-#include "base/containers/hash_tables.h"
+#include <unordered_map>
+
#include "base/files/file_path.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -79,7 +80,7 @@ class NET_EXPORT_PRIVATE BackendImpl : public Backend {
int SyncCalculateSizeOfAllEntries();
int SyncDoomEntriesSince(base::Time initial_time);
int SyncOpenNextEntry(Rankings::Iterator* iterator, Entry** next_entry);
- void SyncEndEnumeration(scoped_ptr<Rankings::Iterator> iterator);
+ void SyncEndEnumeration(std::unique_ptr<Rankings::Iterator> iterator);
void SyncOnExternalCacheHit(const std::string& key);
// Open or create an entry for the given |key| or |iter|.
@@ -288,12 +289,12 @@ class NET_EXPORT_PRIVATE BackendImpl : public Backend {
// the iterator (for example, deleting the entry) will invalidate the
// iterator. Performing operations on an entry that modify the entry may
// result in loops in the iteration, skipped entries or similar.
- scoped_ptr<Iterator> CreateIterator() override;
+ std::unique_ptr<Iterator> CreateIterator() override;
void GetStats(StatsItems* stats) override;
void OnExternalCacheHit(const std::string& key) override;
private:
- typedef base::hash_map<CacheAddr, EntryImpl*> EntriesMap;
+ using EntriesMap = std::unordered_map<CacheAddr, EntryImpl*>;
class IteratorImpl;
// Creates a new backing file for the cache index.
@@ -403,7 +404,7 @@ class NET_EXPORT_PRIVATE BackendImpl : public Backend {
net::NetLog* net_log_;
Stats stats_; // Usage statistics.
- scoped_ptr<base::RepeatingTimer> timer_; // Usage timer.
+ std::unique_ptr<base::RepeatingTimer> timer_; // Usage timer.
base::WaitableEvent done_; // Signals the end of background work.
scoped_refptr<TraceObject> trace_object_; // Initializes internal tracing.
base::WeakPtrFactory<BackendImpl> ptr_factory_;
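backend_impl.h now spells the open-entry map as a using-alias over std::unordered_map instead of base::hash_map. A minimal sketch of the bookkeeping this map supports, mirroring NewEntry() (reuse an entry that is already in memory) and the V3 OnEntryDestroyBegin() (drop it when destruction starts); the types here are stubs:

#include <cassert>
#include <cstdint>
#include <unordered_map>

using CacheAddr = uint32_t;
struct EntryImpl {};  // stub

// The backend keys in-memory entries by their on-disk address.
using EntriesMap = std::unordered_map<CacheAddr, EntryImpl*>;

int main() {
  EntriesMap open_entries;
  EntryImpl entry;
  const CacheAddr addr = 0x80001000;

  open_entries[addr] = &entry;        // entry opened or created
  auto it = open_entries.find(addr);
  assert(it != open_entries.end());   // "This entry is already in memory."
  open_entries.erase(it);             // entry destruction begins
  assert(open_entries.find(addr) == open_entries.end());
  return 0;
}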
diff --git a/chromium/net/disk_cache/blockfile/backend_impl_v3.cc b/chromium/net/disk_cache/blockfile/backend_impl_v3.cc
deleted file mode 100644
index ae90bd96aef..00000000000
--- a/chromium/net/disk_cache/blockfile/backend_impl_v3.cc
+++ /dev/null
@@ -1,1544 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/disk_cache/blockfile/backend_impl_v3.h"
-
-#include <limits>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/files/file_path.h"
-#include "base/files/file_util.h"
-#include "base/hash.h"
-#include "base/message_loop/message_loop.h"
-#include "base/metrics/field_trial.h"
-#include "base/rand_util.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/sys_info.h"
-#include "base/threading/thread_restrictions.h"
-#include "base/time/time.h"
-#include "base/timer/timer.h"
-#include "net/base/net_errors.h"
-#include "net/disk_cache/blockfile/disk_format_v3.h"
-#include "net/disk_cache/blockfile/entry_impl_v3.h"
-#include "net/disk_cache/blockfile/errors.h"
-#include "net/disk_cache/blockfile/experiments.h"
-#include "net/disk_cache/blockfile/file.h"
-#include "net/disk_cache/blockfile/histogram_macros_v3.h"
-#include "net/disk_cache/blockfile/index_table_v3.h"
-#include "net/disk_cache/blockfile/storage_block-inl.h"
-#include "net/disk_cache/cache_util.h"
-
-// Provide a BackendImpl object to macros from histogram_macros.h.
-#define CACHE_UMA_BACKEND_IMPL_OBJ this
-
-using base::Time;
-using base::TimeDelta;
-using base::TimeTicks;
-
-namespace {
-
-#if defined(V3_NOT_JUST_YET_READY)
-const int kDefaultCacheSize = 80 * 1024 * 1024;
-
-// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
-const int kTrimDelay = 10;
-#endif // defined(V3_NOT_JUST_YET_READY).
-
-} // namespace
-
-// ------------------------------------------------------------------------
-
-namespace disk_cache {
-
-BackendImplV3::BackendImplV3(
- const base::FilePath& path,
- const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
- net::NetLog* net_log)
- : index_(NULL),
- path_(path),
- block_files_(),
- max_size_(0),
- up_ticks_(0),
- cache_type_(net::DISK_CACHE),
- uma_report_(0),
- user_flags_(0),
- init_(false),
- restarted_(false),
- read_only_(false),
- disabled_(false),
- lru_eviction_(true),
- first_timer_(true),
- user_load_(false),
- net_log_(net_log),
- ptr_factory_(this) {
-}
-
-BackendImplV3::~BackendImplV3() {
- CleanupCache();
-}
-
-int BackendImplV3::Init(const CompletionCallback& callback) {
- DCHECK(!init_);
- if (init_)
- return net::ERR_FAILED;
-
- return net::ERR_IO_PENDING;
-}
-
-// ------------------------------------------------------------------------
-
-bool BackendImplV3::SetMaxSize(int max_bytes) {
- static_assert(sizeof(max_bytes) == sizeof(max_size_),
- "unsupported int model");
- if (max_bytes < 0)
- return false;
-
- // Zero size means use the default.
- if (!max_bytes)
- return true;
-
- // Avoid a DCHECK later on.
- if (max_bytes >= std::numeric_limits<int32_t>::max() -
- std::numeric_limits<int32_t>::max() / 10) {
- max_bytes = std::numeric_limits<int32_t>::max() -
- std::numeric_limits<int32_t>::max() / 10 - 1;
- }
-
- user_flags_ |= MAX_SIZE;
- max_size_ = max_bytes;
- return true;
-}
-
-void BackendImplV3::SetType(net::CacheType type) {
- DCHECK_NE(net::MEMORY_CACHE, type);
- cache_type_ = type;
-}
-
-bool BackendImplV3::CreateBlock(FileType block_type, int block_count,
- Addr* block_address) {
- return block_files_.CreateBlock(block_type, block_count, block_address);
-}
-
-#if defined(V3_NOT_JUST_YET_READY)
-void BackendImplV3::UpdateRank(EntryImplV3* entry, bool modified) {
- if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
- return;
- eviction_.UpdateRank(entry, modified);
-}
-
-void BackendImplV3::InternalDoomEntry(EntryImplV3* entry) {
- uint32_t hash = entry->GetHash();
- std::string key = entry->GetKey();
- Addr entry_addr = entry->entry()->address();
- bool error;
- EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
- CacheAddr child(entry->GetNextAddress());
-
- Trace("Doom entry 0x%p", entry);
-
- if (!entry->doomed()) {
- // We may have doomed this entry from within MatchEntry.
- eviction_.OnDoomEntry(entry);
- entry->InternalDoom();
- if (!new_eviction_) {
- DecreaseNumEntries();
- }
- stats_.OnEvent(Stats::DOOM_ENTRY);
- }
-
- if (parent_entry) {
- parent_entry->SetNextAddress(Addr(child));
- parent_entry->Release();
- } else if (!error) {
- data_->table[hash & mask_] = child;
- }
-
- FlushIndex();
-}
-
-void BackendImplV3::OnEntryDestroyBegin(Addr address) {
- EntriesMap::iterator it = open_entries_.find(address.value());
- if (it != open_entries_.end())
- open_entries_.erase(it);
-}
-
-void BackendImplV3::OnEntryDestroyEnd() {
- DecreaseNumRefs();
- if (data_->header.num_bytes > max_size_ && !read_only_ &&
- (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
- eviction_.TrimCache(false);
-}
-
-EntryImplV3* BackendImplV3::GetOpenEntry(Addr address) const {
- DCHECK(rankings->HasData());
- EntriesMap::const_iterator it =
- open_entries_.find(rankings->Data()->contents);
- if (it != open_entries_.end()) {
- // We have this entry in memory.
- return it->second;
- }
-
- return NULL;
-}
-
-int BackendImplV3::MaxFileSize() const {
- return max_size_ / 8;
-}
-
-void BackendImplV3::ModifyStorageSize(int32_t old_size, int32_t new_size) {
- if (disabled_ || old_size == new_size)
- return;
- if (old_size > new_size)
- SubstractStorageSize(old_size - new_size);
- else
- AddStorageSize(new_size - old_size);
-
- // Update the usage statistics.
- stats_.ModifyStorageStats(old_size, new_size);
-}
-
-void BackendImplV3::TooMuchStorageRequested(int32_t size) {
- stats_.ModifyStorageStats(0, size);
-}
-
-bool BackendImplV3::IsAllocAllowed(int current_size, int new_size) {
- DCHECK_GT(new_size, current_size);
- if (user_flags_ & NO_BUFFERING)
- return false;
-
- int to_add = new_size - current_size;
- if (buffer_bytes_ + to_add > MaxBuffersSize())
- return false;
-
- buffer_bytes_ += to_add;
- CACHE_UMA(COUNTS_50000, "BufferBytes", buffer_bytes_ / 1024);
- return true;
-}
-#endif // defined(V3_NOT_JUST_YET_READY).
-
-void BackendImplV3::BufferDeleted(int size) {
- DCHECK_GE(size, 0);
- buffer_bytes_ -= size;
- DCHECK_GE(buffer_bytes_, 0);
-}
-
-bool BackendImplV3::IsLoaded() const {
- if (user_flags_ & NO_LOAD_PROTECTION)
- return false;
-
- return user_load_;
-}
-
-std::string BackendImplV3::HistogramName(const char* name) const {
- static const char* const names[] = {
- "Http", "", "Media", "AppCache", "Shader" };
- DCHECK_NE(cache_type_, net::MEMORY_CACHE);
- return base::StringPrintf("DiskCache3.%s_%s", name, names[cache_type_]);
-}
-
-base::WeakPtr<BackendImplV3> BackendImplV3::GetWeakPtr() {
- return ptr_factory_.GetWeakPtr();
-}
-
-#if defined(V3_NOT_JUST_YET_READY)
-// We want to remove biases from some histograms so we only send data once per
-// week.
-bool BackendImplV3::ShouldReportAgain() {
- if (uma_report_)
- return uma_report_ == 2;
-
- uma_report_++;
- int64_t last_report = stats_.GetCounter(Stats::LAST_REPORT);
- Time last_time = Time::FromInternalValue(last_report);
- if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
- stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
- uma_report_++;
- return true;
- }
- return false;
-}
-
-void BackendImplV3::FirstEviction() {
- IndexHeaderV3* header = index_.header();
- header->flags |= CACHE_EVICTED;
- DCHECK(header->create_time);
- if (!GetEntryCount())
- return; // This is just for unit tests.
-
- Time create_time = Time::FromInternalValue(header->create_time);
- CACHE_UMA(AGE, "FillupAge", create_time);
-
- int64_t use_time = stats_.GetCounter(Stats::TIMER);
- CACHE_UMA(HOURS, "FillupTime", static_cast<int>(use_time / 120));
- CACHE_UMA(PERCENTAGE, "FirstHitRatio", stats_.GetHitRatio());
-
- if (!use_time)
- use_time = 1;
- CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate",
- static_cast<int>(header->num_entries / use_time));
- CACHE_UMA(COUNTS, "FirstByteIORate",
- static_cast<int>((header->num_bytes / 1024) / use_time));
-
- int avg_size = header->num_bytes / GetEntryCount();
- CACHE_UMA(COUNTS, "FirstEntrySize", avg_size);
-
- int large_entries_bytes = stats_.GetLargeEntriesSize();
- int large_ratio = large_entries_bytes * 100 / header->num_bytes;
- CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", large_ratio);
-
- if (!lru_eviction_) {
- CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", stats_.GetResurrectRatio());
- CACHE_UMA(PERCENTAGE, "FirstNoUseRatio",
- header->num_no_use_entries * 100 / header->num_entries);
- CACHE_UMA(PERCENTAGE, "FirstLowUseRatio",
- header->num_low_use_entries * 100 / header->num_entries);
- CACHE_UMA(PERCENTAGE, "FirstHighUseRatio",
- header->num_high_use_entries * 100 / header->num_entries);
- }
-
- stats_.ResetRatios();
-}
-
-void BackendImplV3::OnEvent(Stats::Counters an_event) {
- stats_.OnEvent(an_event);
-}
-
-void BackendImplV3::OnRead(int32_t bytes) {
- DCHECK_GE(bytes, 0);
- byte_count_ += bytes;
- if (byte_count_ < 0)
- byte_count_ = std::numeric_limits<int32_t>::max();
-}
-
-void BackendImplV3::OnWrite(int32_t bytes) {
- // We use the same implementation as OnRead... just log the number of bytes.
- OnRead(bytes);
-}
-
-void BackendImplV3::OnTimerTick() {
- stats_.OnEvent(Stats::TIMER);
- int64_t time = stats_.GetCounter(Stats::TIMER);
- int64_t current = stats_.GetCounter(Stats::OPEN_ENTRIES);
-
- // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
- // the bias towards 0.
- if (num_refs_ && (current != num_refs_)) {
- int64_t diff = (num_refs_ - current) / 50;
- if (!diff)
- diff = num_refs_ > current ? 1 : -1;
- current = current + diff;
- stats_.SetCounter(Stats::OPEN_ENTRIES, current);
- stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
- }
-
- CACHE_UMA(COUNTS, "NumberOfReferences", num_refs_);
-
- CACHE_UMA(COUNTS_10000, "EntryAccessRate", entry_count_);
- CACHE_UMA(COUNTS, "ByteIORate", byte_count_ / 1024);
-
- // These values cover about 99.5% of the population (Oct 2011).
- user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
- entry_count_ = 0;
- byte_count_ = 0;
- up_ticks_++;
-
- if (!data_)
- first_timer_ = false;
- if (first_timer_) {
- first_timer_ = false;
- if (ShouldReportAgain())
- ReportStats();
- }
-
- // Save stats to disk at 5 min intervals.
- if (time % 10 == 0)
- StoreStats();
-}
-
-void BackendImplV3::SetUnitTestMode() {
- user_flags_ |= UNIT_TEST_MODE;
-}
-
-void BackendImplV3::SetUpgradeMode() {
- user_flags_ |= UPGRADE_MODE;
- read_only_ = true;
-}
-
-void BackendImplV3::SetNewEviction() {
- user_flags_ |= EVICTION_V2;
- lru_eviction_ = false;
-}
-
-void BackendImplV3::SetFlags(uint32_t flags) {
- user_flags_ |= flags;
-}
-
-int BackendImplV3::FlushQueueForTest(const CompletionCallback& callback) {
- background_queue_.FlushQueue(callback);
- return net::ERR_IO_PENDING;
-}
-
-void BackendImplV3::TrimForTest(bool empty) {
- eviction_.SetTestMode();
- eviction_.TrimCache(empty);
-}
-
-void BackendImplV3::TrimDeletedListForTest(bool empty) {
- eviction_.SetTestMode();
- eviction_.TrimDeletedList(empty);
-}
-
-int BackendImplV3::SelfCheck() {
- if (!init_) {
- LOG(ERROR) << "Init failed";
- return ERR_INIT_FAILED;
- }
-
- int num_entries = rankings_.SelfCheck();
- if (num_entries < 0) {
- LOG(ERROR) << "Invalid rankings list, error " << num_entries;
-#if !defined(NET_BUILD_STRESS_CACHE)
- return num_entries;
-#endif
- }
-
- if (num_entries != data_->header.num_entries) {
- LOG(ERROR) << "Number of entries mismatch";
-#if !defined(NET_BUILD_STRESS_CACHE)
- return ERR_NUM_ENTRIES_MISMATCH;
-#endif
- }
-
- return CheckAllEntries();
-}
-
-// ------------------------------------------------------------------------
-
-net::CacheType BackendImplV3::GetCacheType() const {
- return cache_type_;
-}
-
-int32_t BackendImplV3::GetEntryCount() const {
- if (disabled_)
- return 0;
- DCHECK(init_);
- return index_.header()->num_entries;
-}
-
-int BackendImplV3::OpenEntry(const std::string& key, Entry** entry,
- const CompletionCallback& callback) {
- if (disabled_)
- return NULL;
-
- TimeTicks start = TimeTicks::Now();
- uint32_t hash = base::Hash(key);
- Trace("Open hash 0x%x", hash);
-
- bool error;
- EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
- if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
- // The entry was already evicted.
- cache_entry->Release();
- cache_entry = NULL;
- }
-
- int current_size = data_->header.num_bytes / (1024 * 1024);
- int64_t total_hours = stats_.GetCounter(Stats::TIMER) / 120;
- int64_t no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
- int64_t use_hours = total_hours - no_use_hours;
-
- if (!cache_entry) {
- CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
- CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
- CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
- CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
- stats_.OnEvent(Stats::OPEN_MISS);
- return NULL;
- }
-
- eviction_.OnOpenEntry(cache_entry);
- entry_count_++;
-
- Trace("Open hash 0x%x end: 0x%x", hash,
- cache_entry->entry()->address().value());
- CACHE_UMA(AGE_MS, "OpenTime", 0, start);
- CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
- CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
- CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
- stats_.OnEvent(Stats::OPEN_HIT);
- SIMPLE_STATS_COUNTER("disk_cache.hit");
- return cache_entry;
-}
-
-int BackendImplV3::CreateEntry(const std::string& key, Entry** entry,
- const CompletionCallback& callback) {
- if (disabled_ || key.empty())
- return NULL;
-
- TimeTicks start = TimeTicks::Now();
- Trace("Create hash 0x%x", hash);
-
- scoped_refptr<EntryImpl> parent;
- Addr entry_address(data_->table[hash & mask_]);
- if (entry_address.is_initialized()) {
- // We have an entry already. It could be the one we are looking for, or just
- // a hash conflict.
- bool error;
- EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
- if (old_entry)
- return ResurrectEntry(old_entry);
-
- EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
- DCHECK(!error);
- if (parent_entry) {
- parent.swap(&parent_entry);
- } else if (data_->table[hash & mask_]) {
- // We should have corrected the problem.
- NOTREACHED();
- return NULL;
- }
- }
-
- // The general flow is to allocate disk space and initialize the entry data,
- // followed by saving that to disk, then linking the entry though the index
- // and finally through the lists. If there is a crash in this process, we may
- // end up with:
- // a. Used, unreferenced empty blocks on disk (basically just garbage).
- // b. Used, unreferenced but meaningful data on disk (more garbage).
- // c. A fully formed entry, reachable only through the index.
- // d. A fully formed entry, also reachable through the lists, but still dirty.
- //
- // Anything after (b) can be automatically cleaned up. We may consider saving
- // the current operation (as we do while manipulating the lists) so that we
- // can detect and cleanup (a) and (b).
-
- int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
- if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
- LOG(ERROR) << "Create entry failed " << key.c_str();
- stats_.OnEvent(Stats::CREATE_ERROR);
- return NULL;
- }
-
- Addr node_address(0);
- if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
- block_files_.DeleteBlock(entry_address, false);
- LOG(ERROR) << "Create entry failed " << key.c_str();
- stats_.OnEvent(Stats::CREATE_ERROR);
- return NULL;
- }
-
- scoped_refptr<EntryImpl> cache_entry(
- new EntryImpl(this, entry_address, false));
- IncreaseNumRefs();
-
- if (!cache_entry->CreateEntry(node_address, key, hash)) {
- block_files_.DeleteBlock(entry_address, false);
- block_files_.DeleteBlock(node_address, false);
- LOG(ERROR) << "Create entry failed " << key.c_str();
- stats_.OnEvent(Stats::CREATE_ERROR);
- return NULL;
- }
-
- cache_entry->BeginLogging(net_log_, true);
-
- // We are not failing the operation; let's add this to the map.
- open_entries_[entry_address.value()] = cache_entry.get();
-
- // Save the entry.
- cache_entry->entry()->Store();
- cache_entry->rankings()->Store();
- IncreaseNumEntries();
- entry_count_++;
-
- // Link this entry through the index.
- if (parent.get()) {
- parent->SetNextAddress(entry_address);
- } else {
- data_->table[hash & mask_] = entry_address.value();
- }
-
- // Link this entry through the lists.
- eviction_.OnCreateEntry(cache_entry.get());
-
- CACHE_UMA(AGE_MS, "CreateTime", 0, start);
- stats_.OnEvent(Stats::CREATE_HIT);
- SIMPLE_STATS_COUNTER("disk_cache.miss");
- Trace("create entry hit ");
- FlushIndex();
- cache_entry->AddRef();
- return cache_entry.get();
-}
-
-int BackendImplV3::DoomEntry(const std::string& key,
- const CompletionCallback& callback) {
- if (disabled_)
- return net::ERR_FAILED;
-
- EntryImpl* entry = OpenEntryImpl(key);
- if (!entry)
- return net::ERR_FAILED;
-
- entry->DoomImpl();
- entry->Release();
- return net::OK;
-}
-
-int BackendImplV3::DoomAllEntries(const CompletionCallback& callback) {
- // This is not really an error, but it is an interesting condition.
- ReportError(ERR_CACHE_DOOMED);
- stats_.OnEvent(Stats::DOOM_CACHE);
- if (!num_refs_) {
- RestartCache(false);
- return disabled_ ? net::ERR_FAILED : net::OK;
- } else {
- if (disabled_)
- return net::ERR_FAILED;
-
- eviction_.TrimCache(true);
- return net::OK;
- }
-}
-
-int BackendImplV3::DoomEntriesBetween(base::Time initial_time,
- base::Time end_time,
- const CompletionCallback& callback) {
- DCHECK_NE(net::APP_CACHE, cache_type_);
- if (end_time.is_null())
- return SyncDoomEntriesSince(initial_time);
-
- DCHECK(end_time >= initial_time);
-
- if (disabled_)
- return net::ERR_FAILED;
-
- EntryImpl* node;
- void* iter = NULL;
- EntryImpl* next = OpenNextEntryImpl(&iter);
- if (!next)
- return net::OK;
-
- while (next) {
- node = next;
- next = OpenNextEntryImpl(&iter);
-
- if (node->GetLastUsed() >= initial_time &&
- node->GetLastUsed() < end_time) {
- node->DoomImpl();
- } else if (node->GetLastUsed() < initial_time) {
- if (next)
- next->Release();
- next = NULL;
- SyncEndEnumeration(iter);
- }
-
- node->Release();
- }
-
- return net::OK;
-}
-
-int BackendImplV3::DoomEntriesSince(base::Time initial_time,
- const CompletionCallback& callback) {
- DCHECK_NE(net::APP_CACHE, cache_type_);
- if (disabled_)
- return net::ERR_FAILED;
-
- stats_.OnEvent(Stats::DOOM_RECENT);
- for (;;) {
- void* iter = NULL;
- EntryImpl* entry = OpenNextEntryImpl(&iter);
- if (!entry)
- return net::OK;
-
- if (initial_time > entry->GetLastUsed()) {
- entry->Release();
- SyncEndEnumeration(iter);
- return net::OK;
- }
-
- entry->DoomImpl();
- entry->Release();
- SyncEndEnumeration(iter); // Dooming the entry invalidates the iterator.
- }
-}
-
-class BackendImplV3::IteratorImpl : public Backend::Iterator {
- public:
- explicit IteratorImpl(base::WeakPtr<InFlightBackendIO> background_queue)
- : background_queue_(background_queue), data_(NULL) {
- }
-
- int OpenNextEntry(Entry** next_entry,
- const net::CompletionCallback& callback) override {
- if (!background_queue_)
- return net::ERR_FAILED;
- background_queue_->OpenNextEntry(&data_, next_entry, callback);
- return net::ERR_IO_PENDING;
- }
-
- private:
- const base::WeakPtr<InFlightBackendIO> background_queue_;
- void* data_;
-};
-
-scoped_ptr<Backend::Iterator> BackendImplV3::CreateIterator() {
- return scoped_ptr<Backend::Iterator>(new IteratorImpl(GetBackgroundQueue()));
-}
-
-void BackendImplV3::GetStats(StatsItems* stats) {
- if (disabled_)
- return;
-
- std::pair<std::string, std::string> item;
-
- item.first = "Entries";
- item.second = base::IntToString(data_->header.num_entries);
- stats->push_back(item);
-
- item.first = "Pending IO";
- item.second = base::IntToString(num_pending_io_);
- stats->push_back(item);
-
- item.first = "Max size";
- item.second = base::IntToString(max_size_);
- stats->push_back(item);
-
- item.first = "Current size";
- item.second = base::IntToString(data_->header.num_bytes);
- stats->push_back(item);
-
- item.first = "Cache type";
- item.second = "Blockfile Cache";
- stats->push_back(item);
-
- stats_.GetItems(stats);
-}
-
-void BackendImplV3::OnExternalCacheHit(const std::string& key) {
- if (disabled_)
- return;
-
- uint32_t hash = base::Hash(key);
- bool error;
- EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
- if (cache_entry) {
- if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
- UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
- }
- cache_entry->Release();
- }
-}
-
-// ------------------------------------------------------------------------
-
-// The maximum cache size will be either set explicitly by the caller, or
-// calculated by this code.
-void BackendImplV3::AdjustMaxCacheSize(int table_len) {
- if (max_size_)
- return;
-
- // If table_len is provided, the index file exists.
- DCHECK(!table_len || data_->header.magic);
-
- // The user is not setting the size, let's figure it out.
- int64_t available = base::SysInfo::AmountOfFreeDiskSpace(path_);
- if (available < 0) {
- max_size_ = kDefaultCacheSize;
- return;
- }
-
- if (table_len)
- available += data_->header.num_bytes;
-
- max_size_ = PreferedCacheSize(available);
-
- // Let's not use more than the default size while we tune-up the performance
- // of bigger caches. TODO(rvargas): remove this limit.
- if (max_size_ > kDefaultCacheSize * 4)
- max_size_ = kDefaultCacheSize * 4;
-
- if (!table_len)
- return;
-
- // If we already have a table, adjust the size to it.
- int current_max_size = MaxStorageSizeForTable(table_len);
- if (max_size_ > current_max_size)
- max_size_= current_max_size;
-}
-
-bool BackendImplV3::InitStats() {
- Addr address(data_->header.stats);
- int size = stats_.StorageSize();
-
- if (!address.is_initialized()) {
- FileType file_type = Addr::RequiredFileType(size);
- DCHECK_NE(file_type, EXTERNAL);
- int num_blocks = Addr::RequiredBlocks(size, file_type);
-
- if (!CreateBlock(file_type, num_blocks, &address))
- return false;
- return stats_.Init(NULL, 0, address);
- }
-
- if (!address.is_block_file()) {
- NOTREACHED();
- return false;
- }
-
- // Load the required data.
- size = address.num_blocks() * address.BlockSize();
- MappedFile* file = File(address);
- if (!file)
- return false;
-
- scoped_ptr<char[]> data(new char[size]);
- size_t offset = address.start_block() * address.BlockSize() +
- kBlockHeaderSize;
- if (!file->Read(data.get(), size, offset))
- return false;
-
- if (!stats_.Init(data.get(), size, address))
- return false;
- if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
- stats_.InitSizeHistogram();
- return true;
-}
-
-void BackendImplV3::StoreStats() {
- int size = stats_.StorageSize();
- scoped_ptr<char[]> data(new char[size]);
- Addr address;
- size = stats_.SerializeStats(data.get(), size, &address);
- DCHECK(size);
- if (!address.is_initialized())
- return;
-
- MappedFile* file = File(address);
- if (!file)
- return;
-
- size_t offset = address.start_block() * address.BlockSize() +
- kBlockHeaderSize;
- file->Write(data.get(), size, offset); // ignore result.
-}
-
-void BackendImplV3::RestartCache(bool failure) {
- int64_t errors = stats_.GetCounter(Stats::FATAL_ERROR);
- int64_t full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
- int64_t partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
- int64_t last_report = stats_.GetCounter(Stats::LAST_REPORT);
-
- PrepareForRestart();
- if (failure) {
- DCHECK(!num_refs_);
- DCHECK(!open_entries_.size());
- DelayedCacheCleanup(path_);
- } else {
- DeleteCache(path_, false);
- }
-
- // Don't call Init() if directed by the unit test: we are simulating a failure
- // trying to re-enable the cache.
- if (unit_test_)
- init_ = true; // Let the destructor do proper cleanup.
- else if (SyncInit() == net::OK) {
- stats_.SetCounter(Stats::FATAL_ERROR, errors);
- stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
- stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
- stats_.SetCounter(Stats::LAST_REPORT, last_report);
- }
-}
-
-void BackendImplV3::PrepareForRestart() {
- if (!(user_flags_ & EVICTION_V2))
- lru_eviction_ = true;
-
- disabled_ = true;
- data_->header.crash = 0;
- index_->Flush();
- index_ = NULL;
- data_ = NULL;
- block_files_.CloseFiles();
- rankings_.Reset();
- init_ = false;
- restarted_ = true;
-}
-
-void BackendImplV3::CleanupCache() {
- Trace("Backend Cleanup");
- eviction_.Stop();
- timer_.reset();
-
- if (init_) {
- StoreStats();
- if (data_)
- data_->header.crash = 0;
-
- if (user_flags_ & kNoRandom) {
-      // This is a net_unittest; verify that we are not 'leaking' entries.
- File::WaitForPendingIO(&num_pending_io_);
- DCHECK(!num_refs_);
- } else {
- File::DropPendingIO();
- }
- }
- block_files_.CloseFiles();
- FlushIndex();
- index_ = NULL;
- ptr_factory_.InvalidateWeakPtrs();
- done_.Signal();
-}
-
-int BackendImplV3::NewEntry(Addr address, EntryImplV3** entry) {
- EntriesMap::iterator it = open_entries_.find(address.value());
- if (it != open_entries_.end()) {
- // Easy job. This entry is already in memory.
- EntryImpl* this_entry = it->second;
- this_entry->AddRef();
- *entry = this_entry;
- return 0;
- }
-
- STRESS_DCHECK(block_files_.IsValid(address));
-
- if (!address.SanityCheckForEntry()) {
- LOG(WARNING) << "Wrong entry address.";
- STRESS_NOTREACHED();
- return ERR_INVALID_ADDRESS;
- }
-
- scoped_refptr<EntryImpl> cache_entry(
- new EntryImpl(this, address, read_only_));
- IncreaseNumRefs();
- *entry = NULL;
-
- TimeTicks start = TimeTicks::Now();
- if (!cache_entry->entry()->Load())
- return ERR_READ_FAILURE;
-
- if (IsLoaded()) {
- CACHE_UMA(AGE_MS, "LoadTime", 0, start);
- }
-
- if (!cache_entry->SanityCheck()) {
- LOG(WARNING) << "Messed up entry found.";
- STRESS_NOTREACHED();
- return ERR_INVALID_ENTRY;
- }
-
- STRESS_DCHECK(block_files_.IsValid(
- Addr(cache_entry->entry()->Data()->rankings_node)));
-
- if (!cache_entry->LoadNodeAddress())
- return ERR_READ_FAILURE;
-
- if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
- STRESS_NOTREACHED();
- cache_entry->SetDirtyFlag(0);
- // Don't remove this from the list (it is not linked properly). Instead,
- // break the link back to the entry because it is going away, and leave the
- // rankings node to be deleted if we find it through a list.
- rankings_.SetContents(cache_entry->rankings(), 0);
- } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
- STRESS_NOTREACHED();
- cache_entry->SetDirtyFlag(0);
- rankings_.SetContents(cache_entry->rankings(), address.value());
- }
-
- if (!cache_entry->DataSanityCheck()) {
- LOG(WARNING) << "Messed up entry found.";
- cache_entry->SetDirtyFlag(0);
- cache_entry->FixForDelete();
- }
-
- // Prevent overwriting the dirty flag on the destructor.
- cache_entry->SetDirtyFlag(GetCurrentEntryId());
-
- if (cache_entry->dirty()) {
- Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
- address.value());
- }
-
- open_entries_[address.value()] = cache_entry.get();
-
- cache_entry->BeginLogging(net_log_, false);
- cache_entry.swap(entry);
- return 0;
-}
-
-void BackendImplV3::AddStorageSize(int32_t bytes) {
- data_->header.num_bytes += bytes;
- DCHECK_GE(data_->header.num_bytes, 0);
-}
-
-void BackendImplV3::SubstractStorageSize(int32_t bytes) {
- data_->header.num_bytes -= bytes;
- DCHECK_GE(data_->header.num_bytes, 0);
-}
-
-void BackendImplV3::IncreaseNumRefs() {
- num_refs_++;
- if (max_refs_ < num_refs_)
- max_refs_ = num_refs_;
-}
-
-void BackendImplV3::DecreaseNumRefs() {
- DCHECK(num_refs_);
- num_refs_--;
-
- if (!num_refs_ && disabled_)
- base::MessageLoop::current()->PostTask(
-        FROM_HERE, base::Bind(&BackendImplV3::RestartCache, GetWeakPtr(), true));
-}
-
-void BackendImplV3::IncreaseNumEntries() {
- index_.header()->num_entries++;
- DCHECK_GT(index_.header()->num_entries, 0);
-}
-
-void BackendImplV3::DecreaseNumEntries() {
- index_.header()->num_entries--;
- if (index_.header()->num_entries < 0) {
- NOTREACHED();
- index_.header()->num_entries = 0;
- }
-}
-
-int BackendImplV3::SyncInit() {
-#if defined(NET_BUILD_STRESS_CACHE)
- // Start evictions right away.
- up_ticks_ = kTrimDelay * 2;
-#endif
- DCHECK(!init_);
- if (init_)
- return net::ERR_FAILED;
-
- bool create_files = false;
- if (!InitBackingStore(&create_files)) {
- ReportError(ERR_STORAGE_ERROR);
- return net::ERR_FAILED;
- }
-
- num_refs_ = num_pending_io_ = max_refs_ = 0;
- entry_count_ = byte_count_ = 0;
-
- if (!restarted_) {
- buffer_bytes_ = 0;
- trace_object_ = TraceObject::GetTraceObject();
- // Create a recurrent timer of 30 secs.
- int timer_delay = unit_test_ ? 1000 : 30000;
- timer_.reset(new base::RepeatingTimer());
- timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
-                  &BackendImplV3::OnTimerTick);
- }
-
- init_ = true;
- Trace("Init");
-
- if (data_->header.experiment != NO_EXPERIMENT &&
- cache_type_ != net::DISK_CACHE) {
- // No experiment for other caches.
- return net::ERR_FAILED;
- }
-
- if (!(user_flags_ & kNoRandom)) {
- // The unit test controls directly what to test.
- new_eviction_ = (cache_type_ == net::DISK_CACHE);
- }
-
- if (!CheckIndex()) {
- ReportError(ERR_INIT_FAILED);
- return net::ERR_FAILED;
- }
-
- if (!restarted_ && (create_files || !data_->header.num_entries))
- ReportError(ERR_CACHE_CREATED);
-
- if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
- !InitExperiment(&data_->header, create_files)) {
- return net::ERR_FAILED;
- }
-
- // We don't care if the value overflows. The only thing we care about is that
- // the id cannot be zero, because that value is used as "not dirty".
- // Increasing the value once per second gives us many years before we start
- // having collisions.
- data_->header.this_id++;
- if (!data_->header.this_id)
- data_->header.this_id++;
-
- bool previous_crash = (data_->header.crash != 0);
- data_->header.crash = 1;
-
- if (!block_files_.Init(create_files))
- return net::ERR_FAILED;
-
- // We want to minimize the changes to cache for an AppCache.
- if (cache_type() == net::APP_CACHE) {
- DCHECK(!new_eviction_);
- read_only_ = true;
- } else if (cache_type() == net::SHADER_CACHE) {
- DCHECK(!new_eviction_);
- }
-
- eviction_.Init(this);
-
- // stats_ and rankings_ may end up calling back to us so we better be enabled.
- disabled_ = false;
- if (!InitStats())
- return net::ERR_FAILED;
-
- disabled_ = !rankings_.Init(this, new_eviction_);
-
-#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
- trace_object_->EnableTracing(false);
- int sc = SelfCheck();
- if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
- NOTREACHED();
- trace_object_->EnableTracing(true);
-#endif
-
- if (previous_crash) {
- ReportError(ERR_PREVIOUS_CRASH);
- } else if (!restarted_) {
- ReportError(ERR_NO_ERROR);
- }
-
- FlushIndex();
-
- return disabled_ ? net::ERR_FAILED : net::OK;
-}
-
-EntryImpl* BackendImplV3::ResurrectEntry(EntryImpl* deleted_entry) {
- if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
- deleted_entry->Release();
- stats_.OnEvent(Stats::CREATE_MISS);
- Trace("create entry miss ");
- return NULL;
- }
-
- // We are attempting to create an entry and found out that the entry was
- // previously deleted.
-
- eviction_.OnCreateEntry(deleted_entry);
- entry_count_++;
-
- stats_.OnEvent(Stats::RESURRECT_HIT);
- Trace("Resurrect entry hit ");
- return deleted_entry;
-}
-
-EntryImpl* BackendImplV3::CreateEntryImpl(const std::string& key) {
- if (disabled_ || key.empty())
- return NULL;
-
-  TimeTicks start = TimeTicks::Now();
-  uint32_t hash = base::Hash(key);
-  Trace("Create hash 0x%x", hash);
-
- scoped_refptr<EntryImpl> parent;
- Addr entry_address(data_->table[hash & mask_]);
- if (entry_address.is_initialized()) {
- // We have an entry already. It could be the one we are looking for, or just
- // a hash conflict.
- bool error;
- EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
- if (old_entry)
- return ResurrectEntry(old_entry);
-
- EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
- DCHECK(!error);
- if (parent_entry) {
- parent.swap(&parent_entry);
- } else if (data_->table[hash & mask_]) {
- // We should have corrected the problem.
- NOTREACHED();
- return NULL;
- }
- }
-
- // The general flow is to allocate disk space and initialize the entry data,
-  // followed by saving that to disk, then linking the entry through the index
- // and finally through the lists. If there is a crash in this process, we may
- // end up with:
- // a. Used, unreferenced empty blocks on disk (basically just garbage).
- // b. Used, unreferenced but meaningful data on disk (more garbage).
- // c. A fully formed entry, reachable only through the index.
- // d. A fully formed entry, also reachable through the lists, but still dirty.
- //
- // Anything after (b) can be automatically cleaned up. We may consider saving
- // the current operation (as we do while manipulating the lists) so that we
- // can detect and cleanup (a) and (b).
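-  // The code below follows that order: allocate the entry and rankings blocks,
-  // Store() them, hook the entry into the index (or the parent's next
-  // pointer), and only then hand it to eviction_.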
-
- int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
- if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
- LOG(ERROR) << "Create entry failed " << key.c_str();
- stats_.OnEvent(Stats::CREATE_ERROR);
- return NULL;
- }
-
- Addr node_address(0);
- if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
- block_files_.DeleteBlock(entry_address, false);
- LOG(ERROR) << "Create entry failed " << key.c_str();
- stats_.OnEvent(Stats::CREATE_ERROR);
- return NULL;
- }
-
- scoped_refptr<EntryImpl> cache_entry(
- new EntryImpl(this, entry_address, false));
- IncreaseNumRefs();
-
- if (!cache_entry->CreateEntry(node_address, key, hash)) {
- block_files_.DeleteBlock(entry_address, false);
- block_files_.DeleteBlock(node_address, false);
- LOG(ERROR) << "Create entry failed " << key.c_str();
- stats_.OnEvent(Stats::CREATE_ERROR);
- return NULL;
- }
-
- cache_entry->BeginLogging(net_log_, true);
-
- // We are not failing the operation; let's add this to the map.
- open_entries_[entry_address.value()] = cache_entry;
-
- // Save the entry.
- cache_entry->entry()->Store();
- cache_entry->rankings()->Store();
- IncreaseNumEntries();
- entry_count_++;
-
- // Link this entry through the index.
- if (parent.get()) {
- parent->SetNextAddress(entry_address);
- } else {
- data_->table[hash & mask_] = entry_address.value();
- }
-
- // Link this entry through the lists.
- eviction_.OnCreateEntry(cache_entry);
-
- CACHE_UMA(AGE_MS, "CreateTime", 0, start);
- stats_.OnEvent(Stats::CREATE_HIT);
- SIMPLE_STATS_COUNTER("disk_cache.miss");
- Trace("create entry hit ");
- FlushIndex();
- cache_entry->AddRef();
- return cache_entry.get();
-}
-
-void BackendImplV3::LogStats() {
- StatsItems stats;
- GetStats(&stats);
-
- for (size_t index = 0; index < stats.size(); index++)
- VLOG(1) << stats[index].first << ": " << stats[index].second;
-}
-
-void BackendImplV3::ReportStats() {
- IndexHeaderV3* header = index_.header();
- CACHE_UMA(COUNTS, "Entries", header->num_entries);
-
- int current_size = header->num_bytes / (1024 * 1024);
- int max_size = max_size_ / (1024 * 1024);
-
- CACHE_UMA(COUNTS_10000, "Size", current_size);
- CACHE_UMA(COUNTS_10000, "MaxSize", max_size);
- if (!max_size)
- max_size++;
- CACHE_UMA(PERCENTAGE, "UsedSpace", current_size * 100 / max_size);
-
- CACHE_UMA(COUNTS_10000, "AverageOpenEntries",
- static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
- CACHE_UMA(COUNTS_10000, "MaxOpenEntries",
- static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
- stats_.SetCounter(Stats::MAX_ENTRIES, 0);
-
- CACHE_UMA(COUNTS_10000, "TotalFatalErrors",
- static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
- CACHE_UMA(COUNTS_10000, "TotalDoomCache",
- static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
- CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries",
- static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
- stats_.SetCounter(Stats::FATAL_ERROR, 0);
- stats_.SetCounter(Stats::DOOM_CACHE, 0);
- stats_.SetCounter(Stats::DOOM_RECENT, 0);
-
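-  // The stats timer fires every 30 seconds in production, so 120 ticks equal
-  // one hour.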
- int64_t total_hours = stats_.GetCounter(Stats::TIMER) / 120;
- if (!(header->flags & CACHE_EVICTED)) {
- CACHE_UMA(HOURS, "TotalTimeNotFull", static_cast<int>(total_hours));
- return;
- }
-
-  // This is an up-to-date client that will report FirstEviction() data. After
- // that event, start reporting this:
-
- CACHE_UMA(HOURS, "TotalTime", static_cast<int>(total_hours));
-
- int64_t use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
- stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));
-
- // We may see users with no use_hours at this point if this is the first time
- // we are running this code.
- if (use_hours)
- use_hours = total_hours - use_hours;
-
- if (!use_hours || !GetEntryCount() || !header->num_bytes)
- return;
-
- CACHE_UMA(HOURS, "UseTime", static_cast<int>(use_hours));
-
- int64_t trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
- CACHE_UMA(COUNTS, "TrimRate", static_cast<int>(trim_rate));
-
- int avg_size = header->num_bytes / GetEntryCount();
- CACHE_UMA(COUNTS, "EntrySize", avg_size);
- CACHE_UMA(COUNTS, "EntriesFull", header->num_entries);
-
- int large_entries_bytes = stats_.GetLargeEntriesSize();
- int large_ratio = large_entries_bytes * 100 / header->num_bytes;
- CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", large_ratio);
-
- if (!lru_eviction_) {
- CACHE_UMA(PERCENTAGE, "ResurrectRatio", stats_.GetResurrectRatio());
- CACHE_UMA(PERCENTAGE, "NoUseRatio",
- header->num_no_use_entries * 100 / header->num_entries);
- CACHE_UMA(PERCENTAGE, "LowUseRatio",
- header->num_low_use_entries * 100 / header->num_entries);
- CACHE_UMA(PERCENTAGE, "HighUseRatio",
- header->num_high_use_entries * 100 / header->num_entries);
- CACHE_UMA(PERCENTAGE, "DeletedRatio",
- header->num_evicted_entries * 100 / header->num_entries);
- }
-
- stats_.ResetRatios();
- stats_.SetCounter(Stats::TRIM_ENTRY, 0);
-
- if (cache_type_ == net::DISK_CACHE)
- block_files_.ReportStats();
-}
-
-void BackendImplV3::ReportError(int error) {
- STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
- error == ERR_CACHE_CREATED);
-
- // We transmit positive numbers, instead of direct error codes.
- DCHECK_LE(error, 0);
- CACHE_UMA(CACHE_ERROR, "Error", error * -1);
-}
-
-bool BackendImplV3::CheckIndex() {
- DCHECK(data_);
-
- size_t current_size = index_->GetLength();
- if (current_size < sizeof(Index)) {
- LOG(ERROR) << "Corrupt Index file";
- return false;
- }
-
- if (new_eviction_) {
- // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
- if (kIndexMagic != data_->header.magic ||
- kCurrentVersion >> 16 != data_->header.version >> 16) {
- LOG(ERROR) << "Invalid file version or magic";
- return false;
- }
- if (kCurrentVersion == data_->header.version) {
- // We need file version 2.1 for the new eviction algorithm.
- UpgradeTo2_1();
- }
- } else {
- if (kIndexMagic != data_->header.magic ||
- kCurrentVersion != data_->header.version) {
- LOG(ERROR) << "Invalid file version or magic";
- return false;
- }
- }
-
- if (!data_->header.table_len) {
- LOG(ERROR) << "Invalid table size";
- return false;
- }
-
- if (current_size < GetIndexSize(data_->header.table_len) ||
- data_->header.table_len & (kBaseTableLen - 1)) {
- LOG(ERROR) << "Corrupt Index file";
- return false;
- }
-
- AdjustMaxCacheSize(data_->header.table_len);
-
-#if !defined(NET_BUILD_STRESS_CACHE)
- if (data_->header.num_bytes < 0 ||
- (max_size_ < std::numeric_limits<int32_t>::max() - kDefaultCacheSize &&
- data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
- LOG(ERROR) << "Invalid cache (current) size";
- return false;
- }
-#endif
-
- if (data_->header.num_entries < 0) {
- LOG(ERROR) << "Invalid number of entries";
- return false;
- }
-
- if (!mask_)
- mask_ = data_->header.table_len - 1;
-
- // Load the table into memory with a single read.
- scoped_ptr<char[]> buf(new char[current_size]);
- return index_->Read(buf.get(), current_size, 0);
-}
-
-int BackendImplV3::CheckAllEntries() {
- int num_dirty = 0;
- int num_entries = 0;
- DCHECK(mask_ < std::numeric_limits<uint32_t>::max());
- for (unsigned int i = 0; i <= mask_; i++) {
- Addr address(data_->table[i]);
- if (!address.is_initialized())
- continue;
- for (;;) {
- EntryImpl* tmp;
- int ret = NewEntry(address, &tmp);
- if (ret) {
- STRESS_NOTREACHED();
- return ret;
- }
- scoped_refptr<EntryImpl> cache_entry;
- cache_entry.swap(&tmp);
-
- if (cache_entry->dirty())
- num_dirty++;
- else if (CheckEntry(cache_entry.get()))
- num_entries++;
- else
- return ERR_INVALID_ENTRY;
-
- DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
- address.set_value(cache_entry->GetNextAddress());
- if (!address.is_initialized())
- break;
- }
- }
-
- Trace("CheckAllEntries End");
- if (num_entries + num_dirty != data_->header.num_entries) {
- LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
- " " << data_->header.num_entries;
- DCHECK_LT(num_entries, data_->header.num_entries);
- return ERR_NUM_ENTRIES_MISMATCH;
- }
-
- return num_dirty;
-}
-
-bool BackendImplV3::CheckEntry(EntryImpl* cache_entry) {
- bool ok = block_files_.IsValid(cache_entry->entry()->address());
- ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
- EntryStore* data = cache_entry->entry()->Data();
- for (size_t i = 0; i < arraysize(data->data_addr); i++) {
- if (data->data_addr[i]) {
- Addr address(data->data_addr[i]);
- if (address.is_block_file())
- ok = ok && block_files_.IsValid(address);
- }
- }
-
- return ok && cache_entry->rankings()->VerifyHash();
-}
-
-int BackendImplV3::MaxBuffersSize() {
- static int64_t total_memory = base::SysInfo::AmountOfPhysicalMemory();
- static bool done = false;
-
- if (!done) {
- const int kMaxBuffersSize = 30 * 1024 * 1024;
-
- // We want to use up to 2% of the computer's memory.
- total_memory = total_memory * 2 / 100;
- if (total_memory > kMaxBuffersSize || total_memory <= 0)
- total_memory = kMaxBuffersSize;
-
- done = true;
- }
-
- return static_cast<int>(total_memory);
-}
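-
-// Annotation, not from the original file: the budget policy above restated as
-// a standalone sketch (hypothetical name), to make the clamping explicit.
-static int ExampleBufferBudget(int64_t physical_memory) {
-  const int kMaxBuffersSize = 30 * 1024 * 1024;
-  int64_t budget = physical_memory * 2 / 100;  // Use up to 2% of RAM.
-  if (budget > kMaxBuffersSize || budget <= 0)
-    budget = kMaxBuffersSize;
-  return static_cast<int>(budget);  // 8 GB -> 30 MB; 1 GB -> ~20 MB.
-}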
-
-#endif // defined(V3_NOT_JUST_YET_READY).
-
-bool BackendImplV3::IsAllocAllowed(int current_size, int new_size) {
- return false;
-}
-
-net::CacheType BackendImplV3::GetCacheType() const {
- return cache_type_;
-}
-
-int32_t BackendImplV3::GetEntryCount() const {
- return 0;
-}
-
-int BackendImplV3::OpenEntry(const std::string& key, Entry** entry,
- const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-int BackendImplV3::CreateEntry(const std::string& key, Entry** entry,
- const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-int BackendImplV3::DoomEntry(const std::string& key,
- const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-int BackendImplV3::DoomAllEntries(const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-int BackendImplV3::DoomEntriesBetween(base::Time initial_time,
- base::Time end_time,
- const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-int BackendImplV3::DoomEntriesSince(base::Time initial_time,
- const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-int BackendImplV3::CalculateSizeOfAllEntries(
- const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-class BackendImplV3::NotImplementedIterator : public Backend::Iterator {
- public:
- int OpenNextEntry(disk_cache::Entry** next_entry,
- const net::CompletionCallback& callback) override {
- return net::ERR_NOT_IMPLEMENTED;
- }
-};
-
-scoped_ptr<Backend::Iterator> BackendImplV3::CreateIterator() {
- return scoped_ptr<Iterator>(new NotImplementedIterator());
-}
-
-void BackendImplV3::GetStats(StatsItems* stats) {
- NOTIMPLEMENTED();
-}
-
-void BackendImplV3::OnExternalCacheHit(const std::string& key) {
- NOTIMPLEMENTED();
-}
-
-void BackendImplV3::CleanupCache() {
-}
-
-} // namespace disk_cache
diff --git a/chromium/net/disk_cache/blockfile/backend_impl_v3.h b/chromium/net/disk_cache/blockfile/backend_impl_v3.h
deleted file mode 100644
index df51c2137af..00000000000
--- a/chromium/net/disk_cache/blockfile/backend_impl_v3.h
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// See net/disk_cache/disk_cache.h for the public interface of the cache.
-
-#ifndef NET_DISK_CACHE_BLOCKFILE_BACKEND_IMPL_V3_H_
-#define NET_DISK_CACHE_BLOCKFILE_BACKEND_IMPL_V3_H_
-
-#include <stdint.h>
-
-#include "base/containers/hash_tables.h"
-#include "base/files/file_path.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/timer/timer.h"
-#include "net/disk_cache/blockfile/block_bitmaps_v3.h"
-#include "net/disk_cache/blockfile/block_files.h"
-#include "net/disk_cache/blockfile/eviction_v3.h"
-#include "net/disk_cache/blockfile/index_table_v3.h"
-#include "net/disk_cache/blockfile/stats.h"
-#include "net/disk_cache/blockfile/stress_support.h"
-#include "net/disk_cache/blockfile/trace.h"
-#include "net/disk_cache/disk_cache.h"
-
-namespace base {
-class SingleThreadTaskRunner;
-} // namespace base
-
-namespace net {
-class NetLog;
-} // namespace net
-
-namespace disk_cache {
-
-class EntryImplV3;
-
-// This class implements the Backend interface. An object of this
-// class handles the operations of the cache for a particular profile.
-class NET_EXPORT_PRIVATE BackendImplV3 : public Backend {
- public:
- enum BackendFlags {
- MAX_SIZE = 1 << 1, // A maximum size was provided.
- UNIT_TEST_MODE = 1 << 2, // We are modifying the behavior for testing.
- UPGRADE_MODE = 1 << 3, // This is the upgrade tool (dump).
- EVICTION_V2 = 1 << 4, // Use of new eviction was specified.
- BASIC_UNIT_TEST = 1 << 5, // Identifies almost all unit tests.
- NO_LOAD_PROTECTION = 1 << 6, // Don't act conservatively under load.
- NO_BUFFERING = 1 << 7, // Disable extended IO buffering.
- NO_CLEAN_ON_EXIT = 1 << 8 // Avoid saving data at exit time.
- };
-
- BackendImplV3(const base::FilePath& path,
- const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
- net::NetLog* net_log);
- ~BackendImplV3() override;
-
- // Performs general initialization for this current instance of the cache.
- int Init(const CompletionCallback& callback);
-
- // Sets the maximum size for the total amount of data stored by this instance.
- bool SetMaxSize(int max_bytes);
-
- // Sets the cache type for this backend.
- void SetType(net::CacheType type);
-
- // Creates a new storage block of size block_count.
- bool CreateBlock(FileType block_type, int block_count,
- Addr* block_address);
-
- // Updates the ranking information for an entry.
- void UpdateRank(EntryImplV3* entry, bool modified);
-
- // Permanently deletes an entry, but still keeps track of it.
- void InternalDoomEntry(EntryImplV3* entry);
-
- // This method must be called when an entry is released for the last time, so
- // the entry should not be used anymore. |address| is the cache address of the
- // entry.
- void OnEntryDestroyBegin(Addr address);
-
- // This method must be called after all resources for an entry have been
- // released.
- void OnEntryDestroyEnd();
-
- // If the |address| corresponds to an open entry, returns a pointer to that
-  // entry; otherwise returns NULL. Note that this method does not increase the
- // ref counter for the entry.
- EntryImplV3* GetOpenEntry(Addr address) const;
-
- // Returns the id being used on this run of the cache.
- int32_t GetCurrentEntryId() const;
-
- // Returns the maximum size for a file to reside on the cache.
- int MaxFileSize() const;
-
- // A user data block is being created, extended or truncated.
- void ModifyStorageSize(int32_t old_size, int32_t new_size);
-
- // Logs requests that are denied due to being too big.
- void TooMuchStorageRequested(int32_t size);
-
- // Returns true if a temporary buffer is allowed to be extended.
- bool IsAllocAllowed(int current_size, int new_size);
-
- // Tracks the release of |size| bytes by an entry buffer.
- void BufferDeleted(int size);
-
- // Only intended for testing the two previous methods.
- int GetTotalBuffersSize() const {
- return buffer_bytes_;
- }
-
- // Returns true if this instance seems to be under heavy load.
- bool IsLoaded() const;
-
- // Returns the full histogram name, for the given base |name| and the current
- // cache type. The name will be "DiskCache3.name_type".
- std::string HistogramName(const char* name) const;
-
- net::CacheType cache_type() const {
- return cache_type_;
- }
-
- bool read_only() const {
- return read_only_;
- }
-
- // Returns a weak pointer to this object.
- base::WeakPtr<BackendImplV3> GetWeakPtr();
-
- // Returns true if we should send histograms for this user again. The caller
- // must call this function only once per run (because it returns always the
- // same thing on a given run).
- bool ShouldReportAgain();
-
- // Reports some data when we filled up the cache.
- void FirstEviction();
-
- // Called when an interesting event should be logged (counted).
- void OnEvent(Stats::Counters an_event);
-
- // Keeps track of payload access (doesn't include metadata).
- void OnRead(int bytes);
- void OnWrite(int bytes);
-
- // Timer callback to calculate usage statistics and perform backups.
- void OnTimerTick();
-
- // Sets internal parameters to enable unit testing mode.
- void SetUnitTestMode();
-
- // Sets internal parameters to enable upgrade mode (for internal tools).
- void SetUpgradeMode();
-
- // Sets the eviction algorithm to version 2.
- void SetNewEviction();
-
- // Sets an explicit set of BackendFlags.
- void SetFlags(uint32_t flags);
-
- // Sends a dummy operation through the operation queue, for unit tests.
- int FlushQueueForTest(const CompletionCallback& callback);
-
-  // Trims an entry (all if |empty| is true) from the cache. This method
-  // should be called directly on the cache thread.
- void TrimForTest(bool empty);
-
- // Trims an entry (all if |empty| is true) from the list of deleted
- // entries. This method should be called directly on the cache thread.
- void TrimDeletedListForTest(bool empty);
-
- // Performs a simple self-check, and returns the number of dirty items
- // or an error code (negative value).
- int SelfCheck();
-
- // Backend implementation.
- net::CacheType GetCacheType() const override;
- int32_t GetEntryCount() const override;
- int OpenEntry(const std::string& key,
- Entry** entry,
- const CompletionCallback& callback) override;
- int CreateEntry(const std::string& key,
- Entry** entry,
- const CompletionCallback& callback) override;
- int DoomEntry(const std::string& key,
- const CompletionCallback& callback) override;
- int DoomAllEntries(const CompletionCallback& callback) override;
- int DoomEntriesBetween(base::Time initial_time,
- base::Time end_time,
- const CompletionCallback& callback) override;
- int DoomEntriesSince(base::Time initial_time,
- const CompletionCallback& callback) override;
- int CalculateSizeOfAllEntries(const CompletionCallback& callback) override;
- scoped_ptr<Iterator> CreateIterator() override;
- void GetStats(StatsItems* stats) override;
- void OnExternalCacheHit(const std::string& key) override;
-
- private:
- friend class EvictionV3;
- typedef base::hash_map<CacheAddr, EntryImplV3*> EntriesMap;
- class IteratorImpl;
- class NotImplementedIterator;
- class Worker;
-
-  void AdjustMaxCacheSize(int table_len);
-  bool InitStats();
- void StoreStats();
-
- // Deletes the cache and starts again.
- void RestartCache(bool failure);
- void PrepareForRestart();
-
- // Performs final cleanup.
- void CleanupCache();
-
- // Creates a new entry object. Returns zero on success, or a disk_cache error
- // on failure.
- int NewEntry(Addr address, EntryImplV3** entry);
-
- // Handles the used storage count.
- void AddStorageSize(int32_t bytes);
- void SubstractStorageSize(int32_t bytes);
-
- // Update the number of referenced cache entries.
- void IncreaseNumRefs();
- void DecreaseNumRefs();
- void IncreaseNumEntries();
- void DecreaseNumEntries();
-
- // Dumps current cache statistics to the log.
- void LogStats();
-
- // Send UMA stats.
- void ReportStats();
-
- // Reports an uncommon, recoverable error.
- void ReportError(int error);
-
- // Performs basic checks on the index file. Returns false on failure.
- bool CheckIndex();
-
-  // Part of the self test. Returns the number of dirty entries, or an error.
- int CheckAllEntries();
-
- // Part of the self test. Returns false if the entry is corrupt.
- bool CheckEntry(EntryImplV3* cache_entry);
-
- // Returns the maximum total memory for the memory buffers.
- int MaxBuffersSize();
-
- IndexTable index_;
- base::FilePath path_; // Path to the folder used as backing storage.
- BlockBitmaps block_files_;
- int32_t max_size_; // Maximum data size for this instance.
- EvictionV3 eviction_; // Handler of the eviction algorithm.
- EntriesMap open_entries_;
- int num_refs_; // Number of referenced cache entries.
- int max_refs_; // Max number of referenced cache entries.
- int entry_count_; // Number of entries accessed lately.
- int byte_count_; // Number of bytes read/written lately.
- int buffer_bytes_; // Total size of the temporary entries' buffers.
- int up_ticks_; // The number of timer ticks received (OnTimerTick).
- net::CacheType cache_type_;
- int uma_report_; // Controls transmission of UMA data.
- uint32_t user_flags_; // Flags set by the user.
- bool init_; // controls the initialization of the system.
- bool restarted_;
- bool read_only_; // Prevents updates of the rankings data (used by tools).
- bool disabled_;
- bool lru_eviction_; // What eviction algorithm should be used.
- bool first_timer_; // True if the timer has not been called.
- bool user_load_; // True if we see a high load coming from the caller.
-
- net::NetLog* net_log_;
-
- Stats stats_; // Usage statistics.
- scoped_ptr<base::RepeatingTimer> timer_; // Usage timer.
- scoped_refptr<TraceObject> trace_object_; // Initializes internal tracing.
- base::WeakPtrFactory<BackendImplV3> ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(BackendImplV3);
-};
-
-} // namespace disk_cache
-
-#endif // NET_DISK_CACHE_BLOCKFILE_BACKEND_IMPL_V3_H_
diff --git a/chromium/net/disk_cache/blockfile/backend_worker_v3.cc b/chromium/net/disk_cache/blockfile/backend_worker_v3.cc
deleted file mode 100644
index 897ee3c3d39..00000000000
--- a/chromium/net/disk_cache/blockfile/backend_worker_v3.cc
+++ /dev/null
@@ -1,471 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/disk_cache/blockfile/backend_worker_v3.h"
-
-#include <stdint.h>
-
-#include <limits>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/files/file_path.h"
-#include "base/files/file_util.h"
-#include "base/message_loop/message_loop.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/time/time.h"
-#include "base/timer/timer.h"
-#include "net/base/net_errors.h"
-#include "net/disk_cache/blockfile/errors.h"
-#include "net/disk_cache/blockfile/experiments.h"
-#include "net/disk_cache/blockfile/file.h"
-
-using base::Time;
-using base::TimeDelta;
-using base::TimeTicks;
-
-namespace {
-
-#if defined(V3_NOT_JUST_YET_READY)
-
-const char kIndexName[] = "index";
-
-// Roughly 240 MB corresponds to fewer than 50k entries for 99% of users.
-// Note that the actual target is to keep the index table load factor under 55%
-// for most users.
-const int k64kEntriesStore = 240 * 1000 * 1000;
-const int kBaseTableLen = 64 * 1024;
-const int kDefaultCacheSize = 80 * 1024 * 1024;
-
-// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
-const int kTrimDelay = 10;
-
-int DesiredIndexTableLen(int32_t storage_size) {
- if (storage_size <= k64kEntriesStore)
- return kBaseTableLen;
- if (storage_size <= k64kEntriesStore * 2)
- return kBaseTableLen * 2;
- if (storage_size <= k64kEntriesStore * 4)
- return kBaseTableLen * 4;
- if (storage_size <= k64kEntriesStore * 8)
- return kBaseTableLen * 8;
-
- // The biggest storage_size for int32_t requires a 4 MB table.
- return kBaseTableLen * 16;
-}
-
-int MaxStorageSizeForTable(int table_len) {
- return table_len * (k64kEntriesStore / kBaseTableLen);
-}
-
-size_t GetIndexSize(int table_len) {
- size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
- return sizeof(disk_cache::IndexHeader) + table_size;
-}
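-
-// Annotation, not from the original file: a worked example of the sizing
-// helpers above (hypothetical name). A 400 MB cache gets a doubled table;
-// with a 32-bit CacheAddr that is sizeof(IndexHeader) + 512 KB on disk.
-size_t ExampleIndexSizing() {
-  int table_len = DesiredIndexTableLen(400 * 1000 * 1000);  // 2 * kBaseTableLen.
-  return GetIndexSize(table_len);  // Header plus 128K 4-byte slots.
-}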
-
-// ------------------------------------------------------------------------
-
-// Sets group for the current experiment. Returns false if the files should be
-// discarded.
-bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
- if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
- header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
- // Discard current cache.
- return false;
- }
-
- if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
- "ExperimentControl") {
- if (cache_created) {
- header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
- return true;
- } else if (header->experiment != disk_cache::EXPERIMENT_SIMPLE_CONTROL) {
- return false;
- }
- }
-
- header->experiment = disk_cache::NO_EXPERIMENT;
- return true;
-}
-#endif // defined(V3_NOT_JUST_YET_READY).
-
-} // namespace
-
-// ------------------------------------------------------------------------
-
-namespace disk_cache {
-
-BackendImplV3::Worker::Worker(
- const base::FilePath& path,
- const scoped_refptr<base::SingleThreadTaskRunner>& main_thread)
- : path_(path), block_files_(path), init_(false) {
-}
-
-#if defined(V3_NOT_JUST_YET_READY)
-
-int BackendImpl::SyncInit() {
-#if defined(NET_BUILD_STRESS_CACHE)
- // Start evictions right away.
- up_ticks_ = kTrimDelay * 2;
-#endif
- DCHECK(!init_);
- if (init_)
- return net::ERR_FAILED;
-
- bool create_files = false;
- if (!InitBackingStore(&create_files)) {
- ReportError(ERR_STORAGE_ERROR);
- return net::ERR_FAILED;
- }
-
- num_refs_ = num_pending_io_ = max_refs_ = 0;
- entry_count_ = byte_count_ = 0;
-
- if (!restarted_) {
- buffer_bytes_ = 0;
- trace_object_ = TraceObject::GetTraceObject();
- // Create a recurrent timer of 30 secs.
- int timer_delay = unit_test_ ? 1000 : 30000;
- timer_.reset(new base::RepeatingTimer());
- timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
- &BackendImpl::OnStatsTimer);
- }
-
- init_ = true;
- Trace("Init");
-
- if (data_->header.experiment != NO_EXPERIMENT &&
- cache_type_ != net::DISK_CACHE) {
- // No experiment for other caches.
- return net::ERR_FAILED;
- }
-
- if (!(user_flags_ & kNoRandom)) {
- // The unit test controls directly what to test.
- new_eviction_ = (cache_type_ == net::DISK_CACHE);
- }
-
- if (!CheckIndex()) {
- ReportError(ERR_INIT_FAILED);
- return net::ERR_FAILED;
- }
-
- if (!restarted_ && (create_files || !data_->header.num_entries))
- ReportError(ERR_CACHE_CREATED);
-
- if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
- !InitExperiment(&data_->header, create_files)) {
- return net::ERR_FAILED;
- }
-
- // We don't care if the value overflows. The only thing we care about is that
- // the id cannot be zero, because that value is used as "not dirty".
- // Increasing the value once per second gives us many years before we start
- // having collisions.
- data_->header.this_id++;
- if (!data_->header.this_id)
- data_->header.this_id++;
-
- bool previous_crash = (data_->header.crash != 0);
- data_->header.crash = 1;
-
- if (!block_files_.Init(create_files))
- return net::ERR_FAILED;
-
- // We want to minimize the changes to cache for an AppCache.
- if (cache_type() == net::APP_CACHE) {
- DCHECK(!new_eviction_);
- read_only_ = true;
- } else if (cache_type() == net::SHADER_CACHE) {
- DCHECK(!new_eviction_);
- }
-
- eviction_.Init(this);
-
- // stats_ and rankings_ may end up calling back to us so we better be enabled.
- disabled_ = false;
- if (!InitStats())
- return net::ERR_FAILED;
-
- disabled_ = !rankings_.Init(this, new_eviction_);
-
-#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
- trace_object_->EnableTracing(false);
- int sc = SelfCheck();
- if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
- NOTREACHED();
- trace_object_->EnableTracing(true);
-#endif
-
- if (previous_crash) {
- ReportError(ERR_PREVIOUS_CRASH);
- } else if (!restarted_) {
- ReportError(ERR_NO_ERROR);
- }
-
- FlushIndex();
-
- return disabled_ ? net::ERR_FAILED : net::OK;
-}
-
-void BackendImpl::PrepareForRestart() {
- // Reset the mask_ if it was not given by the user.
- if (!(user_flags_ & kMask))
- mask_ = 0;
-
- if (!(user_flags_ & kNewEviction))
- new_eviction_ = false;
-
- disabled_ = true;
- data_->header.crash = 0;
- index_->Flush();
- index_ = NULL;
- data_ = NULL;
- block_files_.CloseFiles();
- rankings_.Reset();
- init_ = false;
- restarted_ = true;
-}
-
-BackendImpl::~BackendImpl() {
- if (user_flags_ & kNoRandom) {
- // This is a unit test, so we want to be strict about not leaking entries
- // and completing all the work.
- background_queue_.WaitForPendingIO();
- } else {
- // This is most likely not a test, so we want to do as little work as
- // possible at this time, at the price of leaving dirty entries behind.
- background_queue_.DropPendingIO();
- }
-
- if (background_queue_.BackgroundIsCurrentThread()) {
- // Unit tests may use the same thread for everything.
- CleanupCache();
- } else {
- background_queue_.background_thread()->PostTask(
- FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
- // http://crbug.com/74623
- base::ThreadRestrictions::ScopedAllowWait allow_wait;
- done_.Wait();
- }
-}
-
-void BackendImpl::CleanupCache() {
- Trace("Backend Cleanup");
- eviction_.Stop();
- timer_.reset();
-
- if (init_) {
- StoreStats();
- if (data_)
- data_->header.crash = 0;
-
- if (user_flags_ & kNoRandom) {
-      // This is a net_unittest; verify that we are not 'leaking' entries.
- File::WaitForPendingIO(&num_pending_io_);
- DCHECK(!num_refs_);
- } else {
- File::DropPendingIO();
- }
- }
- block_files_.CloseFiles();
- FlushIndex();
- index_ = NULL;
- ptr_factory_.InvalidateWeakPtrs();
- done_.Signal();
-}
-
-base::FilePath BackendImpl::GetFileName(Addr address) const {
- if (!address.is_separate_file() || !address.is_initialized()) {
- NOTREACHED();
- return base::FilePath();
- }
-
- std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
- return path_.AppendASCII(tmp);
-}
-
-// We just created a new file so we're going to write the header and set the
-// file length to include the hash table (zero filled).
-bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
- AdjustMaxCacheSize(0);
-
- IndexHeader header;
- header.table_len = DesiredIndexTableLen(max_size_);
-
- // We need file version 2.1 for the new eviction algorithm.
- if (new_eviction_)
- header.version = 0x20001;
-
- header.create_time = Time::Now().ToInternalValue();
-
- if (!file->Write(&header, sizeof(header), 0))
- return false;
-
- return file->SetLength(GetIndexSize(header.table_len));
-}
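-
-// Annotation, not from the original file: the resulting index file is simply
-//
-//   [ IndexHeader | table_len x CacheAddr ]
-//
-// where the SetLength() call above leaves the hash table zero-filled, so every
-// slot starts out meaning "no entry".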
-
-bool BackendImpl::InitBackingStore(bool* file_created) {
- if (!base::CreateDirectory(path_))
- return false;
-
- base::FilePath index_name = path_.AppendASCII(kIndexName);
-
- int flags = base::PLATFORM_FILE_READ |
- base::PLATFORM_FILE_WRITE |
- base::PLATFORM_FILE_OPEN_ALWAYS |
- base::PLATFORM_FILE_EXCLUSIVE_WRITE;
- scoped_refptr<disk_cache::File> file(new disk_cache::File(
- base::CreatePlatformFile(index_name, flags, file_created, NULL)));
-
- if (!file->IsValid())
- return false;
-
- bool ret = true;
- if (*file_created)
- ret = CreateBackingStore(file.get());
-
- file = NULL;
- if (!ret)
- return false;
-
- index_ = new MappedFile();
- data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
- if (!data_) {
- LOG(ERROR) << "Unable to map Index file";
- return false;
- }
-
- if (index_->GetLength() < sizeof(Index)) {
- // We verify this again on CheckIndex() but it's easier to make sure now
- // that the header is there.
- LOG(ERROR) << "Corrupt Index file";
- return false;
- }
-
- return true;
-}
-
-void BackendImpl::ReportError(int error) {
- STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
- error == ERR_CACHE_CREATED);
-
- // We transmit positive numbers, instead of direct error codes.
- DCHECK_LE(error, 0);
- CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
-}
-
-
-bool BackendImpl::CheckIndex() {
- DCHECK(data_);
-
- size_t current_size = index_->GetLength();
- if (current_size < sizeof(Index)) {
- LOG(ERROR) << "Corrupt Index file";
- return false;
- }
-
- if (new_eviction_) {
- // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
- if (kIndexMagic != data_->header.magic ||
- kCurrentVersion >> 16 != data_->header.version >> 16) {
- LOG(ERROR) << "Invalid file version or magic";
- return false;
- }
- if (kCurrentVersion == data_->header.version) {
- // We need file version 2.1 for the new eviction algorithm.
- UpgradeTo2_1();
- }
- } else {
- if (kIndexMagic != data_->header.magic ||
- kCurrentVersion != data_->header.version) {
- LOG(ERROR) << "Invalid file version or magic";
- return false;
- }
- }
-
- if (!data_->header.table_len) {
- LOG(ERROR) << "Invalid table size";
- return false;
- }
-
- if (current_size < GetIndexSize(data_->header.table_len) ||
- data_->header.table_len & (kBaseTableLen - 1)) {
- LOG(ERROR) << "Corrupt Index file";
- return false;
- }
-
- AdjustMaxCacheSize(data_->header.table_len);
-
-#if !defined(NET_BUILD_STRESS_CACHE)
- if (data_->header.num_bytes < 0 ||
- (max_size_ < std::numeric_limits<int32_t>::max() - kDefaultCacheSize &&
- data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
- LOG(ERROR) << "Invalid cache (current) size";
- return false;
- }
-#endif
-
- if (data_->header.num_entries < 0) {
- LOG(ERROR) << "Invalid number of entries";
- return false;
- }
-
- if (!mask_)
- mask_ = data_->header.table_len - 1;
-
- // Load the table into memory with a single read.
- scoped_ptr<char[]> buf(new char[current_size]);
- return index_->Read(buf.get(), current_size, 0);
-}
-
-bool BackendImpl::InitStats() {
- Addr address(data_->header.stats);
- int size = stats_.StorageSize();
-
- if (!address.is_initialized()) {
- FileType file_type = Addr::RequiredFileType(size);
- DCHECK_NE(file_type, EXTERNAL);
- int num_blocks = Addr::RequiredBlocks(size, file_type);
-
- if (!CreateBlock(file_type, num_blocks, &address))
- return false;
- return stats_.Init(NULL, 0, address);
- }
-
- if (!address.is_block_file()) {
- NOTREACHED();
- return false;
- }
-
- // Load the required data.
- size = address.num_blocks() * address.BlockSize();
- MappedFile* file = File(address);
- if (!file)
- return false;
-
- scoped_ptr<char[]> data(new char[size]);
- size_t offset = address.start_block() * address.BlockSize() +
- kBlockHeaderSize;
- if (!file->Read(data.get(), size, offset))
- return false;
-
- if (!stats_.Init(data.get(), size, address))
- return false;
- if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
- stats_.InitSizeHistogram();
- return true;
-}
-
-#endif // defined(V3_NOT_JUST_YET_READY).
-
-int BackendImplV3::Worker::Init(const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-BackendImplV3::Worker::~Worker() {
-}
-
-} // namespace disk_cache
diff --git a/chromium/net/disk_cache/blockfile/backend_worker_v3.h b/chromium/net/disk_cache/blockfile/backend_worker_v3.h
deleted file mode 100644
index fc8b317345e..00000000000
--- a/chromium/net/disk_cache/blockfile/backend_worker_v3.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// See net/disk_cache/disk_cache.h for the public interface of the cache.
-
-#ifndef NET_DISK_CACHE_BLOCKFILE_BACKEND_WORKER_V3_H_
-#define NET_DISK_CACHE_BLOCKFILE_BACKEND_WORKER_V3_H_
-
-#include "base/containers/hash_tables.h"
-#include "base/files/file_path.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "net/disk_cache/blockfile/addr.h"
-#include "net/disk_cache/blockfile/backend_impl_v3.h"
-#include "net/disk_cache/blockfile/block_files.h"
-
-namespace base {
-class SingleThreadTaskRunner;
-} // namespace base
-
-namespace disk_cache {
-
-class BackendImplV3::Worker : public base::RefCountedThreadSafe<Worker> {
- public:
- Worker(const base::FilePath& path,
- const scoped_refptr<base::SingleThreadTaskRunner>& main_thread);
-
- // Performs general initialization for this current instance of the cache.
- int Init(const CompletionCallback& callback);
-
- private:
- friend class base::RefCountedThreadSafe<Worker>;
-
- ~Worker();
- void CleanupCache();
-
- // Returns the full name for an external storage file.
- base::FilePath GetFileName(Addr address) const;
-
- // Creates a new backing file for the cache index.
- bool CreateBackingStore(disk_cache::File* file);
- bool InitBackingStore(bool* file_created);
-
- // Performs basic checks on the index file. Returns false on failure.
- bool CheckIndex();
-
- base::FilePath path_; // Path to the folder used as backing storage.
- BlockFiles block_files_; // Set of files used to store all data.
- bool init_; // controls the initialization of the system.
-
- DISALLOW_COPY_AND_ASSIGN(Worker);
-};
-
-} // namespace disk_cache
-
-#endif // NET_DISK_CACHE_BLOCKFILE_BACKEND_WORKER_V3_H_
diff --git a/chromium/net/disk_cache/blockfile/block_bitmaps_v3.cc b/chromium/net/disk_cache/blockfile/block_bitmaps_v3.cc
deleted file mode 100644
index 70208aef74f..00000000000
--- a/chromium/net/disk_cache/blockfile/block_bitmaps_v3.cc
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/disk_cache/blockfile/block_bitmaps_v3.h"
-
-#include "base/metrics/histogram_macros.h"
-#include "base/time/time.h"
-#include "net/disk_cache/blockfile/disk_format_base.h"
-#include "net/disk_cache/blockfile/trace.h"
-
-using base::TimeTicks;
-
-namespace disk_cache {
-
-BlockBitmaps::BlockBitmaps() {
-}
-
-BlockBitmaps::~BlockBitmaps() {
-}
-
-void BlockBitmaps::Init(const BlockFilesBitmaps& bitmaps) {
- bitmaps_ = bitmaps;
-}
-
-bool BlockBitmaps::CreateBlock(FileType block_type,
- int block_count,
- Addr* block_address) {
- DCHECK_NE(block_type, EXTERNAL);
- DCHECK_NE(block_type, RANKINGS);
- if (block_count < 1 || block_count > kMaxNumBlocks)
- return false;
-
- int header_num = HeaderNumberForNewBlock(block_type, block_count);
- if (header_num < 0)
- return false;
-
- int index;
- if (!bitmaps_[header_num].CreateMapBlock(block_count, &index))
- return false;
-
- if (!index && (block_type == BLOCK_ENTRIES || block_type == BLOCK_EVICTED) &&
- !bitmaps_[header_num].CreateMapBlock(block_count, &index)) {
- // index 0 for entries is a reserved value.
- return false;
- }
-
- Addr address(block_type, block_count, bitmaps_[header_num].FileId(), index);
- block_address->set_value(address.value());
- Trace("CreateBlock 0x%x", address.value());
- return true;
-}
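-
-// Annotation, not from the original file: the second CreateMapBlock() attempt
-// above exists because block 0 of the entry/evicted-entry files doubles as the
-// "no entry" sentinel; the reserved slot stays allocated and the caller gets a
-// fresh, nonzero index instead.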
-
-void BlockBitmaps::DeleteBlock(Addr address) {
- if (!address.is_initialized() || address.is_separate_file())
- return;
-
- int header_num = GetHeaderNumber(address);
- if (header_num < 0)
- return;
-
- Trace("DeleteBlock 0x%x", address.value());
- bitmaps_[header_num].DeleteMapBlock(address.start_block(),
- address.num_blocks());
-}
-
-void BlockBitmaps::Clear() {
- bitmaps_.clear();
-}
-
-void BlockBitmaps::ReportStats() {
- int used_blocks[kFirstAdditionalBlockFile];
- int load[kFirstAdditionalBlockFile];
- for (int16_t i = 0; i < kFirstAdditionalBlockFile; i++) {
- GetFileStats(i, &used_blocks[i], &load[i]);
- }
- UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_0", used_blocks[0]);
- UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_1", used_blocks[1]);
- UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_2", used_blocks[2]);
- UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_3", used_blocks[3]);
-
- UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_0", load[0], 101);
- UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_1", load[1], 101);
- UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_2", load[2], 101);
- UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_3", load[3], 101);
-}
-
-bool BlockBitmaps::IsValid(Addr address) {
-#ifdef NDEBUG
- return true;
-#else
- if (!address.is_initialized() || address.is_separate_file())
- return false;
-
- int header_num = GetHeaderNumber(address);
- if (header_num < 0)
- return false;
-
- bool rv = bitmaps_[header_num].UsedMapBlock(address.start_block(),
- address.num_blocks());
- DCHECK(rv);
- return rv;
-#endif
-}
-
-int BlockBitmaps::GetHeaderNumber(Addr address) {
- DCHECK_GE(bitmaps_.size(), kFirstAdditionalBlockFileV3);
- DCHECK(address.is_block_file() || !address.is_initialized());
- if (!address.is_initialized())
- return -1;
-
- int file_index = address.FileNumber();
- if (static_cast<unsigned int>(file_index) >= bitmaps_.size())
- return -1;
-
- return file_index;
-}
-
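-// Two-pass search (annotation): first walk the chain of files for this block
-// type looking for one comfortably below its growth threshold; if every file
-// is low on free blocks, re-walk the chain and settle for any file where the
-// allocation still fits (CanAllocate).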
-int BlockBitmaps::HeaderNumberForNewBlock(FileType block_type,
- int block_count) {
- DCHECK_GT(block_type, 0);
- int header_num = block_type - 1;
- bool found = true;
-
- TimeTicks start = TimeTicks::Now();
- while (bitmaps_[header_num].NeedToGrowBlockFile(block_count)) {
- header_num = bitmaps_[header_num].NextFileId();
- if (!header_num) {
- found = false;
- break;
- }
- }
-
- if (!found) {
- // Restart the search, looking for any file with space. We know that all
- // files of this type are low on free blocks, but we cannot grow any file
- // at this time.
- header_num = block_type - 1;
- do {
- if (bitmaps_[header_num].CanAllocate(block_count)) {
- found = true; // Make sure file 0 is not mistaken with a failure.
- break;
- }
- header_num = bitmaps_[header_num].NextFileId();
- } while (header_num);
-
- if (!found)
- header_num = -1;
- }
-
- LOCAL_HISTOGRAM_TIMES("DiskCache.GetFileForNewBlock",
- TimeTicks::Now() - start);
- return header_num;
-}
-
-// We are interested in the total number of blocks used by this file type, and
-// the max number of blocks that we can store (reported as the percentage of
-// used blocks). In order to find out the number of used blocks, we have to
-// subtract the empty blocks from the total blocks for each file in the chain.
-void BlockBitmaps::GetFileStats(int index, int* used_count, int* load) {
- int max_blocks = 0;
- *used_count = 0;
- *load = 0;
- do {
- int capacity = bitmaps_[index].Capacity();
- int used = capacity - bitmaps_[index].EmptyBlocks();
- DCHECK_GE(used, 0);
-
- max_blocks += capacity;
- *used_count += used;
-
- index = bitmaps_[index].NextFileId();
- } while (index);
-
- if (max_blocks)
- *load = *used_count * 100 / max_blocks;
-}
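-
-// Worked example (annotation, not from the original file): a two-file chain
-// with capacities 800 + 800 and 600 + 600 empty blocks reports
-// *used_count = 400 and *load = 25.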
-
-} // namespace disk_cache
diff --git a/chromium/net/disk_cache/blockfile/block_bitmaps_v3.h b/chromium/net/disk_cache/blockfile/block_bitmaps_v3.h
deleted file mode 100644
index b2ca46141e7..00000000000
--- a/chromium/net/disk_cache/blockfile/block_bitmaps_v3.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// See net/disk_cache/disk_cache.h for the public interface.
-
-#ifndef NET_DISK_CACHE_BLOCKFILE_BLOCK_BITMAPS_V3_H_
-#define NET_DISK_CACHE_BLOCKFILE_BLOCK_BITMAPS_V3_H_
-
-#include "base/files/file_path.h"
-#include "base/macros.h"
-#include "net/base/net_export.h"
-#include "net/disk_cache/blockfile/addr.h"
-#include "net/disk_cache/blockfile/block_files.h"
-
-namespace disk_cache {
-
-class BackendImplV3;
-
-// This class is the interface in the v3 disk cache to the set of files holding
-// cached data that is small enough to not be efficiently stored in a dedicated
-// file (i.e. < kMaxBlockSize). It is primarily used to allocate and free
-// regions in those files used to store data.
-class NET_EXPORT_PRIVATE BlockBitmaps {
- public:
- BlockBitmaps();
- ~BlockBitmaps();
-
- void Init(const BlockFilesBitmaps& bitmaps);
-
- // Creates a new entry on a block file. block_type indicates the size of block
-  // to be used (as defined in addr.h), block_count is the number of
- // blocks to allocate, and block_address is the address of the new entry.
- bool CreateBlock(FileType block_type, int block_count, Addr* block_address);
-
- // Removes an entry from the block files.
- void DeleteBlock(Addr address);
-
- // Releases the internal bitmaps. The cache is being purged.
- void Clear();
-
- // Sends UMA stats.
- void ReportStats();
-
- // Returns true if the blocks pointed by a given address are currently used.
- // This method is only intended for debugging.
- bool IsValid(Addr address);
-
- private:
- // Returns the header number that stores a given address.
- int GetHeaderNumber(Addr address);
-
- // Returns the appropriate header to use for a new block.
- int HeaderNumberForNewBlock(FileType block_type, int block_count);
-
- // Retrieves stats for the given file index.
- void GetFileStats(int index, int* used_count, int* load);
-
- BlockFilesBitmaps bitmaps_;
-
- DISALLOW_COPY_AND_ASSIGN(BlockBitmaps);
-};
-
-} // namespace disk_cache
-
-#endif // NET_DISK_CACHE_BLOCKFILE_BLOCK_BITMAPS_V3_H_
diff --git a/chromium/net/disk_cache/blockfile/block_bitmaps_v3_unittest.cc b/chromium/net/disk_cache/blockfile/block_bitmaps_v3_unittest.cc
deleted file mode 100644
index 64ca115cded..00000000000
--- a/chromium/net/disk_cache/blockfile/block_bitmaps_v3_unittest.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/disk_cache/blockfile/addr.h"
-#include "net/disk_cache/blockfile/block_bitmaps_v3.h"
-#include "net/disk_cache/blockfile/block_files.h"
-#include "net/disk_cache/blockfile/disk_format_base.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// Tests that we add and remove blocks correctly.
-TEST(DiskCacheBlockBitmaps, V3AllocationMap) {
- disk_cache::BlockBitmaps block_bitmaps;
- disk_cache::BlockFilesBitmaps bitmaps;
-
- const int kNumHeaders = 10;
- disk_cache::BlockFileHeader headers[kNumHeaders];
- for (int i = 0; i < kNumHeaders; i++) {
- memset(&headers[i], 0, sizeof(headers[i]));
- headers[i].magic = disk_cache::kBlockMagic;
- headers[i].version = disk_cache::kBlockCurrentVersion;
- headers[i].this_file = static_cast<int16_t>(i);
- headers[i].empty[3] = 200;
- headers[i].max_entries = 800;
- bitmaps.push_back(disk_cache::BlockHeader(&headers[i]));
- }
-
- block_bitmaps.Init(bitmaps);
-
- // Create a bunch of entries.
- const int kSize = 100;
- disk_cache::Addr address[kSize];
- for (int i = 0; i < kSize; i++) {
- SCOPED_TRACE(i);
- int block_size = i % 4 + 1;
- ASSERT_TRUE(block_bitmaps.CreateBlock(disk_cache::BLOCK_1K, block_size,
- &address[i]));
- EXPECT_EQ(disk_cache::BLOCK_1K, address[i].file_type());
- EXPECT_EQ(block_size, address[i].num_blocks());
- int start = address[i].start_block();
-
- // Verify that the allocated entry doesn't cross a 4 block boundary.
- EXPECT_EQ(start / 4, (start + block_size - 1) / 4);
- }
-
- for (int i = 0; i < kSize; i++) {
- SCOPED_TRACE(i);
- EXPECT_TRUE(block_bitmaps.IsValid(address[i]));
- }
-
- // The first part of the allocation map should be completely filled. We used
-  // 10 bits for each group of four entries, 250 bits in total. All entries
-  // should go to the third file.
- uint8_t* buffer = reinterpret_cast<uint8_t*>(&headers[2].allocation_map);
- for (int i = 0; i < 29; i++) {
- SCOPED_TRACE(i);
- EXPECT_EQ(0xff, buffer[i]);
- }
-
- for (int i = 0; i < kSize; i++) {
- SCOPED_TRACE(i);
- block_bitmaps.DeleteBlock(address[i]);
- }
-
- // The allocation map should be empty.
-  for (int i = 0; i < 50; i++) {
- SCOPED_TRACE(i);
- EXPECT_EQ(0, buffer[i]);
- }
-}
diff --git a/chromium/net/disk_cache/blockfile/block_files.cc b/chromium/net/disk_cache/blockfile/block_files.cc
index f7cc38178f4..10798dae2bf 100644
--- a/chromium/net/disk_cache/blockfile/block_files.cc
+++ b/chromium/net/disk_cache/blockfile/block_files.cc
@@ -432,7 +432,7 @@ bool BlockFiles::IsValid(Addr address) {
static bool read_contents = false;
if (read_contents) {
- scoped_ptr<char[]> buffer;
+ std::unique_ptr<char[]> buffer;
buffer.reset(new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4]);
size_t size = address.BlockSize() * address.num_blocks();
size_t offset = address.start_block() * address.BlockSize() +
diff --git a/chromium/net/disk_cache/blockfile/block_files.h b/chromium/net/disk_cache/blockfile/block_files.h
index 1de3bedb830..056b6a69f0f 100644
--- a/chromium/net/disk_cache/blockfile/block_files.h
+++ b/chromium/net/disk_cache/blockfile/block_files.h
@@ -9,12 +9,12 @@
#include <stdint.h>
+#include <memory>
#include <vector>
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
#include "net/base/net_export.h"
#include "net/disk_cache/blockfile/addr.h"
#include "net/disk_cache/blockfile/disk_format_base.h"
@@ -81,7 +81,6 @@ class NET_EXPORT_PRIVATE BlockHeader {
int Size() const;
// Returns a pointer to the underlying BlockFileHeader.
- // TODO(rvargas): This may be removed with the support for V2.
BlockFileHeader* Header();
private:
@@ -157,7 +156,7 @@ class NET_EXPORT_PRIVATE BlockFiles {
char* zero_buffer_; // Buffer to speed-up cleaning deleted entries.
base::FilePath path_; // Path to the backing folder.
std::vector<MappedFile*> block_files_; // The actual files.
- scoped_ptr<base::ThreadChecker> thread_checker_;
+ std::unique_ptr<base::ThreadChecker> thread_checker_;
FRIEND_TEST_ALL_PREFIXES(DiskCacheTest, BlockFiles_ZeroSizeFile);
FRIEND_TEST_ALL_PREFIXES(DiskCacheTest, BlockFiles_TruncatedFile);
diff --git a/chromium/net/disk_cache/blockfile/disk_cache_perftest.cc b/chromium/net/disk_cache/blockfile/disk_cache_perftest.cc
deleted file mode 100644
index 80806532660..00000000000
--- a/chromium/net/disk_cache/blockfile/disk_cache_perftest.cc
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/hash.h"
-#include "base/strings/string_util.h"
-#include "base/test/perf_time_logger.h"
-#include "base/test/test_file_util.h"
-#include "base/threading/thread.h"
-#include "net/base/cache_type.h"
-#include "net/base/io_buffer.h"
-#include "net/base/net_errors.h"
-#include "net/base/test_completion_callback.h"
-#include "net/disk_cache/blockfile/backend_impl.h"
-#include "net/disk_cache/blockfile/block_files.h"
-#include "net/disk_cache/disk_cache.h"
-#include "net/disk_cache/disk_cache_test_base.h"
-#include "net/disk_cache/disk_cache_test_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/platform_test.h"
-
-using base::Time;
-
-namespace {
-
-struct TestEntry {
- std::string key;
- int data_len;
-};
-typedef std::vector<TestEntry> TestEntries;
-
-const int kMaxSize = 16 * 1024 - 1;
-
-// Creates num_entries entries in the cache, and writes 200 bytes of metadata
-// and up to kMaxSize bytes of data to each entry.
-bool TimeWrite(int num_entries, disk_cache::Backend* cache,
- TestEntries* entries) {
- const int kSize1 = 200;
- scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
- scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kMaxSize));
-
- CacheTestFillBuffer(buffer1->data(), kSize1, false);
- CacheTestFillBuffer(buffer2->data(), kMaxSize, false);
-
- int expected = 0;
-
- MessageLoopHelper helper;
- CallbackTest callback(&helper, true);
-
- base::PerfTimeLogger timer("Write disk cache entries");
-
- for (int i = 0; i < num_entries; i++) {
- TestEntry entry;
- entry.key = GenerateKey(true);
- entry.data_len = rand() % kMaxSize;
- entries->push_back(entry);
-
- disk_cache::Entry* cache_entry;
- net::TestCompletionCallback cb;
- int rv = cache->CreateEntry(entry.key, &cache_entry, cb.callback());
- if (net::OK != cb.GetResult(rv))
- break;
- int ret = cache_entry->WriteData(
- 0, 0, buffer1.get(), kSize1,
- base::Bind(&CallbackTest::Run, base::Unretained(&callback)), false);
- if (net::ERR_IO_PENDING == ret)
- expected++;
- else if (kSize1 != ret)
- break;
-
- ret = cache_entry->WriteData(
- 1, 0, buffer2.get(), entry.data_len,
- base::Bind(&CallbackTest::Run, base::Unretained(&callback)), false);
- if (net::ERR_IO_PENDING == ret)
- expected++;
- else if (entry.data_len != ret)
- break;
- cache_entry->Close();
- }
-
- helper.WaitUntilCacheIoFinished(expected);
- timer.Done();
-
- return (expected == helper.callbacks_called());
-}
-
-// Reads the data and metadata from each entry listed in |entries|.
-bool TimeRead(int num_entries, disk_cache::Backend* cache,
- const TestEntries& entries, bool cold) {
- const int kSize1 = 200;
- scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
- scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kMaxSize));
-
- CacheTestFillBuffer(buffer1->data(), kSize1, false);
- CacheTestFillBuffer(buffer2->data(), kMaxSize, false);
-
- int expected = 0;
-
- MessageLoopHelper helper;
- CallbackTest callback(&helper, true);
-
- const char* message = cold ? "Read disk cache entries (cold)" :
- "Read disk cache entries (warm)";
- base::PerfTimeLogger timer(message);
-
- for (int i = 0; i < num_entries; i++) {
- disk_cache::Entry* cache_entry;
- net::TestCompletionCallback cb;
- int rv = cache->OpenEntry(entries[i].key, &cache_entry, cb.callback());
- if (net::OK != cb.GetResult(rv))
- break;
- int ret = cache_entry->ReadData(
- 0, 0, buffer1.get(), kSize1,
- base::Bind(&CallbackTest::Run, base::Unretained(&callback)));
- if (net::ERR_IO_PENDING == ret)
- expected++;
- else if (kSize1 != ret)
- break;
-
- ret = cache_entry->ReadData(
- 1, 0, buffer2.get(), entries[i].data_len,
- base::Bind(&CallbackTest::Run, base::Unretained(&callback)));
- if (net::ERR_IO_PENDING == ret)
- expected++;
- else if (entries[i].data_len != ret)
- break;
- cache_entry->Close();
- }
-
- helper.WaitUntilCacheIoFinished(expected);
- timer.Done();
-
- return (expected == helper.callbacks_called());
-}
-
-int BlockSize() {
- // We can use from 1 to 4 blocks.
- return (rand() & 0x3) + 1;
-}
-
-} // namespace
-
-TEST_F(DiskCacheTest, Hash) {
- int seed = static_cast<int>(Time::Now().ToInternalValue());
- srand(seed);
-
- base::PerfTimeLogger timer("Hash disk cache keys");
- for (int i = 0; i < 300000; i++) {
- std::string key = GenerateKey(true);
- base::Hash(key);
- }
- timer.Done();
-}
-
-TEST_F(DiskCacheTest, CacheBackendPerformance) {
- base::Thread cache_thread("CacheThread");
- ASSERT_TRUE(cache_thread.StartWithOptions(
- base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
-
- ASSERT_TRUE(CleanupCacheDir());
- net::TestCompletionCallback cb;
- scoped_ptr<disk_cache::Backend> cache;
- int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
- net::CACHE_BACKEND_BLOCKFILE,
- cache_path_,
- 0,
- false,
- cache_thread.task_runner(),
- NULL,
- &cache,
- cb.callback());
-
- ASSERT_EQ(net::OK, cb.GetResult(rv));
-
- int seed = static_cast<int>(Time::Now().ToInternalValue());
- srand(seed);
-
- TestEntries entries;
- int num_entries = 1000;
-
- EXPECT_TRUE(TimeWrite(num_entries, cache.get(), &entries));
-
- base::MessageLoop::current()->RunUntilIdle();
- cache.reset();
-
- ASSERT_TRUE(base::EvictFileFromSystemCache(
- cache_path_.AppendASCII("index")));
- ASSERT_TRUE(base::EvictFileFromSystemCache(
- cache_path_.AppendASCII("data_0")));
- ASSERT_TRUE(base::EvictFileFromSystemCache(
- cache_path_.AppendASCII("data_1")));
- ASSERT_TRUE(base::EvictFileFromSystemCache(
- cache_path_.AppendASCII("data_2")));
- ASSERT_TRUE(base::EvictFileFromSystemCache(
- cache_path_.AppendASCII("data_3")));
-
- rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
- net::CACHE_BACKEND_BLOCKFILE,
- cache_path_,
- 0,
- false,
- cache_thread.task_runner(),
- NULL,
- &cache,
- cb.callback());
- ASSERT_EQ(net::OK, cb.GetResult(rv));
-
- EXPECT_TRUE(TimeRead(num_entries, cache.get(), entries, true));
-
- EXPECT_TRUE(TimeRead(num_entries, cache.get(), entries, false));
-
- base::MessageLoop::current()->RunUntilIdle();
-}
-
-// Creating and deleting "entries" on a block-file is something quite frequent
-// (after all, almost everything is stored on block files). The operation is
-// almost free when the file is empty, but can be expensive if the file gets
-// fragmented, or if we have multiple files. This test measures that scenario,
-// by using multiple, highly fragmented files.
-TEST_F(DiskCacheTest, BlockFilesPerformance) {
- ASSERT_TRUE(CleanupCacheDir());
-
- disk_cache::BlockFiles files(cache_path_);
- ASSERT_TRUE(files.Init(true));
-
- int seed = static_cast<int>(Time::Now().ToInternalValue());
- srand(seed);
-
- const int kNumEntries = 60000;
- disk_cache::Addr* address = new disk_cache::Addr[kNumEntries];
-
- base::PerfTimeLogger timer1("Fill three block-files");
-
- // Fill up the 32-byte block file (use three files).
- for (int i = 0; i < kNumEntries; i++) {
- EXPECT_TRUE(files.CreateBlock(disk_cache::RANKINGS, BlockSize(),
- &address[i]));
- }
-
- timer1.Done();
- base::PerfTimeLogger timer2("Create and delete blocks");
-
- for (int i = 0; i < 200000; i++) {
- int entry = rand() % kNumEntries;
- if (entry >= kNumEntries)
- entry = 0;
-
- files.DeleteBlock(address[entry], false);
- EXPECT_TRUE(files.CreateBlock(disk_cache::RANKINGS, BlockSize(),
- &address[entry]));
- }
-
- timer2.Done();
- base::MessageLoop::current()->RunUntilIdle();
- delete[] address;
-}
diff --git a/chromium/net/disk_cache/blockfile/disk_format_v3.h b/chromium/net/disk_cache/blockfile/disk_format_v3.h
deleted file mode 100644
index da873ed4917..00000000000
--- a/chromium/net/disk_cache/blockfile/disk_format_v3.h
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// The cache is stored on disk as a collection of block-files, plus an index
-// plus a collection of external files.
-//
-// Any data blob bigger than kMaxBlockSize (disk_cache/addr.h) will be stored in
-// a separate file named f_xxx where x is a hexadecimal number. Shorter data
-// will be stored as a series of blocks on a block-file. In any case, CacheAddr
-// represents the address of the data inside the cache.
-//
-// The index is actually a collection of four files that store a hash table with
-// allocation bitmaps and backup data. Hash collisions are handled directly by
-// the table, which from some point of view behaves like a 4-way associative
-// cache with overflow buckets (so not really open addressing).
-//
-// Basically the hash table is a collection of buckets. The first part of the
-// table has a fixed number of buckets and it is directly addressed by the hash,
-// while the second part of the table (stored on a second file) has a variable
-// number of buckets. Each bucket stores up to four cells (each cell represents
-// a possible entry). The index bitmap tracks the state of individual cells.
-//
-// The last element of the cache is the block-file. A block file is a file
-// designed to store blocks of data of a given size. For more details see
-// disk_cache/disk_format_base.h
-//
-// A new cache is initialized with a set of block files (named data_0 through
-// data_6), each one dedicated to store blocks of a given size or function. The
-// number at the end of the file name is the block file number (in decimal).
-//
-// There are three "special" types of blocks: normal entries, evicted entries
-// and control data for external files.
-//
-// The files that store internal information for the cache (blocks and index)
-// are memory mapped. They have a location that is signaled every time the
-// internal structures are modified, so it is possible to detect (most of the
-// time) when the process dies in the middle of an update. There are dedicated
-// backup files for cache bitmaps, used to detect entries out of date.
-//
-// Although cache files are to be consumed on the same machine that creates
-// them, if files are to be moved across machines, little endian storage is
-// assumed.
-
-#ifndef NET_DISK_CACHE_BLOCKFILE_DISK_FORMAT_V3_H_
-#define NET_DISK_CACHE_BLOCKFILE_DISK_FORMAT_V3_H_
-
-#include <stdint.h>
-#include <string.h>
-
-#include "net/disk_cache/blockfile/disk_format_base.h"
-
-namespace disk_cache {
-
-const int kBaseTableLen = 0x400;
-const uint32_t kIndexMagicV3 = 0xC103CAC3;
-const uint32_t kVersion3 = 0x30000; // Version 3.0.
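-
-// For illustration: the overview above says oversized blobs live in external
-// files named f_xxx. A minimal sketch of the number-to-name mapping, assuming
-// the usual "f_" + six-hex-digit convention (helper name and digit count are
-// assumptions, not part of this format definition):
-//
-//   base::FilePath ExternalFileName(const base::FilePath& dir, int file_num) {
-//     return dir.AppendASCII(base::StringPrintf("f_%06x", file_num));
-//   }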
-
-// Flags for a given cache.
-enum CacheFlags {
- SMALL_CACHE = 1 << 0, // See IndexCell.
- CACHE_EVICTION_2 = 1 << 1, // Keep multiple lists for eviction.
- CACHE_EVICTED = 1 << 2 // Already evicted at least one entry.
-};
-
-// Header for the master index file.
-struct IndexHeaderV3 {
- uint32_t magic;
- uint32_t version;
- int32_t num_entries; // Number of entries currently stored.
- int32_t num_bytes; // Total size of the stored data.
- int32_t last_file; // Last external file created.
- int32_t reserved1;
- CacheAddr stats; // Storage for usage data.
- int32_t table_len; // Actual size of the table.
- int32_t crash; // Signals a previous crash.
- int32_t experiment; // Id of an ongoing test.
- int32_t max_bytes; // Total maximum size of the stored data.
- uint32_t flags;
- int32_t used_cells;
- int32_t max_bucket;
- uint64_t create_time; // Creation time for this set of files.
- uint64_t base_time; // Current base for timestamps.
- uint64_t old_time; // Previous time used for timestamps.
- int32_t max_block_file;
- int32_t num_no_use_entries;
- int32_t num_low_use_entries;
- int32_t num_high_use_entries;
- int32_t reserved;
- int32_t num_evicted_entries;
- int32_t pad[6];
-};
-
-const int kBaseBitmapBytes = 3968;
-// The IndexBitmap is directly saved to a file named index. The file grows in
-// page increments (4096 bytes), but not all bits have to be in use at any
-// given time. The required file size can be computed from header.table_len.
-struct IndexBitmap {
- IndexHeaderV3 header;
- uint32_t bitmap[kBaseBitmapBytes / 4]; // First page of the bitmap.
-};
-static_assert(sizeof(IndexBitmap) == 4096, "bad IndexBitmap");
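-
-// A sketch of the size computation mentioned above, assuming one bitmap bit
-// per table cell and growth in whole 4096-byte pages (helper name and exact
-// rounding are assumptions):
-//
-//   size_t IndexFileSize(int32_t table_len) {
-//     size_t bitmap_bytes = (static_cast<size_t>(table_len) + 7) / 8;
-//     size_t total = sizeof(IndexHeaderV3) + bitmap_bytes;
-//     return (total + 4095) / 4096 * 4096;  // Round up to a full page.
-//   }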
-
-// Possible states for a given entry.
-enum EntryState {
- ENTRY_FREE = 0, // Available slot.
- ENTRY_NEW, // The entry is being created.
- ENTRY_OPEN, // The entry is being accessed.
- ENTRY_MODIFIED, // The entry is being modified.
- ENTRY_DELETED, // The entry is being deleted.
- ENTRY_FIXING, // Inconsistent state. The entry is being verified.
- ENTRY_USED // The slot is in use (entry is present).
-};
-static_assert(ENTRY_USED <= 7, "state uses 3 bits");
-
-enum EntryGroup {
- ENTRY_NO_USE = 0, // The entry has not been reused.
- ENTRY_LOW_USE, // The entry has low reuse.
- ENTRY_HIGH_USE, // The entry has high reuse.
- ENTRY_RESERVED, // Reserved for future use.
- ENTRY_EVICTED // The entry was deleted.
-};
-static_assert(ENTRY_USED <= 7, "group uses 3 bits");
-
-#pragma pack(push, 1)
-struct IndexCell {
- void Clear() { memset(this, 0, sizeof(*this)); }
-
- // A cell is a 9 byte bit-field that stores 7 values:
- // location : 22 bits
- // id : 18 bits
- // timestamp : 20 bits
- // reuse : 4 bits
- // state : 3 bits
- // group : 3 bits
- // sum : 2 bits
- // The id is derived from the full hash of the entry.
- //
- // The actual layout is as follows:
- //
- // first_part (low order 32 bits):
- // 0000 0000 0011 1111 1111 1111 1111 1111 : location
- // 1111 1111 1100 0000 0000 0000 0000 0000 : id
- //
- // first_part (high order 32 bits):
- // 0000 0000 0000 0000 0000 0000 1111 1111 : id
- // 0000 1111 1111 1111 1111 1111 0000 0000 : timestamp
- // 1111 0000 0000 0000 0000 0000 0000 0000 : reuse
- //
- // last_part:
- // 0000 0111 : state
- // 0011 1000 : group
- // 1100 0000 : sum
- //
- // The small-cache version of the format moves some bits from the location
- // field to the id field, like so:
- // location : 16 bits
- // id : 24 bits
- //
- // first_part (low order 32 bits):
- // 0000 0000 0000 0000 1111 1111 1111 1111 : location
- // 1111 1111 1111 1111 0000 0000 0000 0000 : id
- //
- // The actual bit distribution between location and id is determined by the
- // table size (IndexHeaderV3.table_len). Tables smaller than 65536 entries
- // use the small-cache version; after that size, caches should have the
- // SMALL_CACHE flag cleared.
- //
- // To locate a given entry after recovering the location from the cell, the
- // file type and file number are appended (see disk_cache/addr.h). For a large
- // table only the file type is implied; for a small table, the file number
- // is also implied, and it should be the first file for that type of entry,
- // as determined by the EntryGroup (two files in total, one for active entries
- // and another one for evicted entries).
- //
- // For example, a small table may store something like 0x1234 as the location
- // field. That means it stores the entry number 0x1234. If that record belongs
- // to a deleted entry, the regular cache address may look something like
- // BLOCK_EVICTED + 1 block + file number 6 + entry number 0x1234
- // so Addr = 0xf0061234
- //
- // If that same Addr is stored on a large table, the location field would be
- // 0x61234
-
- uint64_t first_part;
- uint8_t last_part;
-};
-static_assert(sizeof(IndexCell) == 9, "bad IndexCell");
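-
-// To make the layout above concrete, here is a sketch of read accessors for
-// the large-table variant, derived from the masks documented in the comment
-// (helper names are hypothetical; the index code defines its own accessors):
-//
-//   // first_part: location = bits 0-21, id = bits 22-39,
-//   // timestamp = bits 40-59, reuse = bits 60-63.
-//   uint32_t CellLocation(const IndexCell& cell) {
-//     return static_cast<uint32_t>(cell.first_part & ((1ULL << 22) - 1));
-//   }
-//   uint32_t CellId(const IndexCell& cell) {
-//     return static_cast<uint32_t>((cell.first_part >> 22) & ((1ULL << 18) - 1));
-//   }
-//   uint32_t CellTimestamp(const IndexCell& cell) {
-//     return static_cast<uint32_t>((cell.first_part >> 40) & ((1ULL << 20) - 1));
-//   }
-//   // last_part: state = bits 0-2, group = bits 3-5, sum = bits 6-7.
-//   int CellState(const IndexCell& cell) { return cell.last_part & 7; }
-//   int CellGroup(const IndexCell& cell) { return (cell.last_part >> 3) & 7; }
-//   int CellSum(const IndexCell& cell) { return cell.last_part >> 6; }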
-
-const int kCellsPerBucket = 4;
-struct IndexBucket {
- IndexCell cells[kCellsPerBucket];
- int32_t next;
- uint32_t hash; // The high order byte is reserved (should be zero).
-};
-static_assert(sizeof(IndexBucket) == 44, "bad IndexBucket");
-const int kBytesPerCell = 44 / kCellsPerBucket;
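-// (Each cell occupies 9 bytes of IndexCell data plus its amortized share of
-// the bucket's |next| and |hash| fields: 44 / 4 = 11 bytes per cell.)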
-
-// The main cache index. Backed by a file named index_tb1.
-// The extra table (index_tb2) has a similar format, but different size.
-struct Index {
- // Default size. Actual size controlled by header.table_len.
- IndexBucket table[kBaseTableLen / kCellsPerBucket];
-};
-#pragma pack(pop)
-
-// Flags that can be applied to an entry.
-enum EntryFlags {
- PARENT_ENTRY = 1, // This entry has children (sparse) entries.
- CHILD_ENTRY = 1 << 1 // Child entry that stores sparse data.
-};
-
-struct EntryRecord {
- uint32_t hash;
- uint32_t pad1;
- uint8_t reuse_count;
- uint8_t refetch_count;
- int8_t state; // Current EntryState.
- uint8_t flags; // Any combination of EntryFlags.
- int32_t key_len;
- int32_t data_size[4]; // We can store up to 4 data streams for each
- CacheAddr data_addr[4]; // entry.
- uint32_t data_hash[4];
- uint64_t creation_time;
- uint64_t last_modified_time;
- uint64_t last_access_time;
- int32_t pad[3];
- uint32_t self_hash;
-};
-static_assert(sizeof(EntryRecord) == 104, "bad EntryRecord");
-
-struct ShortEntryRecord {
- uint32_t hash;
- uint32_t pad1;
- uint8_t reuse_count;
- uint8_t refetch_count;
- int8_t state; // Current EntryState.
- uint8_t flags;
- int32_t key_len;
- uint64_t last_access_time;
- uint32_t long_hash[5];
- uint32_t self_hash;
-};
-static_assert(sizeof(ShortEntryRecord) == 48, "bad ShortEntryRecord");
-
-} // namespace disk_cache
-
-#endif // NET_DISK_CACHE_BLOCKFILE_DISK_FORMAT_V3_H_
diff --git a/chromium/net/disk_cache/blockfile/entry_impl.cc b/chromium/net/disk_cache/blockfile/entry_impl.cc
index 60204641b9f..b20e10423c2 100644
--- a/chromium/net/disk_cache/blockfile/entry_impl.cc
+++ b/chromium/net/disk_cache/blockfile/entry_impl.cc
@@ -592,7 +592,7 @@ bool EntryImpl::SanityCheck() {
return false;
Addr next_addr(stored->next);
- if (next_addr.is_initialized() && !next_addr.SanityCheckForEntryV2()) {
+ if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
STRESS_NOTREACHED();
return false;
}
@@ -606,7 +606,7 @@ bool EntryImpl::SanityCheck() {
(stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
return false;
- if (!key_addr.SanityCheckV2())
+ if (!key_addr.SanityCheck())
return false;
if (key_addr.is_initialized() &&
@@ -639,7 +639,7 @@ bool EntryImpl::DataSanityCheck() {
return false;
if (!data_size && data_addr.is_initialized())
return false;
- if (!data_addr.SanityCheckV2())
+ if (!data_addr.SanityCheck())
return false;
if (!data_size)
continue;
@@ -664,7 +664,7 @@ void EntryImpl::FixForDelete() {
if (data_addr.is_initialized()) {
if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
(data_size > kMaxBlockSize && data_addr.is_block_file()) ||
- !data_addr.SanityCheckV2()) {
+ !data_addr.SanityCheck()) {
STRESS_NOTREACHED();
// The address is weird so don't attempt to delete it.
stored->data_addr[i] = 0;
@@ -902,7 +902,7 @@ bool EntryImpl::CouldBeSparse() const {
if (sparse_.get())
return true;
- scoped_ptr<SparseControl> sparse;
+ std::unique_ptr<SparseControl> sparse;
sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
return sparse->CouldBeSparse();
}
@@ -1505,7 +1505,7 @@ int EntryImpl::InitSparseData() {
return net::OK;
// Use a local variable so that sparse_ never goes from 'valid' to NULL.
- scoped_ptr<SparseControl> sparse(new SparseControl(this));
+ std::unique_ptr<SparseControl> sparse(new SparseControl(this));
int result = sparse->Init();
if (net::OK == result)
sparse_.swap(sparse);
diff --git a/chromium/net/disk_cache/blockfile/entry_impl.h b/chromium/net/disk_cache/blockfile/entry_impl.h
index 336ccc9c871..55f68e7c7c2 100644
--- a/chromium/net/disk_cache/blockfile/entry_impl.h
+++ b/chromium/net/disk_cache/blockfile/entry_impl.h
@@ -7,8 +7,9 @@
#include <stdint.h>
+#include <memory>
+
#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/storage_block-inl.h"
#include "net/disk_cache/blockfile/storage_block.h"
@@ -277,7 +278,7 @@ class NET_EXPORT_PRIVATE EntryImpl
CacheRankingsBlock node_; // Rankings related information for this entry.
base::WeakPtr<BackendImpl> backend_; // Back pointer to the cache.
base::WeakPtr<InFlightBackendIO> background_queue_; // In-progress queue.
- scoped_ptr<UserBuffer> user_buffers_[kNumStreams]; // Stores user data.
+ std::unique_ptr<UserBuffer> user_buffers_[kNumStreams]; // Stores user data.
// Files to store external user data and key.
scoped_refptr<File> files_[kNumStreams + 1];
mutable std::string key_; // Copy of the key.
@@ -285,7 +286,7 @@ class NET_EXPORT_PRIVATE EntryImpl
bool doomed_; // True if this entry was removed from the cache.
bool read_only_; // True if not yet writing.
bool dirty_; // True if we detected that this is a dirty entry.
- scoped_ptr<SparseControl> sparse_; // Support for sparse entries.
+ std::unique_ptr<SparseControl> sparse_; // Support for sparse entries.
net::BoundNetLog net_log_;
diff --git a/chromium/net/disk_cache/blockfile/entry_impl_v3.cc b/chromium/net/disk_cache/blockfile/entry_impl_v3.cc
deleted file mode 100644
index 883a6f24ade..00000000000
--- a/chromium/net/disk_cache/blockfile/entry_impl_v3.cc
+++ /dev/null
@@ -1,1483 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/disk_cache/blockfile/entry_impl_v3.h"
-
-#include <limits>
-
-#include "base/hash.h"
-#include "base/macros.h"
-#include "base/message_loop/message_loop.h"
-#include "base/strings/string_util.h"
-#include "net/base/io_buffer.h"
-#include "net/base/net_errors.h"
-#include "net/disk_cache/blockfile/backend_impl_v3.h"
-#include "net/disk_cache/blockfile/bitmap.h"
-#include "net/disk_cache/blockfile/disk_format_v3.h"
-#include "net/disk_cache/blockfile/histogram_macros_v3.h"
-#include "net/disk_cache/cache_util.h"
-#include "net/disk_cache/net_log_parameters.h"
-// #include "net/disk_cache/blockfile/sparse_control_v3.h"
-
-// Provide a BackendImplV3 object to macros from histogram_macros_v3.h.
-#define CACHE_UMA_BACKEND_IMPL_OBJ backend_
-
-using base::Time;
-using base::TimeDelta;
-using base::TimeTicks;
-
-namespace {
-
-const int kMaxBufferSize = 1024 * 1024; // 1 MB.
-
-} // namespace
-
-namespace disk_cache {
-
-typedef StorageBlock<EntryRecord> CacheEntryBlockV3;
-typedef StorageBlock<ShortEntryRecord> CacheShortEntryBlock;
-
-// This class handles individual memory buffers that store data before it is
-// sent to disk. The buffer can start at any offset, but if we try to write to
-// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
-// zero. The buffer grows up to a size determined by the backend, to keep the
-// total memory used under control.
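-// For example, a stream written sequentially starting at 100 KB keeps offset_
-// at 100 KB and never materializes the leading bytes, while any write that
-// touches the first 16 KB pins offset_ to zero.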
-class EntryImplV3::UserBuffer {
- public:
- explicit UserBuffer(BackendImplV3* backend)
- : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
- buffer_.reserve(kMaxBlockSize);
- }
- ~UserBuffer() {
- if (backend_)
- backend_->BufferDeleted(capacity() - kMaxBlockSize);
- }
-
- // Returns true if we can handle writing |len| bytes to |offset|.
- bool PreWrite(int offset, int len);
-
- // Truncates the buffer to |offset| bytes.
- void Truncate(int offset);
-
- // Writes |len| bytes from |buf| at the given |offset|.
- void Write(int offset, IOBuffer* buf, int len);
-
- // Returns true if we can read |len| bytes from |offset|, given that the
- // actual file has |eof| bytes stored. Note that the number of bytes to read
- // may be modified by this method even though it returns false: that means we
- // should do a smaller read from disk.
- bool PreRead(int eof, int offset, int* len);
-
- // Reads |len| bytes into |buf| from the given |offset|.
- int Read(int offset, IOBuffer* buf, int len);
-
- // Prepare this buffer for reuse.
- void Reset();
-
- char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
- int Size() { return static_cast<int>(buffer_.size()); }
- int Start() { return offset_; }
- int End() { return offset_ + Size(); }
-
- private:
- int capacity() { return static_cast<int>(buffer_.capacity()); }
- bool GrowBuffer(int required, int limit);
-
- base::WeakPtr<BackendImplV3> backend_;
- int offset_;
- std::vector<char> buffer_;
- bool grow_allowed_;
- DISALLOW_COPY_AND_ASSIGN(UserBuffer);
-};
-
-bool EntryImplV3::UserBuffer::PreWrite(int offset, int len) {
- DCHECK_GE(offset, 0);
- DCHECK_GE(len, 0);
- DCHECK_GE(offset + len, 0);
-
- // We don't want to write before our current start.
- if (offset < offset_)
- return false;
-
- // Let's get the common case out of the way.
- if (offset + len <= capacity())
- return true;
-
- // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
- // buffer offset_ at 0.
- if (!Size() && offset > kMaxBlockSize)
- return GrowBuffer(len, kMaxBufferSize);
-
- int required = offset - offset_ + len;
- return GrowBuffer(required, kMaxBufferSize * 6 / 5);
-}
-
-void EntryImplV3::UserBuffer::Truncate(int offset) {
- DCHECK_GE(offset, 0);
- DCHECK_GE(offset, offset_);
- DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;
-
- offset -= offset_;
- if (Size() >= offset)
- buffer_.resize(offset);
-}
-
-void EntryImplV3::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
- DCHECK_GE(offset, 0);
- DCHECK_GE(len, 0);
- DCHECK_GE(offset + len, 0);
- DCHECK_GE(offset, offset_);
- DVLOG(3) << "Buffer write at " << offset << " current " << offset_;
-
- if (!Size() && offset > kMaxBlockSize)
- offset_ = offset;
-
- offset -= offset_;
-
- if (offset > Size())
- buffer_.resize(offset);
-
- if (!len)
- return;
-
- char* buffer = buf->data();
- int valid_len = Size() - offset;
- int copy_len = std::min(valid_len, len);
- if (copy_len) {
- memcpy(&buffer_[offset], buffer, copy_len);
- len -= copy_len;
- buffer += copy_len;
- }
- if (!len)
- return;
-
- buffer_.insert(buffer_.end(), buffer, buffer + len);
-}
-
-bool EntryImplV3::UserBuffer::PreRead(int eof, int offset, int* len) {
- DCHECK_GE(offset, 0);
- DCHECK_GT(*len, 0);
-
- if (offset < offset_) {
- // We are reading before this buffer.
- if (offset >= eof)
- return true;
-
- // If the read overlaps with the buffer, change its length so that there is
- // no overlap.
- *len = std::min(*len, offset_ - offset);
- *len = std::min(*len, eof - offset);
-
- // We should read from disk.
- return false;
- }
-
- if (!Size())
- return false;
-
- // See if we can fulfill the first part of the operation.
- return (offset - offset_ < Size());
-}
-
-int EntryImplV3::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
- DCHECK_GE(offset, 0);
- DCHECK_GT(len, 0);
- DCHECK(Size() || offset < offset_);
-
- int clean_bytes = 0;
- if (offset < offset_) {
- // We don't have a file, so let's fill the first part with 0.
- clean_bytes = std::min(offset_ - offset, len);
- memset(buf->data(), 0, clean_bytes);
- if (len == clean_bytes)
- return len;
- offset = offset_;
- len -= clean_bytes;
- }
-
- int start = offset - offset_;
- int available = Size() - start;
- DCHECK_GE(start, 0);
- DCHECK_GE(available, 0);
- len = std::min(len, available);
- memcpy(buf->data() + clean_bytes, &buffer_[start], len);
- return len + clean_bytes;
-}
-
-void EntryImplV3::UserBuffer::Reset() {
- if (!grow_allowed_) {
- if (backend_)
- backend_->BufferDeleted(capacity() - kMaxBlockSize);
- grow_allowed_ = true;
- std::vector<char> tmp;
- buffer_.swap(tmp);
- buffer_.reserve(kMaxBlockSize);
- }
- offset_ = 0;
- buffer_.clear();
-}
-
-bool EntryImplV3::UserBuffer::GrowBuffer(int required, int limit) {
- DCHECK_GE(required, 0);
- int current_size = capacity();
- if (required <= current_size)
- return true;
-
- if (required > limit)
- return false;
-
- if (!backend_)
- return false;
-
- int to_add = std::max(required - current_size, kMaxBlockSize * 4);
- to_add = std::max(current_size, to_add);
- required = std::min(current_size + to_add, limit);
-
- grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
- if (!grow_allowed_)
- return false;
-
- DVLOG(3) << "Buffer grow to " << required;
-
- buffer_.reserve(required);
- return true;
-}
-
-// ------------------------------------------------------------------------
-
-EntryImplV3::EntryImplV3(BackendImplV3* backend, Addr address, bool read_only)
- : backend_(backend->GetWeakPtr()),
- address_(address),
- doomed_(false),
- read_only_(read_only),
- dirty_(true),
- modified_(false) {
- for (int i = 0; i < kNumStreams; i++) {
- unreported_size_[i] = 0;
- }
-}
-
-#if defined(V3_NOT_JUST_YET_READY)
-
-bool EntryImplV3::CreateEntry(Addr node_address,
- const std::string& key,
- uint32_t hash) {
- Trace("Create entry In");
- EntryStore* entry_store = entry_.Data();
- RankingsNode* node = node_.Data();
- memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
- memset(node, 0, sizeof(RankingsNode));
- if (!node_.LazyInit(backend_->File(node_address), node_address))
- return false;
-
- entry_store->rankings_node = node_address.value();
- node->contents = entry_.address().value();
-
- entry_store->hash = hash;
- entry_store->creation_time = Time::Now().ToInternalValue();
- entry_store->key_len = static_cast<int32_t>(key.size());
- if (entry_store->key_len > kMaxInternalKeyLength) {
- Addr address(0);
- if (!CreateBlock(entry_store->key_len + 1, &address))
- return false;
-
- entry_store->long_key = address.value();
- File* key_file = GetBackingFile(address, kKeyFileIndex);
- key_ = key;
-
- size_t offset = 0;
- if (address.is_block_file())
- offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
-
- if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
- DeleteData(address, kKeyFileIndex);
- return false;
- }
-
- if (address.is_separate_file())
- key_file->SetLength(key.size() + 1);
- } else {
- memcpy(entry_store->key, key.data(), key.size());
- entry_store->key[key.size()] = '\0';
- }
- backend_->ModifyStorageSize(0, static_cast<int32_t>(key.size()));
- CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32_t>(key.size()));
- node->dirty = backend_->GetCurrentEntryId();
- Log("Create Entry ");
- return true;
-}
-
-uint32_t EntryImplV3::GetHash() {
- return entry_.Data()->hash;
-}
-
-bool EntryImplV3::IsSameEntry(const std::string& key, uint32_t hash) {
- if (entry_.Data()->hash != hash ||
- static_cast<size_t>(entry_.Data()->key_len) != key.size())
- return false;
-
- return (key.compare(GetKey()) == 0);
-}
-
-void EntryImplV3::InternalDoom() {
- net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
- DCHECK(node_.HasData());
- if (!node_.Data()->dirty) {
- node_.Data()->dirty = backend_->GetCurrentEntryId();
- node_.Store();
- }
- doomed_ = true;
-}
-
-// This only includes checks that relate to the first block of the entry (the
-// first 256 bytes), and values that should be set at entry creation.
-// Basically, even if there is something wrong with this entry, we want to see
-// if it is possible to load the rankings node and delete them together.
-bool EntryImplV3::SanityCheck() {
- if (!entry_.VerifyHash())
- return false;
-
- EntryStore* stored = entry_.Data();
- if (!stored->rankings_node || stored->key_len <= 0)
- return false;
-
- if (stored->reuse_count < 0 || stored->refetch_count < 0)
- return false;
-
- Addr rankings_addr(stored->rankings_node);
- if (!rankings_addr.SanityCheckForRankings())
- return false;
-
- Addr next_addr(stored->next);
- if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
- STRESS_NOTREACHED();
- return false;
- }
- STRESS_DCHECK(next_addr.value() != entry_.address().value());
-
- if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
- return false;
-
- Addr key_addr(stored->long_key);
- if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
- (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
- return false;
-
- if (!key_addr.SanityCheck())
- return false;
-
- if (key_addr.is_initialized() &&
- ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
- (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
- return false;
-
- int num_blocks = NumBlocksForEntry(stored->key_len);
- if (entry_.address().num_blocks() != num_blocks)
- return false;
-
- return true;
-}
-
-bool EntryImplV3::DataSanityCheck() {
- EntryStore* stored = entry_.Data();
- Addr key_addr(stored->long_key);
-
- // The key must be NULL terminated.
- if (!key_addr.is_initialized() && stored->key[stored->key_len])
- return false;
-
- if (stored->hash != base::Hash(GetKey()))
- return false;
-
- for (int i = 0; i < kNumStreams; i++) {
- Addr data_addr(stored->data_addr[i]);
- int data_size = stored->data_size[i];
- if (data_size < 0)
- return false;
- if (!data_size && data_addr.is_initialized())
- return false;
- if (!data_addr.SanityCheck())
- return false;
- if (!data_size)
- continue;
- if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
- return false;
- if (data_size > kMaxBlockSize && data_addr.is_block_file())
- return false;
- }
- return true;
-}
-
-void EntryImplV3::FixForDelete() {
- EntryStore* stored = entry_.Data();
- Addr key_addr(stored->long_key);
-
- if (!key_addr.is_initialized())
- stored->key[stored->key_len] = '\0';
-
- for (int i = 0; i < kNumStreams; i++) {
- Addr data_addr(stored->data_addr[i]);
- int data_size = stored->data_size[i];
- if (data_addr.is_initialized()) {
- if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
- (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
- !data_addr.SanityCheck()) {
- STRESS_NOTREACHED();
- // The address is weird so don't attempt to delete it.
- stored->data_addr[i] = 0;
- // In general, trust the stored size as it should be in sync with the
- // total size tracked by the backend.
- }
- }
- if (data_size < 0)
- stored->data_size[i] = 0;
- }
- entry_.Store();
-}
-
-void EntryImplV3::SetTimes(base::Time last_used, base::Time last_modified) {
- node_.Data()->last_used = last_used.ToInternalValue();
- node_.Data()->last_modified = last_modified.ToInternalValue();
- node_.set_modified();
-}
-
-void EntryImplV3::BeginLogging(net::NetLog* net_log, bool created) {
- DCHECK(!net_log_.net_log());
- net_log_ = net::BoundNetLog::Make(
- net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
- net_log_.BeginEvent(
- net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
- CreateNetLogEntryCreationCallback(this, created));
-}
-
-const net::BoundNetLog& EntryImplV3::net_log() const {
- return net_log_;
-}
-
-// ------------------------------------------------------------------------
-
-void EntryImplV3::Doom() {
- if (background_queue_)
- background_queue_->DoomEntryImpl(this);
-}
-
-void EntryImplV3::DoomImpl() {
- if (doomed_ || !backend_)
- return;
-
- SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
- backend_->InternalDoomEntry(this);
-}
-
-void EntryImplV3::Close() {
- if (background_queue_)
- background_queue_->CloseEntryImpl(this);
-}
-
-std::string EntryImplV3::GetKey() const {
- CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
- int key_len = entry->Data()->key_len;
- if (key_len <= kMaxInternalKeyLength)
- return std::string(entry->Data()->key);
-
- // We keep a copy of the key so that we can always return it, even if the
- // backend is disabled.
- if (!key_.empty())
- return key_;
-
- Addr address(entry->Data()->long_key);
- DCHECK(address.is_initialized());
- size_t offset = 0;
- if (address.is_block_file())
- offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
-
- static_assert(kNumStreams == kKeyFileIndex, "invalid key index");
- File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
- kKeyFileIndex);
- if (!key_file)
- return std::string();
-
- ++key_len; // We store a trailing \0 on disk that we read back below.
- if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
- return std::string();
-
- if (!key_file->Read(base::WriteInto(&key_, key_len), key_len, offset))
- key_.clear();
- return key_;
-}
-
-Time EntryImplV3::GetLastUsed() const {
- CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
- return Time::FromInternalValue(node->Data()->last_used);
-}
-
-Time EntryImplV3::GetLastModified() const {
- CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
- return Time::FromInternalValue(node->Data()->last_modified);
-}
-
-int32_t EntryImplV3::GetDataSize(int index) const {
- if (index < 0 || index >= kNumStreams)
- return 0;
-
- CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
- return entry->Data()->data_size[index];
-}
-
-int EntryImplV3::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
- const CompletionCallback& callback) {
- if (callback.is_null())
- return ReadDataImpl(index, offset, buf, buf_len, callback);
-
- DCHECK(node_.Data()->dirty || read_only_);
- if (index < 0 || index >= kNumStreams)
- return net::ERR_INVALID_ARGUMENT;
-
- int entry_size = entry_.Data()->data_size[index];
- if (offset >= entry_size || offset < 0 || !buf_len)
- return 0;
-
- if (buf_len < 0)
- return net::ERR_INVALID_ARGUMENT;
-
- if (!background_queue_)
- return net::ERR_UNEXPECTED;
-
- background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
- return net::ERR_IO_PENDING;
-}
-
-int EntryImplV3::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
- const CompletionCallback& callback) {
- if (net_log_.IsCapturing()) {
- net_log_.BeginEvent(
- net::NetLog::TYPE_ENTRY_READ_DATA,
- CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
- }
-
- int result = InternalReadData(index, offset, buf, buf_len, callback);
-
- if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
- net_log_.EndEvent(
- net::NetLog::TYPE_ENTRY_READ_DATA,
- CreateNetLogReadWriteCompleteCallback(result));
- }
- return result;
-}
-
-int EntryImplV3::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
- const CompletionCallback& callback, bool truncate) {
- if (callback.is_null())
- return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);
-
- DCHECK(node_.Data()->dirty || read_only_);
- if (index < 0 || index >= kNumStreams)
- return net::ERR_INVALID_ARGUMENT;
-
- if (offset < 0 || buf_len < 0)
- return net::ERR_INVALID_ARGUMENT;
-
- if (!background_queue_)
- return net::ERR_UNEXPECTED;
-
- background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
- callback);
- return net::ERR_IO_PENDING;
-}
-
-int EntryImplV3::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
- const CompletionCallback& callback,
- bool truncate) {
- if (net_log_.IsCapturing()) {
- net_log_.BeginEvent(
- net::NetLog::TYPE_ENTRY_WRITE_DATA,
- CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
- }
-
- int result = InternalWriteData(index, offset, buf, buf_len, callback,
- truncate);
-
- if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
- net_log_.EndEvent(
- net::NetLog::TYPE_ENTRY_WRITE_DATA,
- CreateNetLogReadWriteCompleteCallback(result));
- }
- return result;
-}
-
-int EntryImplV3::ReadSparseData(int64_t offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback) {
- if (callback.is_null())
- return ReadSparseDataImpl(offset, buf, buf_len, callback);
-
- if (!background_queue_)
- return net::ERR_UNEXPECTED;
-
- background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
- return net::ERR_IO_PENDING;
-}
-
-int EntryImplV3::ReadSparseDataImpl(int64_t offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback) {
- DCHECK(node_.Data()->dirty || read_only_);
- int result = InitSparseData();
- if (net::OK != result)
- return result;
-
- TimeTicks start = TimeTicks::Now();
- result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
- callback);
- ReportIOTime(kSparseRead, start);
- return result;
-}
-
-int EntryImplV3::WriteSparseData(int64_t offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback) {
- if (callback.is_null())
- return WriteSparseDataImpl(offset, buf, buf_len, callback);
-
- if (!background_queue_)
- return net::ERR_UNEXPECTED;
-
- background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
- return net::ERR_IO_PENDING;
-}
-
-int EntryImplV3::WriteSparseDataImpl(int64_t offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback) {
- DCHECK(node_.Data()->dirty || read_only_);
- int result = InitSparseData();
- if (net::OK != result)
- return result;
-
- TimeTicks start = TimeTicks::Now();
- result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
- buf_len, callback);
- ReportIOTime(kSparseWrite, start);
- return result;
-}
-
-int EntryImplV3::GetAvailableRange(int64_t offset,
- int len,
- int64_t* start,
- const CompletionCallback& callback) {
- if (!background_queue_)
- return net::ERR_UNEXPECTED;
-
- background_queue_->GetAvailableRange(this, offset, len, start, callback);
- return net::ERR_IO_PENDING;
-}
-
-int EntryImplV3::GetAvailableRangeImpl(int64_t offset, int len, int64_t* start) {
- int result = InitSparseData();
- if (net::OK != result)
- return result;
-
- return sparse_->GetAvailableRange(offset, len, start);
-}
-
-bool EntryImplV3::CouldBeSparse() const {
- if (sparse_.get())
- return true;
-
- scoped_ptr<SparseControl> sparse;
- sparse.reset(new SparseControl(const_cast<EntryImplV3*>(this)));
- return sparse->CouldBeSparse();
-}
-
-void EntryImplV3::CancelSparseIO() {
- if (background_queue_)
- background_queue_->CancelSparseIO(this);
-}
-
-void EntryImplV3::CancelSparseIOImpl() {
- if (!sparse_.get())
- return;
-
- sparse_->CancelIO();
-}
-
-int EntryImplV3::ReadyForSparseIO(const CompletionCallback& callback) {
- if (!sparse_.get())
- return net::OK;
-
- if (!background_queue_)
- return net::ERR_UNEXPECTED;
-
- background_queue_->ReadyForSparseIO(this, callback);
- return net::ERR_IO_PENDING;
-}
-
-int EntryImplV3::ReadyForSparseIOImpl(const CompletionCallback& callback) {
- DCHECK(sparse_.get());
- return sparse_->ReadyToUse(callback);
-}
-
-// ------------------------------------------------------------------------
-
-// When an entry is deleted from the cache, we clean up all the data associated
-// with it for two reasons: to simplify the reuse of the block (we know that any
-// unused block is filled with zeros), and to simplify reading and writing
-// partial information from an entry (we don't have to worry about returning
-// data related to a previous cache entry because the range was not fully
-// written before).
-EntryImplV3::~EntryImplV3() {
- if (!backend_) {
- entry_.clear_modified();
- node_.clear_modified();
- return;
- }
- Log("~EntryImpl in");
-
- // Save the sparse info to disk. This will generate IO for this entry and
- // maybe for a child entry, so it is important to do it before deleting this
- // entry.
- sparse_.reset();
-
- // Remove this entry from the list of open entries.
- backend_->OnEntryDestroyBegin(entry_.address());
-
- if (doomed_) {
- DeleteEntryData(true);
- } else {
-#if defined(NET_BUILD_STRESS_CACHE)
- SanityCheck();
-#endif
- net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
- bool ret = true;
- for (int index = 0; index < kNumStreams; index++) {
- if (user_buffers_[index].get()) {
- if (!(ret = Flush(index, 0)))
- LOG(ERROR) << "Failed to save user data";
- }
- if (unreported_size_[index]) {
- backend_->ModifyStorageSize(
- entry_.Data()->data_size[index] - unreported_size_[index],
- entry_.Data()->data_size[index]);
- }
- }
-
- if (!ret) {
- // There was a failure writing the actual data. Mark the entry as dirty.
- int current_id = backend_->GetCurrentEntryId();
- node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
- node_.Store();
- } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
- node_.Data()->dirty = 0;
- node_.Store();
- }
- }
-
- Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
- net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
- backend_->OnEntryDestroyEnd();
-}
-
-int EntryImplV3::InternalReadData(int index, int offset,
- IOBuffer* buf, int buf_len,
- const CompletionCallback& callback) {
- DCHECK(node_.Data()->dirty || read_only_);
- DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
- if (index < 0 || index >= kNumStreams)
- return net::ERR_INVALID_ARGUMENT;
-
- int entry_size = entry_.Data()->data_size[index];
- if (offset >= entry_size || offset < 0 || !buf_len)
- return 0;
-
- if (buf_len < 0)
- return net::ERR_INVALID_ARGUMENT;
-
- if (!backend_)
- return net::ERR_UNEXPECTED;
-
- TimeTicks start = TimeTicks::Now();
-
- if (offset + buf_len > entry_size)
- buf_len = entry_size - offset;
-
- UpdateRank(false);
-
- backend_->OnEvent(Stats::READ_DATA);
- backend_->OnRead(buf_len);
-
- Addr address(entry_.Data()->data_addr[index]);
- int eof = address.is_initialized() ? entry_size : 0;
- if (user_buffers_[index].get() &&
- user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
- // Complete the operation locally.
- buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
- ReportIOTime(kRead, start);
- return buf_len;
- }
-
- address.set_value(entry_.Data()->data_addr[index]);
- DCHECK(address.is_initialized());
- if (!address.is_initialized()) {
- DoomImpl();
- return net::ERR_FAILED;
- }
-
- File* file = GetBackingFile(address, index);
- if (!file) {
- DoomImpl();
- LOG(ERROR) << "No file for " << std::hex << address.value();
- return net::ERR_FILE_NOT_FOUND;
- }
-
- size_t file_offset = offset;
- if (address.is_block_file()) {
- DCHECK_LE(offset + buf_len, kMaxBlockSize);
- file_offset += address.start_block() * address.BlockSize() +
- kBlockHeaderSize;
- }
-
- SyncCallback* io_callback = NULL;
- if (!callback.is_null()) {
- io_callback = new SyncCallback(this, buf, callback,
- net::NetLog::TYPE_ENTRY_READ_DATA);
- }
-
- TimeTicks start_async = TimeTicks::Now();
-
- bool completed;
- if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
- if (io_callback)
- io_callback->Discard();
- DoomImpl();
- return net::ERR_CACHE_READ_FAILURE;
- }
-
- if (io_callback && completed)
- io_callback->Discard();
-
- if (io_callback)
- ReportIOTime(kReadAsync1, start_async);
-
- ReportIOTime(kRead, start);
- return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
-}
-
-int EntryImplV3::InternalWriteData(int index, int offset,
- IOBuffer* buf, int buf_len,
- const CompletionCallback& callback,
- bool truncate) {
- DCHECK(node_.Data()->dirty || read_only_);
- DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
- if (index < 0 || index >= kNumStreams)
- return net::ERR_INVALID_ARGUMENT;
-
- if (offset < 0 || buf_len < 0)
- return net::ERR_INVALID_ARGUMENT;
-
- if (!backend_)
- return net::ERR_UNEXPECTED;
-
- int max_file_size = backend_->MaxFileSize();
-
- // The sum offset + buf_len could overflow, so check each term as well.
- if (offset > max_file_size || buf_len > max_file_size ||
- offset + buf_len > max_file_size) {
- int size = offset + buf_len;
- if (size <= max_file_size)
- size = std::numeric_limits<int32_t>::max();
- backend_->TooMuchStorageRequested(size);
- return net::ERR_FAILED;
- }
-
- TimeTicks start = TimeTicks::Now();
-
- // Read the size at this point (it may change inside prepare).
- int entry_size = entry_.Data()->data_size[index];
- bool extending = entry_size < offset + buf_len;
- truncate = truncate && entry_size > offset + buf_len;
- Trace("To PrepareTarget 0x%x", entry_.address().value());
- if (!PrepareTarget(index, offset, buf_len, truncate))
- return net::ERR_FAILED;
-
- Trace("From PrepareTarget 0x%x", entry_.address().value());
- if (extending || truncate)
- UpdateSize(index, entry_size, offset + buf_len);
-
- UpdateRank(true);
-
- backend_->OnEvent(Stats::WRITE_DATA);
- backend_->OnWrite(buf_len);
-
- if (user_buffers_[index].get()) {
- // Complete the operation locally.
- user_buffers_[index]->Write(offset, buf, buf_len);
- ReportIOTime(kWrite, start);
- return buf_len;
- }
-
- Addr address(entry_.Data()->data_addr[index]);
- if (offset + buf_len == 0) {
- if (truncate) {
- DCHECK(!address.is_initialized());
- }
- return 0;
- }
-
- File* file = GetBackingFile(address, index);
- if (!file)
- return net::ERR_FILE_NOT_FOUND;
-
- size_t file_offset = offset;
- if (address.is_block_file()) {
- DCHECK_LE(offset + buf_len, kMaxBlockSize);
- file_offset += address.start_block() * address.BlockSize() +
- kBlockHeaderSize;
- } else if (truncate || (extending && !buf_len)) {
- if (!file->SetLength(offset + buf_len))
- return net::ERR_FAILED;
- }
-
- if (!buf_len)
- return 0;
-
- SyncCallback* io_callback = NULL;
- if (!callback.is_null()) {
- io_callback = new SyncCallback(this, buf, callback,
- net::NetLog::TYPE_ENTRY_WRITE_DATA);
- }
-
- TimeTicks start_async = TimeTicks::Now();
-
- bool completed;
- if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
- &completed)) {
- if (io_callback)
- io_callback->Discard();
- return net::ERR_CACHE_WRITE_FAILURE;
- }
-
- if (io_callback && completed)
- io_callback->Discard();
-
- if (io_callback)
- ReportIOTime(kWriteAsync1, start_async);
-
- ReportIOTime(kWrite, start);
- return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
-}
-
-// ------------------------------------------------------------------------
-
-bool EntryImplV3::CreateDataBlock(int index, int size) {
- DCHECK(index >= 0 && index < kNumStreams);
-
- Addr address(entry_.Data()->data_addr[index]);
- if (!CreateBlock(size, &address))
- return false;
-
- entry_.Data()->data_addr[index] = address.value();
- entry_.Store();
- return true;
-}
-
-bool EntryImplV3::CreateBlock(int size, Addr* address) {
- DCHECK(!address->is_initialized());
- if (!backend_)
- return false;
-
- FileType file_type = Addr::RequiredFileType(size);
- if (EXTERNAL == file_type) {
- if (size > backend_->MaxFileSize())
- return false;
- if (!backend_->CreateExternalFile(address))
- return false;
- } else {
- int num_blocks = Addr::RequiredBlocks(size, file_type);
-
- if (!backend_->CreateBlock(file_type, num_blocks, address))
- return false;
- }
- return true;
-}
-
-// Note that this method may end up modifying a block file so upon return the
-// involved block will be free, and could be reused for something else. If there
-// is a crash after that point (and maybe before returning to the caller), the
-// entry will be left dirty... and at some point it will be discarded; it is
-// important that the entry doesn't keep a reference to this address, or we'll
-// end up deleting the contents of |address| once again.
-void EntryImplV3::DeleteData(Addr address, int index) {
- DCHECK(backend_);
- if (!address.is_initialized())
- return;
- if (address.is_separate_file()) {
- int failure = !DeleteCacheFile(backend_->GetFileName(address));
- CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
- if (failure) {
- LOG(ERROR) << "Failed to delete " <<
- backend_->GetFileName(address).value() << " from the cache.";
- }
- if (files_[index])
- files_[index] = NULL; // Releases the object.
- } else {
- backend_->DeleteBlock(address, true);
- }
-}
-
-void EntryImplV3::UpdateRank(bool modified) {
- if (!backend_)
- return;
-
- if (!doomed_) {
- // Everything is handled by the backend.
- backend_->UpdateRank(this, modified);
- return;
- }
-
- Time current = Time::Now();
- node_.Data()->last_used = current.ToInternalValue();
-
- if (modified)
- node_.Data()->last_modified = current.ToInternalValue();
-}
-
-void EntryImplV3::DeleteEntryData(bool everything) {
- DCHECK(doomed_ || !everything);
-
- if (GetEntryFlags() & PARENT_ENTRY) {
- // We have some child entries that must go away.
- SparseControl::DeleteChildren(this);
- }
-
- if (GetDataSize(0))
- CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
- if (GetDataSize(1))
- CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
- for (int index = 0; index < kNumStreams; index++) {
- Addr address(entry_.Data()->data_addr[index]);
- if (address.is_initialized()) {
- backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
- unreported_size_[index], 0);
- entry_.Data()->data_addr[index] = 0;
- entry_.Data()->data_size[index] = 0;
- entry_.Store();
- DeleteData(address, index);
- }
- }
-
- if (!everything)
- return;
-
- // Remove all traces of this entry.
- backend_->RemoveEntry(this);
-
- // Note that at this point node_ and entry_ are just two blocks of data, and
- // even if they reference each other, nobody should be referencing them.
-
- Addr address(entry_.Data()->long_key);
- DeleteData(address, kKeyFileIndex);
- backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
-
- backend_->DeleteBlock(entry_.address(), true);
- entry_.Discard();
-
- if (!LeaveRankingsBehind()) {
- backend_->DeleteBlock(node_.address(), true);
- node_.Discard();
- }
-}
-
-// We keep a memory buffer for everything that ends up stored on a block file
-// (because we don't know yet the final data size), and for some of the data
-// that ends up on external files. This function will initialize that memory
-// buffer and / or the files needed to store the data.
-//
-// In general, a buffer may overlap data already stored on disk, and in that
-// case, the contents of the buffer are the most accurate. It may also extend
-// the file, but we don't want to read from disk just to keep the buffer up to
-// date. This means that as soon as there is any chance of confusion about which
-// version of some part of a file is the most recent, we'll flush the buffer and
-// reuse it for the new data. Keep in mind that the normal use pattern is quite
-// simple (write sequentially from the beginning), so we optimize for handling
-// that case.
-bool EntryImplV3::PrepareTarget(int index, int offset, int buf_len,
- bool truncate) {
- if (truncate)
- return HandleTruncation(index, offset, buf_len);
-
- if (!offset && !buf_len)
- return true;
-
- Addr address(entry_.Data()->data_addr[index]);
- if (address.is_initialized()) {
- if (address.is_block_file() && !MoveToLocalBuffer(index))
- return false;
-
- if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
- // We are about to create a buffer for the first 16KB, make sure that we
- // preserve existing data.
- if (!CopyToLocalBuffer(index))
- return false;
- }
- }
-
- if (!user_buffers_[index].get())
- user_buffers_[index].reset(new UserBuffer(backend_.get()));
-
- return PrepareBuffer(index, offset, buf_len);
-}
-
-// We get to this function with some data already stored. If there is a
-// truncation that results on data stored internally, we'll explicitly
-// handle the case here.
-bool EntryImplV3::HandleTruncation(int index, int offset, int buf_len) {
- Addr address(entry_.Data()->data_addr[index]);
-
- int current_size = entry_.Data()->data_size[index];
- int new_size = offset + buf_len;
-
- if (!new_size) {
- // This is by far the most common scenario.
- backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
- entry_.Data()->data_addr[index] = 0;
- entry_.Data()->data_size[index] = 0;
- unreported_size_[index] = 0;
- entry_.Store();
- DeleteData(address, index);
-
- user_buffers_[index].reset();
- return true;
- }
-
- // We never postpone truncating a file, if there is one, but we may postpone
- // telling the backend about the size reduction.
- if (user_buffers_[index].get()) {
- DCHECK_GE(current_size, user_buffers_[index]->Start());
- if (!address.is_initialized()) {
- // There is no overlap between the buffer and disk.
- if (new_size > user_buffers_[index]->Start()) {
- // Just truncate our buffer.
- DCHECK_LT(new_size, user_buffers_[index]->End());
- user_buffers_[index]->Truncate(new_size);
- return true;
- }
-
- // Just discard our buffer.
- user_buffers_[index]->Reset();
- return PrepareBuffer(index, offset, buf_len);
- }
-
- // There is some overlap or we need to extend the file before the
- // truncation.
- if (offset > user_buffers_[index]->Start())
- user_buffers_[index]->Truncate(new_size);
- UpdateSize(index, current_size, new_size);
- if (!Flush(index, 0))
- return false;
- user_buffers_[index].reset();
- }
-
- // We have data somewhere, and it is not in a buffer.
- DCHECK(!user_buffers_[index].get());
- DCHECK(address.is_initialized());
-
- if (new_size > kMaxBlockSize)
- return true; // Let the operation go directly to disk.
-
- return ImportSeparateFile(index, offset + buf_len);
-}
-
-bool EntryImpl::CopyToLocalBuffer(int index) {
- Addr address(entry_.Data()->data_addr[index]);
- DCHECK(!user_buffers_[index].get());
- DCHECK(address.is_initialized());
-
- int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
- user_buffers_[index].reset(new UserBuffer(backend_.get()));
- user_buffers_[index]->Write(len, NULL, 0);
-
- File* file = GetBackingFile(address, index);
- int offset = 0;
-
- if (address.is_block_file())
- offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
-
- if (!file ||
- !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
- user_buffers_[index].reset();
- return false;
- }
- return true;
-}
-
-bool EntryImpl::MoveToLocalBuffer(int index) {
- if (!CopyToLocalBuffer(index))
- return false;
-
- Addr address(entry_.Data()->data_addr[index]);
- entry_.Data()->data_addr[index] = 0;
- entry_.Store();
- DeleteData(address, index);
-
- // If we lose this entry we'll see it as zero sized.
- int len = entry_.Data()->data_size[index];
- backend_->ModifyStorageSize(len - unreported_size_[index], 0);
- unreported_size_[index] = len;
- return true;
-}
-
-bool EntryImpl::ImportSeparateFile(int index, int new_size) {
- if (entry_.Data()->data_size[index] > new_size)
- UpdateSize(index, entry_.Data()->data_size[index], new_size);
-
- return MoveToLocalBuffer(index);
-}
-
-bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
- DCHECK(user_buffers_[index].get());
- if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
- offset > entry_.Data()->data_size[index]) {
- // We are about to extend the buffer or the file (with zeros), so make sure
- // that we are not overwriting anything.
- Addr address(entry_.Data()->data_addr[index]);
- if (address.is_initialized() && address.is_separate_file()) {
- if (!Flush(index, 0))
- return false;
- // There is an actual file already, and we don't want to keep track of
-      // its length, so we let this operation go straight to disk.
- // The only case when a buffer is allowed to extend the file (as in fill
- // with zeros before the start) is when there is no file yet to extend.
- user_buffers_[index].reset();
- return true;
- }
- }
-
- if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
- if (!Flush(index, offset + buf_len))
- return false;
-
-    // Let's try again.
- if (offset > user_buffers_[index]->End() ||
- !user_buffers_[index]->PreWrite(offset, buf_len)) {
- // We cannot complete the operation with a buffer.
- DCHECK(!user_buffers_[index]->Size());
- DCHECK(!user_buffers_[index]->Start());
- user_buffers_[index].reset();
- }
- }
- return true;
-}
-
-bool EntryImpl::Flush(int index, int min_len) {
- Addr address(entry_.Data()->data_addr[index]);
- DCHECK(user_buffers_[index].get());
- DCHECK(!address.is_initialized() || address.is_separate_file());
- DVLOG(3) << "Flush";
-
- int size = std::max(entry_.Data()->data_size[index], min_len);
- if (size && !address.is_initialized() && !CreateDataBlock(index, size))
- return false;
-
- if (!entry_.Data()->data_size[index]) {
- DCHECK(!user_buffers_[index]->Size());
- return true;
- }
-
- address.set_value(entry_.Data()->data_addr[index]);
-
- int len = user_buffers_[index]->Size();
- int offset = user_buffers_[index]->Start();
- if (!len && !offset)
- return true;
-
- if (address.is_block_file()) {
- DCHECK_EQ(len, entry_.Data()->data_size[index]);
- DCHECK(!offset);
- offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
- }
-
- File* file = GetBackingFile(address, index);
- if (!file)
- return false;
-
- if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
- return false;
- user_buffers_[index]->Reset();
-
- return true;
-}
-
-void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
- if (entry_.Data()->data_size[index] == new_size)
- return;
-
- unreported_size_[index] += new_size - old_size;
- entry_.Data()->data_size[index] = new_size;
- entry_.set_modified();
-}
-
-int EntryImpl::InitSparseData() {
- if (sparse_.get())
- return net::OK;
-
- // Use a local variable so that sparse_ never goes from 'valid' to NULL.
- scoped_ptr<SparseControl> sparse(new SparseControl(this));
- int result = sparse->Init();
- if (net::OK == result)
- sparse_.swap(sparse);
-
- return result;
-}
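InitSparseData() above follows the build-into-a-local idiom: initialize a temporary and swap it into the member only on success, so sparse_ never holds a half-initialized object. A generic sketch of the same idiom (Holder and Widget are hypothetical stand-ins):

    #include <memory>

    struct Widget {
      int Init() { return 0; }  // 0 plays the role of net::OK.
    };

    class Holder {
     public:
      int EnsureWidget() {
        if (widget_)
          return 0;  // Already initialized; nothing to do.
        std::unique_ptr<Widget> widget(new Widget);
        int result = widget->Init();
        if (result == 0)
          widget_.swap(widget);  // Publish only on success.
        return result;           // On failure, widget_ is still null.
      }

     private:
      std::unique_ptr<Widget> widget_;
    };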
-
-void EntryImpl::SetEntryFlags(uint32_t flags) {
- entry_.Data()->flags |= flags;
- entry_.set_modified();
-}
-
-uint32_t EntryImpl::GetEntryFlags() {
- return entry_.Data()->flags;
-}
-
-void EntryImpl::GetData(int index, char** buffer, Addr* address) {
- DCHECK(backend_);
- if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
- !user_buffers_[index]->Start()) {
- // The data is already in memory, just copy it and we're done.
- int data_len = entry_.Data()->data_size[index];
- if (data_len <= user_buffers_[index]->Size()) {
- DCHECK(!user_buffers_[index]->Start());
- *buffer = new char[data_len];
- memcpy(*buffer, user_buffers_[index]->Data(), data_len);
- return;
- }
- }
-
-  // Bad news: we'd have to read the info from disk, so instead we'll just
-  // tell the caller where to read from.
- *buffer = NULL;
- address->set_value(entry_.Data()->data_addr[index]);
- if (address->is_initialized()) {
- // Prevent us from deleting the block from the backing store.
- backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
- unreported_size_[index], 0);
- entry_.Data()->data_addr[index] = 0;
- entry_.Data()->data_size[index] = 0;
- }
-}
-
-#endif // defined(V3_NOT_JUST_YET_READY).
-
-void EntryImplV3::ReportIOTime(Operation op, const base::TimeTicks& start) {
- if (!backend_)
- return;
-
- switch (op) {
- case kRead:
- CACHE_UMA(AGE_MS, "ReadTime", start);
- break;
- case kWrite:
- CACHE_UMA(AGE_MS, "WriteTime", start);
- break;
- case kSparseRead:
- CACHE_UMA(AGE_MS, "SparseReadTime", start);
- break;
- case kSparseWrite:
- CACHE_UMA(AGE_MS, "SparseWriteTime", start);
- break;
- case kAsyncIO:
- CACHE_UMA(AGE_MS, "AsyncIOTime", start);
- break;
- case kReadAsync1:
- CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", start);
- break;
- case kWriteAsync1:
- CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", start);
- break;
- default:
- NOTREACHED();
- }
-}
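A typical caller of ReportIOTime() snapshots a TimeTicks before doing the work and reports afterwards; CACHE_UMA(AGE_MS, ...) then records the elapsed time under the cache-type-specific histogram name. A hedged sketch of such a call site (TimedRead() and DoTheRead() are hypothetical stand-ins, not members the real class declares):

    int EntryImplV3::TimedRead(int index, int offset, IOBuffer* buf, int buf_len) {
      base::TimeTicks start = base::TimeTicks::Now();
      int rv = DoTheRead(index, offset, buf, buf_len);  // Hypothetical I/O helper.
      ReportIOTime(kRead, start);  // Emits an AGE_MS sample named "ReadTime".
      return rv;
    }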
-
-void EntryImplV3::Log(const char* msg) {
- Trace("%s 0x%p 0x%x", msg, reinterpret_cast<void*>(this), address_);
- Trace(" data: 0x%x 0x%x", entry_->data_addr[0], entry_->data_addr[1]);
- Trace(" doomed: %d", doomed_);
-}
-
-void EntryImplV3::Doom() {
- NOTIMPLEMENTED();
-}
-
-void EntryImplV3::Close() {
- NOTIMPLEMENTED();
-}
-
-std::string EntryImplV3::GetKey() const {
- return std::string();
-}
-
-Time EntryImplV3::GetLastUsed() const {
- return Time();
-}
-
-Time EntryImplV3::GetLastModified() const {
- return Time();
-}
-
-int32_t EntryImplV3::GetDataSize(int index) const {
- return 0;
-}
-
-int EntryImplV3::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
- const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-int EntryImplV3::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
- const CompletionCallback& callback, bool truncate) {
- return net::ERR_FAILED;
-}
-
-int EntryImplV3::ReadSparseData(int64_t offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-int EntryImplV3::WriteSparseData(int64_t offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-int EntryImplV3::GetAvailableRange(int64_t offset,
- int len,
- int64_t* start,
- const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-bool EntryImplV3::CouldBeSparse() const {
- return false;
-}
-
-void EntryImplV3::CancelSparseIO() {
- NOTIMPLEMENTED();
-}
-
-int EntryImplV3::ReadyForSparseIO(const CompletionCallback& callback) {
- return net::ERR_FAILED;
-}
-
-EntryImplV3::~EntryImplV3() {
- NOTIMPLEMENTED();
-}
-
-} // namespace disk_cache
diff --git a/chromium/net/disk_cache/blockfile/entry_impl_v3.h b/chromium/net/disk_cache/blockfile/entry_impl_v3.h
deleted file mode 100644
index a9a5df5170d..00000000000
--- a/chromium/net/disk_cache/blockfile/entry_impl_v3.h
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef NET_DISK_CACHE_BLOCKFILE_ENTRY_IMPL_V3_H_
-#define NET_DISK_CACHE_BLOCKFILE_ENTRY_IMPL_V3_H_
-
-#include <stdint.h>
-
-#include <string>
-
-#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "net/disk_cache/blockfile/disk_format_v3.h"
-#include "net/disk_cache/blockfile/storage_block.h"
-#include "net/disk_cache/disk_cache.h"
-#include "net/log/net_log.h"
-
-namespace disk_cache {
-
-class BackendImplV3;
-class SparseControlV3;
-
-// This class implements the Entry interface. An object of this
-// class represents a single entry in the cache.
-class NET_EXPORT_PRIVATE EntryImplV3
- : public Entry,
- public base::RefCounted<EntryImplV3> {
- friend class base::RefCounted<EntryImplV3>;
- // friend class SparseControlV3;
- public:
- enum Operation {
- kRead,
- kWrite,
- kSparseRead,
- kSparseWrite,
- kAsyncIO,
- kReadAsync1,
- kWriteAsync1
- };
-
- EntryImplV3(BackendImplV3* backend, Addr address, bool read_only);
-
- // Performs the initialization of a EntryImplV3 that will be added to the
- // cache.
- bool CreateEntry(Addr node_address, const std::string& key, uint32_t hash);
-
-  uint32_t GetHash() const;
- Addr GetAddress() const;
- int GetReuseCounter() const;
- void SetReuseCounter(int count);
- int GetRefetchCounter() const;
- void SetRefetchCounter(int count);
-
- // Returns true if this entry matches the lookup arguments.
- bool IsSameEntry(const std::string& key, uint32_t hash);
-
-  // Permanently destroys this entry.
- void InternalDoom();
-
- // Returns false if the entry is clearly invalid.
- bool SanityCheck();
- bool DataSanityCheck();
-
-  // Attempts to make this entry reachable through the key.
- void FixForDelete();
-
- // Set the access times for this entry. This method provides support for
- // the upgrade tool.
- void SetTimes(base::Time last_used, base::Time last_modified);
-
- // Logs a begin event and enables logging for the EntryImplV3. Will also cause
- // an end event to be logged on destruction. The EntryImplV3 must have its key
- // initialized before this is called. |created| is true if the Entry was
- // created rather than opened.
- void BeginLogging(net::NetLog* net_log, bool created);
-
- const net::BoundNetLog& net_log() const;
-
- // Entry interface.
- void Doom() override;
- void Close() override;
- std::string GetKey() const override;
- base::Time GetLastUsed() const override;
- base::Time GetLastModified() const override;
- int32_t GetDataSize(int index) const override;
- int ReadData(int index,
- int offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback) override;
- int WriteData(int index,
- int offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback,
- bool truncate) override;
- int ReadSparseData(int64_t offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback) override;
- int WriteSparseData(int64_t offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback) override;
- int GetAvailableRange(int64_t offset,
- int len,
- int64_t* start,
- const CompletionCallback& callback) override;
- bool CouldBeSparse() const override;
- void CancelSparseIO() override;
- int ReadyForSparseIO(const CompletionCallback& callback) override;
-
- private:
- enum {
- kNumStreams = 3
- };
- class UserBuffer;
-
- ~EntryImplV3() override;
-
-  // Does all the work for ReadData and WriteData. Implemented as separate
-  // functions to make logging of results simpler.
- int InternalReadData(int index, int offset, IOBuffer* buf,
- int buf_len, const CompletionCallback& callback);
- int InternalWriteData(int index, int offset, IOBuffer* buf, int buf_len,
- const CompletionCallback& callback, bool truncate);
-
- // Initializes the storage for an internal or external data block.
- bool CreateDataBlock(int index, int size);
-
- // Initializes the storage for an internal or external generic block.
- bool CreateBlock(int size, Addr* address);
-
-  // Deletes the data pointed to by |address|, maybe backed by files_[index].
-  // Note that most likely the caller should delete (and store) the reference
-  // to |address| *before* calling this method, because we don't want to have
-  // an entry using an address that is already free.
- void DeleteData(Addr address, int index);
-
- // Updates ranking information.
- void UpdateRank(bool modified);
-
- // Deletes this entry from disk. If |everything| is false, only the user data
- // will be removed, leaving the key and control data intact.
- void DeleteEntryData(bool everything);
-
- // Prepares the target file or buffer for a write of buf_len bytes at the
- // given offset.
- bool PrepareTarget(int index, int offset, int buf_len, bool truncate);
-
- // Adjusts the internal buffer and file handle for a write that truncates this
- // stream.
- bool HandleTruncation(int index, int offset, int buf_len);
-
- // Copies data from disk to the internal buffer.
- bool CopyToLocalBuffer(int index);
-
- // Reads from a block data file to this object's memory buffer.
- bool MoveToLocalBuffer(int index);
-
- // Loads the external file to this object's memory buffer.
- bool ImportSeparateFile(int index, int new_size);
-
-  // Makes sure that the internal buffer can handle a write of |buf_len|
-  // bytes at |offset|.
- bool PrepareBuffer(int index, int offset, int buf_len);
-
- // Flushes the in-memory data to the backing storage. The data destination
- // is determined based on the current data length and |min_len|.
- bool Flush(int index, int min_len);
-
- // Updates the size of a given data stream.
- void UpdateSize(int index, int old_size, int new_size);
-
- // Initializes the sparse control object. Returns a net error code.
- int InitSparseData();
-
- // Adds the provided |flags| to the current EntryFlags for this entry.
- void SetEntryFlags(uint32_t flags);
-
- // Returns the current EntryFlags for this entry.
- uint32_t GetEntryFlags();
-
- // Gets the data stored at the given index. If the information is in memory,
- // a buffer will be allocated and the data will be copied to it (the caller
- // can find out the size of the buffer before making this call). Otherwise,
- // the cache address of the data will be returned, and that address will be
-  // removed from the regular bookkeeping of this entry, so the caller is
- // responsible for deleting the block (or file) from the backing store at some
- // point; there is no need to report any storage-size change, only to do the
- // actual cleanup.
- void GetData(int index, char** buffer, Addr* address);
-
- // Generates a histogram for the time spent working on this operation.
- void ReportIOTime(Operation op, const base::TimeTicks& start);
-
- // Logs this entry to the internal trace buffer.
- void Log(const char* msg);
-
- scoped_ptr<EntryRecord> entry_; // Basic record for this entry.
- scoped_ptr<ShortEntryRecord> short_entry_; // Valid for evicted entries.
- base::WeakPtr<BackendImplV3> backend_; // Back pointer to the cache.
- scoped_ptr<UserBuffer> user_buffers_[kNumStreams]; // Stores user data.
- mutable std::string key_; // Copy of the key.
- Addr address_;
- int unreported_size_[kNumStreams]; // Bytes not reported yet to the backend.
- bool doomed_; // True if this entry was removed from the cache.
- bool read_only_;
- bool dirty_; // True if there is something to write.
- bool modified_;
- // scoped_ptr<SparseControlV3> sparse_; // Support for sparse entries.
-
- net::BoundNetLog net_log_;
-
- DISALLOW_COPY_AND_ASSIGN(EntryImplV3);
-};
-
-} // namespace disk_cache
-
-#endif // NET_DISK_CACHE_BLOCKFILE_ENTRY_IMPL_V3_H_
diff --git a/chromium/net/disk_cache/blockfile/eviction.cc b/chromium/net/disk_cache/blockfile/eviction.cc
index 6e0dcd5a833..860b8b11006 100644
--- a/chromium/net/disk_cache/blockfile/eviction.cc
+++ b/chromium/net/disk_cache/blockfile/eviction.cc
@@ -38,7 +38,7 @@
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/disk_format.h"
diff --git a/chromium/net/disk_cache/blockfile/eviction_v3.cc b/chromium/net/disk_cache/blockfile/eviction_v3.cc
deleted file mode 100644
index c658a8e0e52..00000000000
--- a/chromium/net/disk_cache/blockfile/eviction_v3.cc
+++ /dev/null
@@ -1,516 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// The eviction policy is a very simple pure LRU, so the elements at the end of
-// the list are evicted until kCleanUpMargin free space is available. There is
-// only one list in use (Rankings::NO_USE), and elements are sent to the front
-// of the list whenever they are accessed.
-
-// The new (in-development) eviction policy adds re-use as a factor to evict
-// an entry. The story so far:
-
-// Entries are linked on separate lists depending on how often they are used.
-// When we see an element for the first time, it goes to the NO_USE list; if
-// the object is reused later on, we move it to the LOW_USE list, until it is
-// used kHighUse times, at which point it is moved to the HIGH_USE list.
-// Whenever an element is evicted, we move it to the DELETED list so that if the
-// element is accessed again, we remember the fact that it was already stored
-// and maybe in the future we don't evict that element.
-
-// When we have to evict an element, first we try to use the last element of
-// the NO_USE list, then we move to the LOW_USE list, and only then do we
-// evict an entry from the HIGH_USE list. We attempt to keep entries in the
-// cache for at least kTargetTime hours (with frequently accessed items stored
-// for longer periods), but if we cannot do that, we fall back to keeping each
-// list roughly the same size so that we have a chance to see an element again
-// and move it to another list.
-
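As a compact, hedged illustration of the list mechanics described above (promotion on reuse, eviction from the least valuable non-empty list), here is a sketch with invented types; the real code also weighs entry age (kTargetTime) and the DELETED list:

    #include <list>

    enum List { NO_USE, LOW_USE, HIGH_USE, NUM_LISTS };
    const int kHighUseThreshold = 10;  // Plays the role of kHighUse.

    struct Node {
      int reuse_count = 0;
      List list = NO_USE;
    };

    struct MultiListLru {
      std::list<Node*> lists[NUM_LISTS];

      void OnUse(Node* node) {
        lists[node->list].remove(node);
        node->reuse_count++;
        node->list = NO_USE;
        if (node->reuse_count >= kHighUseThreshold)
          node->list = HIGH_USE;
        else if (node->reuse_count >= 1)
          node->list = LOW_USE;
        lists[node->list].push_front(node);  // Most recently used in front.
      }

      // Evict from the cheapest list that still has entries.
      Node* PickVictim() {
        for (int i = NO_USE; i < NUM_LISTS; i++) {
          if (!lists[i].empty()) {
            Node* victim = lists[i].back();  // LRU end of the list.
            lists[i].pop_back();
            return victim;
          }
        }
        return NULL;  // Nothing to evict.
      }
    };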
-#include "net/disk_cache/blockfile/eviction_v3.h"
-
-#include <stdint.h>
-
-#include <limits>
-
-#include "base/bind.h"
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "base/strings/string_util.h"
-#include "base/time/time.h"
-#include "net/disk_cache/blockfile/backend_impl_v3.h"
-#include "net/disk_cache/blockfile/entry_impl_v3.h"
-#include "net/disk_cache/blockfile/experiments.h"
-#include "net/disk_cache/blockfile/histogram_macros_v3.h"
-#include "net/disk_cache/blockfile/trace.h"
-
-#define CACHE_UMA_BACKEND_IMPL_OBJ backend_
-
-using base::Time;
-using base::TimeTicks;
-
-namespace {
-
-const int kCleanUpMargin = 1024 * 1024;
-
-#if defined(V3_NOT_JUST_YET_READY)
-const int kHighUse = 10; // Reuse count to be on the HIGH_USE list.
-const int kTargetTime = 24 * 7; // Time to be evicted (hours since last use).
-const int kMaxDelayedTrims = 60;
-#endif // defined(V3_NOT_JUST_YET_READY).
-
-int LowWaterAdjust(int high_water) {
- if (high_water < kCleanUpMargin)
- return 0;
-
- return high_water - kCleanUpMargin;
-}
-
-#if defined(V3_NOT_JUST_YET_READY)
-bool FallingBehind(int current_size, int max_size) {
- return current_size > max_size - kCleanUpMargin * 20;
-}
-#endif // defined(V3_NOT_JUST_YET_READY).
-
-} // namespace
-
-namespace disk_cache {
-
-// The real initialization happens during Init(), init_ is the only member that
-// has to be initialized here.
-EvictionV3::EvictionV3()
- : backend_(NULL),
- index_(NULL),
- header_(NULL),
- init_(false),
- ptr_factory_(this) {
-}
-
-EvictionV3::~EvictionV3() {
-}
-
-void EvictionV3::Init(BackendImplV3* backend) {
- // We grab a bunch of info from the backend to make the code a little cleaner
- // when we're actually doing work.
- backend_ = backend;
- index_ = &backend_->index_;
- header_ = index_->header();
- max_size_ = LowWaterAdjust(backend_->max_size_);
- lru_ = backend->lru_eviction_;
- first_trim_ = true;
- trimming_ = false;
- delay_trim_ = false;
- trim_delays_ = 0;
- init_ = true;
- test_mode_ = false;
-}
-
-void EvictionV3::Stop() {
- // It is possible for the backend initialization to fail, in which case this
- // object was never initialized... and there is nothing to do.
- if (!init_)
- return;
-
- // We want to stop further evictions, so let's pretend that we are busy from
- // this point on.
- DCHECK(!trimming_);
- trimming_ = true;
- ptr_factory_.InvalidateWeakPtrs();
-}
-
-#if defined(V3_NOT_JUST_YET_READY)
-void EvictionV3::TrimCache(bool empty) {
- if (backend_->disabled_ || trimming_)
- return;
-
- if (!empty && !ShouldTrim())
- return PostDelayedTrim();
-
- if (new_eviction_)
- return TrimCacheV2(empty);
-
- Trace("*** Trim Cache ***");
- trimming_ = true;
- TimeTicks start = TimeTicks::Now();
- Rankings::ScopedRankingsBlock node(rankings_);
- Rankings::ScopedRankingsBlock next(
- rankings_, rankings_->GetPrev(node.get(), Rankings::NO_USE));
- int deleted_entries = 0;
- int target_size = empty ? 0 : max_size_;
- while ((header_->num_bytes > target_size || test_mode_) && next.get()) {
- // The iterator could be invalidated within EvictEntry().
- if (!next->HasData())
- break;
- node.reset(next.release());
- next.reset(rankings_->GetPrev(node.get(), Rankings::NO_USE));
- if (node->Data()->dirty != backend_->GetCurrentEntryId() || empty) {
- // This entry is not being used by anybody.
- // Do NOT use node as an iterator after this point.
- rankings_->TrackRankingsBlock(node.get(), false);
- if (EvictEntry(node.get(), empty, Rankings::NO_USE) && !test_mode_)
- deleted_entries++;
-
- if (!empty && test_mode_)
- break;
- }
- if (!empty && (deleted_entries > 20 ||
- (TimeTicks::Now() - start).InMilliseconds() > 20)) {
- base::MessageLoop::current()->PostTask(
- FROM_HERE,
- base::Bind(&EvictionV3::TrimCache, ptr_factory_.GetWeakPtr(), false));
- break;
- }
- }
-
- if (empty) {
- CACHE_UMA(AGE_MS, "TotalClearTimeV1", 0, start);
- } else {
- CACHE_UMA(AGE_MS, "TotalTrimTimeV1", 0, start);
- }
- CACHE_UMA(COUNTS, "TrimItemsV1", 0, deleted_entries);
-
- trimming_ = false;
- Trace("*** Trim Cache end ***");
- return;
-}
-
-void EvictionV3::OnOpenEntry(EntryImplV3* entry) {
- EntryStore* info = entry->entry()->Data();
- DCHECK_EQ(ENTRY_NORMAL, info->state);
-
- if (info->reuse_count < std::numeric_limits<int32_t>::max()) {
- info->reuse_count++;
- entry->entry()->set_modified();
-
- // We may need to move this to a new list.
- if (1 == info->reuse_count) {
- rankings_->Remove(entry->rankings(), Rankings::NO_USE, true);
- rankings_->Insert(entry->rankings(), false, Rankings::LOW_USE);
- entry->entry()->Store();
- } else if (kHighUse == info->reuse_count) {
- rankings_->Remove(entry->rankings(), Rankings::LOW_USE, true);
- rankings_->Insert(entry->rankings(), false, Rankings::HIGH_USE);
- entry->entry()->Store();
- }
- }
-}
-
-void EvictionV3::OnCreateEntry(EntryImplV3* entry) {
- EntryStore* info = entry->entry()->Data();
- switch (info->state) {
- case ENTRY_NORMAL: {
- DCHECK(!info->reuse_count);
- DCHECK(!info->refetch_count);
- break;
-    }
- case ENTRY_EVICTED: {
- if (info->refetch_count < std::numeric_limits<int32_t>::max())
- info->refetch_count++;
-
- if (info->refetch_count > kHighUse && info->reuse_count < kHighUse) {
- info->reuse_count = kHighUse;
- } else {
- info->reuse_count++;
- }
- info->state = ENTRY_NORMAL;
- entry->entry()->Store();
- rankings_->Remove(entry->rankings(), Rankings::DELETED, true);
- break;
-    }
- default:
- NOTREACHED();
- }
-
- rankings_->Insert(entry->rankings(), true, GetListForEntryV2(entry));
-}
-
-void EvictionV3::SetTestMode() {
- test_mode_ = true;
-}
-
-void EvictionV3::TrimDeletedList(bool empty) {
- DCHECK(test_mode_ && new_eviction_);
- TrimDeleted(empty);
-}
-
-// -----------------------------------------------------------------------
-
-void EvictionV3::PostDelayedTrim() {
- // Prevent posting multiple tasks.
- if (delay_trim_)
- return;
- delay_trim_ = true;
- trim_delays_++;
- base::MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&EvictionV3::DelayedTrim, ptr_factory_.GetWeakPtr()),
- base::TimeDelta::FromMilliseconds(1000));
-}
-
-void EvictionV3::DelayedTrim() {
- delay_trim_ = false;
- if (trim_delays_ < kMaxDelayedTrims && backend_->IsLoaded())
- return PostDelayedTrim();
-
- TrimCache(false);
-}
-
-bool EvictionV3::ShouldTrim() {
- if (!FallingBehind(header_->num_bytes, max_size_) &&
- trim_delays_ < kMaxDelayedTrims && backend_->IsLoaded()) {
- return false;
- }
-
- UMA_HISTOGRAM_COUNTS("DiskCache.TrimDelays", trim_delays_);
- trim_delays_ = 0;
- return true;
-}
-
-bool EvictionV3::ShouldTrimDeleted() {
- int index_load = header_->num_entries * 100 / index_size_;
-
- // If the index is not loaded, the deleted list will tend to double the size
-  // of the other 3 lists (40% of the total). Otherwise, all lists will be
- // about the same size.
- int max_length = (index_load < 25) ? header_->num_entries * 2 / 5 :
- header_->num_entries / 4;
- return (!test_mode_ && header_->lru.sizes[Rankings::DELETED] > max_length);
-}
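For concreteness, the cap computed above behaves like this (the same arithmetic restated as a standalone helper, with sample numbers in the comment):

    // With 100000 entries in a 500000-cell index, index_load is 20 (< 25), so
    // the DELETED list may grow to 2/5 of the entries (40000). With a more
    // loaded index the cap drops to 1/4 (25000).
    int MaxDeletedListLength(int num_entries, int index_size) {
      int index_load = num_entries * 100 / index_size;
      return (index_load < 25) ? num_entries * 2 / 5 : num_entries / 4;
    }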
-
-bool EvictionV3::EvictEntry(CacheRankingsBlock* node, bool empty,
-                            Rankings::List list) {
- EntryImplV3* entry = backend_->GetEnumeratedEntry(node, list);
- if (!entry) {
- Trace("NewEntry failed on Trim 0x%x", node->address().value());
- return false;
- }
-
- ReportTrimTimes(entry);
- if (empty || !new_eviction_) {
- entry->DoomImpl();
- } else {
- entry->DeleteEntryData(false);
- EntryStore* info = entry->entry()->Data();
- DCHECK_EQ(ENTRY_NORMAL, info->state);
-
- rankings_->Remove(entry->rankings(), GetListForEntryV2(entry), true);
- info->state = ENTRY_EVICTED;
- entry->entry()->Store();
- rankings_->Insert(entry->rankings(), true, Rankings::DELETED);
- }
- if (!empty)
- backend_->OnEvent(Stats::TRIM_ENTRY);
-
- entry->Release();
-
- return true;
-}
-
-void EvictionV3::TrimCacheV2(bool empty) {
- Trace("*** Trim Cache ***");
- trimming_ = true;
- TimeTicks start = TimeTicks::Now();
-
- const int kListsToSearch = 3;
- Rankings::ScopedRankingsBlock next[kListsToSearch];
- int list = Rankings::LAST_ELEMENT;
-
- // Get a node from each list.
-  bool done = false;
-  for (int i = 0; i < kListsToSearch; i++) {
-    next[i].set_rankings(rankings_);
- if (done)
- continue;
- next[i].reset(rankings_->GetPrev(NULL, static_cast<Rankings::List>(i)));
- if (!empty && NodeIsOldEnough(next[i].get(), i)) {
- list = static_cast<Rankings::List>(i);
- done = true;
- }
- }
-
-  // If we are not meeting the time targets, let's move on to list length.
- if (!empty && Rankings::LAST_ELEMENT == list)
- list = SelectListByLength(next);
-
- if (empty)
- list = 0;
-
- Rankings::ScopedRankingsBlock node(rankings_);
- int deleted_entries = 0;
- int target_size = empty ? 0 : max_size_;
-
- for (; list < kListsToSearch; list++) {
- while ((header_->num_bytes > target_size || test_mode_) &&
- next[list].get()) {
- // The iterator could be invalidated within EvictEntry().
- if (!next[list]->HasData())
- break;
- node.reset(next[list].release());
- next[list].reset(rankings_->GetPrev(node.get(),
- static_cast<Rankings::List>(list)));
- if (node->Data()->dirty != backend_->GetCurrentEntryId() || empty) {
- // This entry is not being used by anybody.
- // Do NOT use node as an iterator after this point.
- rankings_->TrackRankingsBlock(node.get(), false);
- if (EvictEntry(node.get(), empty, static_cast<Rankings::List>(list)))
- deleted_entries++;
-
- if (!empty && test_mode_)
- break;
- }
- if (!empty && (deleted_entries > 20 ||
- (TimeTicks::Now() - start).InMilliseconds() > 20)) {
- base::MessageLoop::current()->PostTask(
- FROM_HERE,
-            base::Bind(&EvictionV3::TrimCache, ptr_factory_.GetWeakPtr(), false));
- break;
- }
- }
- if (!empty)
- list = kListsToSearch;
- }
-
- if (empty) {
- TrimDeleted(true);
- } else if (ShouldTrimDeleted()) {
- base::MessageLoop::current()->PostTask(
- FROM_HERE,
- base::Bind(&EvictionV3::TrimDeleted, ptr_factory_.GetWeakPtr(), empty));
- }
-
- if (empty) {
- CACHE_UMA(AGE_MS, "TotalClearTimeV2", 0, start);
- } else {
- CACHE_UMA(AGE_MS, "TotalTrimTimeV2", 0, start);
- }
- CACHE_UMA(COUNTS, "TrimItemsV2", 0, deleted_entries);
-
- Trace("*** Trim Cache end ***");
- trimming_ = false;
- return;
-}
-
-// This is a minimal implementation that just discards the oldest nodes.
-// TODO(rvargas): Do something better here.
-void EvictionV3::TrimDeleted(bool empty) {
- Trace("*** Trim Deleted ***");
- if (backend_->disabled_)
- return;
-
- TimeTicks start = TimeTicks::Now();
- Rankings::ScopedRankingsBlock node(rankings_);
- Rankings::ScopedRankingsBlock next(
- rankings_, rankings_->GetPrev(node.get(), Rankings::DELETED));
- int deleted_entries = 0;
- while (next.get() &&
- (empty || (deleted_entries < 20 &&
- (TimeTicks::Now() - start).InMilliseconds() < 20))) {
- node.reset(next.release());
- next.reset(rankings_->GetPrev(node.get(), Rankings::DELETED));
- if (RemoveDeletedNode(node.get()))
- deleted_entries++;
- if (test_mode_)
- break;
- }
-
- if (deleted_entries && !empty && ShouldTrimDeleted()) {
- base::MessageLoop::current()->PostTask(
- FROM_HERE,
- base::Bind(&EvictionV3::TrimDeleted, ptr_factory_.GetWeakPtr(), false));
- }
-
- CACHE_UMA(AGE_MS, "TotalTrimDeletedTime", 0, start);
- CACHE_UMA(COUNTS, "TrimDeletedItems", 0, deleted_entries);
- Trace("*** Trim Deleted end ***");
- return;
-}
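TrimDeleted() above uses a recurring blockfile pattern: process a small bounded slice of work (at most 20 entries or roughly 20 ms here), then post a task to continue, so trimming never monopolizes the cache thread. A hedged, generic sketch of that pattern using the same base APIs this file already relies on (Worker, HasWork() and ProcessOne() are invented):

    class Worker {
     public:
      Worker() : ptr_factory_(this) {}

      void DoSlice() {
        base::TimeTicks start = base::TimeTicks::Now();
        int processed = 0;
        while (HasWork() && processed < 20 &&
               (base::TimeTicks::Now() - start).InMilliseconds() < 20) {
          ProcessOne();
          processed++;
        }
        if (HasWork()) {  // More to do: reschedule ourselves and yield.
          base::MessageLoop::current()->PostTask(
              FROM_HERE,
              base::Bind(&Worker::DoSlice, ptr_factory_.GetWeakPtr()));
        }
      }

     private:
      bool HasWork();     // Hypothetical: is there anything left to trim?
      void ProcessOne();  // Hypothetical: trim a single item.
      base::WeakPtrFactory<Worker> ptr_factory_;
    };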
-
-void EvictionV3::ReportTrimTimes(EntryImplV3* entry) {
- if (first_trim_) {
- first_trim_ = false;
- if (backend_->ShouldReportAgain()) {
- CACHE_UMA(AGE, "TrimAge", 0, entry->GetLastUsed());
- ReportListStats();
- }
-
- if (header_->lru.filled)
- return;
-
- header_->lru.filled = 1;
-
- if (header_->create_time) {
- // This is the first entry that we have to evict, generate some noise.
- backend_->FirstEviction();
- } else {
-      // This is an old file, but we may want more reports from this user, so
-      // let's save some create_time.
- Time::Exploded old = {0};
- old.year = 2009;
- old.month = 3;
- old.day_of_month = 1;
- header_->create_time = Time::FromLocalExploded(old).ToInternalValue();
- }
- }
-}
-
-bool EvictionV3::NodeIsOldEnough(CacheRankingsBlock* node, int list) {
- if (!node)
- return false;
-
- // If possible, we want to keep entries on each list at least kTargetTime
- // hours. Each successive list on the enumeration has 2x the target time of
- // the previous list.
- Time used = Time::FromInternalValue(node->Data()->last_used);
- int multiplier = 1 << list;
- return (Time::Now() - used).InHours() > kTargetTime * multiplier;
-}
-
-int EvictionV3::SelectListByLength(Rankings::ScopedRankingsBlock* next) {
- int data_entries = header_->num_entries -
- header_->lru.sizes[Rankings::DELETED];
-  // Start by having each list be roughly the same size.
- if (header_->lru.sizes[0] > data_entries / 3)
- return 0;
-
- int list = (header_->lru.sizes[1] > data_entries / 3) ? 1 : 2;
-
- // Make sure that frequently used items are kept for a minimum time; we know
- // that this entry is not older than its current target, but it must be at
- // least older than the target for list 0 (kTargetTime), as long as we don't
- // exhaust list 0.
- if (!NodeIsOldEnough(next[list].get(), 0) &&
- header_->lru.sizes[0] > data_entries / 10)
- list = 0;
-
- return list;
-}
-
-void EvictionV3::ReportListStats() {
- if (!new_eviction_)
- return;
-
- Rankings::ScopedRankingsBlock last1(rankings_,
- rankings_->GetPrev(NULL, Rankings::NO_USE));
- Rankings::ScopedRankingsBlock last2(rankings_,
- rankings_->GetPrev(NULL, Rankings::LOW_USE));
- Rankings::ScopedRankingsBlock last3(rankings_,
- rankings_->GetPrev(NULL, Rankings::HIGH_USE));
- Rankings::ScopedRankingsBlock last4(rankings_,
- rankings_->GetPrev(NULL, Rankings::DELETED));
-
- if (last1.get())
- CACHE_UMA(AGE, "NoUseAge", 0,
- Time::FromInternalValue(last1.get()->Data()->last_used));
- if (last2.get())
- CACHE_UMA(AGE, "LowUseAge", 0,
- Time::FromInternalValue(last2.get()->Data()->last_used));
- if (last3.get())
- CACHE_UMA(AGE, "HighUseAge", 0,
- Time::FromInternalValue(last3.get()->Data()->last_used));
- if (last4.get())
- CACHE_UMA(AGE, "DeletedAge", 0,
- Time::FromInternalValue(last4.get()->Data()->last_used));
-}
-#endif // defined(V3_NOT_JUST_YET_READY).
-
-} // namespace disk_cache
diff --git a/chromium/net/disk_cache/blockfile/eviction_v3.h b/chromium/net/disk_cache/blockfile/eviction_v3.h
deleted file mode 100644
index a9b48f41839..00000000000
--- a/chromium/net/disk_cache/blockfile/eviction_v3.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef NET_DISK_CACHE_BLOCKFILE_EVICTION_V3_H_
-#define NET_DISK_CACHE_BLOCKFILE_EVICTION_V3_H_
-
-#include "base/macros.h"
-#include "base/memory/weak_ptr.h"
-#include "net/disk_cache/blockfile/disk_format_v3.h"
-#include "net/disk_cache/blockfile/index_table_v3.h"
-
-namespace disk_cache {
-
-class BackendImplV3;
-class CacheRankingsBlock;
-class EntryImplV3;
-
-namespace Rankings {
-typedef int List;
-}
-
-// This class implements the eviction algorithm for the cache and it is tightly
-// integrated with BackendImpl.
-class EvictionV3 {
- public:
- EvictionV3();
- ~EvictionV3();
-
- void Init(BackendImplV3* backend);
- void Stop();
-
- // Deletes entries from the cache until the current size is below the limit.
- // If empty is true, the whole cache will be trimmed, regardless of being in
- // use.
- void TrimCache(bool empty);
-
- // Notifications of interesting events for a given entry.
- void OnOpenEntry(EntryImplV3* entry);
- void OnCreateEntry(EntryImplV3* entry);
-
- // Testing interface.
- void SetTestMode();
- void TrimDeletedList(bool empty);
-
- private:
- void PostDelayedTrim();
- void DelayedTrim();
- bool ShouldTrim();
- bool ShouldTrimDeleted();
- bool EvictEntry(CacheRankingsBlock* node, bool empty, Rankings::List list);
-
- void TrimCacheV2(bool empty);
- void TrimDeleted(bool empty);
-
- bool NodeIsOldEnough(CacheRankingsBlock* node, int list);
-  int SelectListByLength(Rankings::ScopedRankingsBlock* next);
- void ReportListStats();
-
- BackendImplV3* backend_;
- IndexTable* index_;
- IndexHeaderV3* header_;
- int max_size_;
- int trim_delays_;
- bool lru_;
- bool first_trim_;
- bool trimming_;
- bool delay_trim_;
- bool init_;
- bool test_mode_;
- base::WeakPtrFactory<EvictionV3> ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(EvictionV3);
-};
-
-} // namespace disk_cache
-
-#endif // NET_DISK_CACHE_BLOCKFILE_EVICTION_V3_H_
diff --git a/chromium/net/disk_cache/blockfile/file_win.cc b/chromium/net/disk_cache/blockfile/file_win.cc
index 1c5a9668763..f38595d3bf1 100644
--- a/chromium/net/disk_cache/blockfile/file_win.cc
+++ b/chromium/net/disk_cache/blockfile/file_win.cc
@@ -62,8 +62,6 @@ void CompletionHandler::OnIOCompleted(
MyOverlapped::MyOverlapped(disk_cache::File* file, size_t offset,
disk_cache::FileIOCallback* callback) {
- memset(this, 0, sizeof(*this));
- context_.handler = g_completion_handler.Pointer();
context_.overlapped.Offset = static_cast<DWORD>(offset);
file_ = file;
callback_ = callback;
diff --git a/chromium/net/disk_cache/blockfile/histogram_macros_v3.h b/chromium/net/disk_cache/blockfile/histogram_macros_v3.h
deleted file mode 100644
index 52b75d2118d..00000000000
--- a/chromium/net/disk_cache/blockfile/histogram_macros_v3.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains macros to simplify histogram reporting from the disk
-// cache. The main issue is that we want to have separate histograms for each
-// type of cache (regular vs. media, etc), without adding the complexity of
-// keeping track of a potentially large number of histogram objects that have to
-// survive the backend object that created them.
-
-#ifndef NET_DISK_CACHE_BLOCKFILE_HISTOGRAM_MACROS_V3_H_
-#define NET_DISK_CACHE_BLOCKFILE_HISTOGRAM_MACROS_V3_H_
-
-#include "base/metrics/histogram.h"
-
-// -----------------------------------------------------------------------------
-
-// These histograms follow the definition of UMA_HISTOGRAM_XXX except that
-// whenever the name changes (the experiment group changes), the histogram
-// object is re-created.
-
-#define CACHE_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
- do { \
-    static base::HistogramBase* counter(NULL); \
- if (!counter || name != counter->histogram_name()) \
- counter = base::Histogram::FactoryGet( \
- name, min, max, bucket_count, \
- base::Histogram::kUmaTargetedHistogramFlag); \
- counter->Add(sample); \
- } while (0)
-
-#define CACHE_HISTOGRAM_COUNTS(name, sample) CACHE_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1, 1000000, 50)
-
-#define CACHE_HISTOGRAM_COUNTS_10000(name, sample) \
- CACHE_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 10000, 50)
-
-#define CACHE_HISTOGRAM_COUNTS_50000(name, sample) \
- CACHE_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 50000000, 50)
-
-#define CACHE_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
- do { \
-    static base::HistogramBase* counter(NULL); \
- if (!counter || name != counter->histogram_name()) \
- counter = base::Histogram::FactoryTimeGet( \
- name, min, max, bucket_count, \
- base::Histogram::kUmaTargetedHistogramFlag); \
- counter->AddTime(sample); \
- } while (0)
-
-#define CACHE_HISTOGRAM_TIMES(name, sample) CACHE_HISTOGRAM_CUSTOM_TIMES( \
- name, sample, base::TimeDelta::FromMilliseconds(1), \
- base::TimeDelta::FromSeconds(10), 50)
-
-#define CACHE_HISTOGRAM_ENUMERATION(name, sample, boundary_value) do { \
-    static base::HistogramBase* counter(NULL); \
- if (!counter || name != counter->histogram_name()) \
- counter = base::LinearHistogram::FactoryGet( \
- name, 1, boundary_value, boundary_value + 1, \
- base::Histogram::kUmaTargetedHistogramFlag); \
- counter->Add(sample); \
- } while (0)
-
-#define CACHE_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
- CACHE_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
-
-// -----------------------------------------------------------------------------
-
-// HISTOGRAM_HOURS will collect time related data with a granularity of hours
-// and normal values of a few months.
-#define CACHE_HISTOGRAM_HOURS CACHE_HISTOGRAM_COUNTS_10000
-
-// HISTOGRAM_AGE will collect time elapsed since |initial_time|, with a
-// granularity of hours and normal values of a few months.
-#define CACHE_HISTOGRAM_AGE(name, initial_time) \
- CACHE_HISTOGRAM_COUNTS_10000(name, \
- (base::Time::Now() - initial_time).InHours())
-
-// HISTOGRAM_AGE_MS will collect time elapsed since |initial_time|, with the
-// normal resolution of the UMA_HISTOGRAM_TIMES.
-#define CACHE_HISTOGRAM_AGE_MS(name, initial_time)\
- CACHE_HISTOGRAM_TIMES(name, base::TimeTicks::Now() - initial_time)
-
-#define CACHE_HISTOGRAM_CACHE_ERROR(name, sample) \
- CACHE_HISTOGRAM_ENUMERATION(name, sample, 50)
-
-// Generates a UMA histogram of the given type, generating the proper name for
-// it (asking backend_->HistogramName), and adding the provided sample.
-// For example, to generate a regular UMA_HISTOGRAM_COUNTS, this macro would
-// be used as:
-// CACHE_UMA(COUNTS, "MyName", 20);
-// which may translate to:
-// UMA_HISTOGRAM_COUNTS("DiskCache3.MyName_AppCache", 20);
-//
-#define CACHE_UMA(type, name, sample) {\
- const std::string my_name =\
- CACHE_UMA_BACKEND_IMPL_OBJ->HistogramName(name);\
- switch (CACHE_UMA_BACKEND_IMPL_OBJ->cache_type()) {\
- case net::DISK_CACHE:\
- case net::MEDIA_CACHE:\
- case net::APP_CACHE:\
- case net::SHADER_CACHE:\
- case net::PNACL_CACHE:\
- CACHE_HISTOGRAM_##type(my_name.data(), sample);\
- break;\
- default:\
- break;\
- }\
- }
-
-#endif // NET_DISK_CACHE_BLOCKFILE_HISTOGRAM_MACROS_V3_H_
diff --git a/chromium/net/disk_cache/blockfile/in_flight_backend_io.cc b/chromium/net/disk_cache/blockfile/in_flight_backend_io.cc
index 307f7d2a8d0..7cd3a0537a3 100644
--- a/chromium/net/disk_cache/blockfile/in_flight_backend_io.cc
+++ b/chromium/net/disk_cache/blockfile/in_flight_backend_io.cc
@@ -128,7 +128,7 @@ void BackendIO::OpenNextEntry(Rankings::Iterator* iterator,
entry_ptr_ = next_entry;
}
-void BackendIO::EndEnumeration(scoped_ptr<Rankings::Iterator> iterator) {
+void BackendIO::EndEnumeration(std::unique_ptr<Rankings::Iterator> iterator) {
operation_ = OP_END_ENUMERATION;
scoped_iterator_ = std::move(iterator);
}
@@ -413,7 +413,7 @@ void InFlightBackendIO::OpenNextEntry(Rankings::Iterator* iterator,
}
void InFlightBackendIO::EndEnumeration(
- scoped_ptr<Rankings::Iterator> iterator) {
+ std::unique_ptr<Rankings::Iterator> iterator) {
scoped_refptr<BackendIO> operation(
new BackendIO(this, backend_, net::CompletionCallback()));
operation->EndEnumeration(std::move(iterator));
diff --git a/chromium/net/disk_cache/blockfile/in_flight_backend_io.h b/chromium/net/disk_cache/blockfile/in_flight_backend_io.h
index 5ac0bf44235..e1456ceadfc 100644
--- a/chromium/net/disk_cache/blockfile/in_flight_backend_io.h
+++ b/chromium/net/disk_cache/blockfile/in_flight_backend_io.h
@@ -61,7 +61,7 @@ class BackendIO : public BackgroundIO {
void DoomEntriesSince(const base::Time initial_time);
void CalculateSizeOfAllEntries();
void OpenNextEntry(Rankings::Iterator* iterator, Entry** next_entry);
- void EndEnumeration(scoped_ptr<Rankings::Iterator> iterator);
+ void EndEnumeration(std::unique_ptr<Rankings::Iterator> iterator);
void OnExternalCacheHit(const std::string& key);
void CloseEntryImpl(EntryImpl* entry);
void DoomEntryImpl(EntryImpl* entry);
@@ -140,7 +140,7 @@ class BackendIO : public BackgroundIO {
base::Time initial_time_;
base::Time end_time_;
Rankings::Iterator* iterator_;
- scoped_ptr<Rankings::Iterator> scoped_iterator_;
+ std::unique_ptr<Rankings::Iterator> scoped_iterator_;
EntryImpl* entry_;
int index_;
int offset_;
@@ -180,7 +180,7 @@ class InFlightBackendIO : public InFlightIO {
void CalculateSizeOfAllEntries(const net::CompletionCallback& callback);
void OpenNextEntry(Rankings::Iterator* iterator, Entry** next_entry,
const net::CompletionCallback& callback);
- void EndEnumeration(scoped_ptr<Rankings::Iterator> iterator);
+ void EndEnumeration(std::unique_ptr<Rankings::Iterator> iterator);
void OnExternalCacheHit(const std::string& key);
void CloseEntryImpl(EntryImpl* entry);
void DoomEntryImpl(EntryImpl* entry);
diff --git a/chromium/net/disk_cache/blockfile/in_flight_io.cc b/chromium/net/disk_cache/blockfile/in_flight_io.cc
index f04b8ddae58..0334e80fefd 100644
--- a/chromium/net/disk_cache/blockfile/in_flight_io.cc
+++ b/chromium/net/disk_cache/blockfile/in_flight_io.cc
@@ -10,8 +10,8 @@
#include "base/profiler/scoped_tracker.h"
#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
-#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
namespace disk_cache {
diff --git a/chromium/net/disk_cache/blockfile/index_table_v3.cc b/chromium/net/disk_cache/blockfile/index_table_v3.cc
deleted file mode 100644
index 9dca3de0596..00000000000
--- a/chromium/net/disk_cache/blockfile/index_table_v3.cc
+++ /dev/null
@@ -1,1152 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/disk_cache/blockfile/index_table_v3.h"
-
-#include <algorithm>
-#include <limits>
-#include <set>
-#include <utility>
-
-#include "base/bit_cast.h"
-#include "base/bits.h"
-#include "net/base/io_buffer.h"
-#include "net/base/net_errors.h"
-#include "net/disk_cache/disk_cache.h"
-
-using base::Time;
-using base::TimeDelta;
-using disk_cache::CellInfo;
-using disk_cache::CellList;
-using disk_cache::IndexCell;
-using disk_cache::IndexIterator;
-
-namespace {
-
-// The following constants describe the bitfields of an IndexCell, so they are
-// implicitly synchronized with the description of IndexCell in
-// disk_format_v3.h.
-const uint64_t kCellLocationMask = (1 << 22) - 1;
-const uint64_t kCellIdMask = (1 << 18) - 1;
-const uint64_t kCellTimestampMask = (1 << 20) - 1;
-const uint64_t kCellReuseMask = (1 << 4) - 1;
-const uint8_t kCellStateMask = (1 << 3) - 1;
-const uint8_t kCellGroupMask = (1 << 3) - 1;
-const uint8_t kCellSumMask = (1 << 2) - 1;
-
-const uint64_t kCellSmallTableLocationMask = (1 << 16) - 1;
-const uint64_t kCellSmallTableIdMask = (1 << 24) - 1;
-
-const int kCellIdOffset = 22;
-const int kCellTimestampOffset = 40;
-const int kCellReuseOffset = 60;
-const int kCellGroupOffset = 3;
-const int kCellSumOffset = 6;
-
-const int kCellSmallTableIdOffset = 16;
-
-// The number of bits that a hash has to be shifted to grab the part that
-// defines the cell id.
-const int kHashShift = 14;
-const int kSmallTableHashShift = 8;
-
-// Unfortunately we have to break the abstraction a little here: the file
-// number where entries are stored is outside of the control of this code, and
-// it is usually part of the stored address. However, for small tables we only
-// store 16 bits of the address, so the file number is never stored in a cell.
-// We have to infer the file number from the type of entry (normal vs.
-// evicted) and the knowledge that, given that the table will not keep more
-// than 64K entries, a single file of each type is enough.
-const int kEntriesFile = disk_cache::BLOCK_ENTRIES - 1;
-const int kEvictedEntriesFile = disk_cache::BLOCK_EVICTED - 1;
-const int kMaxLocation = 1 << 22;
-const int kMinFileNumber = 1 << 16;
-
-uint32_t GetCellLocation(const IndexCell& cell) {
- return cell.first_part & kCellLocationMask;
-}
-
-uint32_t GetCellSmallTableLocation(const IndexCell& cell) {
- return cell.first_part & kCellSmallTableLocationMask;
-}
-
-uint32_t GetCellId(const IndexCell& cell) {
- return (cell.first_part >> kCellIdOffset) & kCellIdMask;
-}
-
-uint32_t GetCellSmallTableId(const IndexCell& cell) {
- return (cell.first_part >> kCellSmallTableIdOffset) &
- kCellSmallTableIdMask;
-}
-
-int GetCellTimestamp(const IndexCell& cell) {
- return (cell.first_part >> kCellTimestampOffset) & kCellTimestampMask;
-}
-
-int GetCellReuse(const IndexCell& cell) {
- return (cell.first_part >> kCellReuseOffset) & kCellReuseMask;
-}
-
-int GetCellState(const IndexCell& cell) {
- return cell.last_part & kCellStateMask;
-}
-
-int GetCellGroup(const IndexCell& cell) {
- return (cell.last_part >> kCellGroupOffset) & kCellGroupMask;
-}
-
-int GetCellSum(const IndexCell& cell) {
- return (cell.last_part >> kCellSumOffset) & kCellSumMask;
-}
-
-void SetCellLocation(IndexCell* cell, uint32_t address) {
- DCHECK_LE(address, static_cast<uint32_t>(kCellLocationMask));
- cell->first_part &= ~kCellLocationMask;
- cell->first_part |= address;
-}
-
-void SetCellSmallTableLocation(IndexCell* cell, uint32_t address) {
- DCHECK_LE(address, static_cast<uint32_t>(kCellSmallTableLocationMask));
- cell->first_part &= ~kCellSmallTableLocationMask;
- cell->first_part |= address;
-}
-
-void SetCellId(IndexCell* cell, uint32_t hash) {
- DCHECK_LE(hash, static_cast<uint32_t>(kCellIdMask));
- cell->first_part &= ~(kCellIdMask << kCellIdOffset);
- cell->first_part |= static_cast<int64_t>(hash) << kCellIdOffset;
-}
-
-void SetCellSmallTableId(IndexCell* cell, uint32_t hash) {
- DCHECK_LE(hash, static_cast<uint32_t>(kCellSmallTableIdMask));
- cell->first_part &= ~(kCellSmallTableIdMask << kCellSmallTableIdOffset);
- cell->first_part |= static_cast<int64_t>(hash) << kCellSmallTableIdOffset;
-}
-
-void SetCellTimestamp(IndexCell* cell, int timestamp) {
- DCHECK_LT(timestamp, 1 << 20);
- DCHECK_GE(timestamp, 0);
- cell->first_part &= ~(kCellTimestampMask << kCellTimestampOffset);
- cell->first_part |= static_cast<int64_t>(timestamp) << kCellTimestampOffset;
-}
-
-void SetCellReuse(IndexCell* cell, int count) {
- DCHECK_LT(count, 16);
- DCHECK_GE(count, 0);
- cell->first_part &= ~(kCellReuseMask << kCellReuseOffset);
- cell->first_part |= static_cast<int64_t>(count) << kCellReuseOffset;
-}
-
-void SetCellState(IndexCell* cell, disk_cache::EntryState state) {
- cell->last_part &= ~kCellStateMask;
- cell->last_part |= state;
-}
-
-void SetCellGroup(IndexCell* cell, disk_cache::EntryGroup group) {
- cell->last_part &= ~(kCellGroupMask << kCellGroupOffset);
- cell->last_part |= group << kCellGroupOffset;
-}
-
-void SetCellSum(IndexCell* cell, int sum) {
- DCHECK_LT(sum, 4);
- DCHECK_GE(sum, 0);
- cell->last_part &= ~(kCellSumMask << kCellSumOffset);
- cell->last_part |= sum << kCellSumOffset;
-}
-
-// This is a very particular way to calculate the sum, so it will not match if
-// compared against a pure 2-bit, modulo-2 sum.
-int CalculateCellSum(const IndexCell& cell) {
- uint32_t* words = bit_cast<uint32_t*>(&cell);
- uint8_t* bytes = bit_cast<uint8_t*>(&cell);
- uint32_t result = words[0] + words[1];
- result += result >> 16;
- result += (result >> 8) + (bytes[8] & 0x3f);
- result += result >> 4;
- result += result >> 2;
- return result & 3;
-}
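Because the 2-bit sum above is computed over every cell bit except the sum field itself (note the & 0x3f that masks it out of bytes[8]), a cell can be stamped and then re-verified with the same function. A hedged sketch of the round trip, built on the helpers defined in this file (StampCell itself is illustrative; EntryCell::FixSum() performs the stamping step for real cells):

    void StampCell(IndexCell* cell) {
      SetCellSum(cell, CalculateCellSum(*cell));  // Stable: sum bits excluded.
      DCHECK_EQ(GetCellSum(*cell), CalculateCellSum(*cell));  // Verifies, as SanityCheck() below does.
    }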
-
-bool SanityCheck(const IndexCell& cell) {
- if (GetCellSum(cell) != CalculateCellSum(cell))
- return false;
-
- if (GetCellState(cell) > disk_cache::ENTRY_USED ||
- GetCellGroup(cell) == disk_cache::ENTRY_RESERVED ||
- GetCellGroup(cell) > disk_cache::ENTRY_EVICTED) {
- return false;
- }
-
- return true;
-}
-
-int FileNumberFromLocation(int location) {
- return location / kMinFileNumber;
-}
-
-int StartBlockFromLocation(int location) {
- return location % kMinFileNumber;
-}
-
-bool IsValidAddress(disk_cache::Addr address) {
- if (!address.is_initialized() ||
- (address.file_type() != disk_cache::BLOCK_EVICTED &&
- address.file_type() != disk_cache::BLOCK_ENTRIES)) {
- return false;
- }
-
- return address.FileNumber() < FileNumberFromLocation(kMaxLocation);
-}
-
-bool IsNormalState(const IndexCell& cell) {
- disk_cache::EntryState state =
- static_cast<disk_cache::EntryState>(GetCellState(cell));
- DCHECK_NE(state, disk_cache::ENTRY_FREE);
- return state != disk_cache::ENTRY_DELETED &&
- state != disk_cache::ENTRY_FIXING;
-}
-
-inline int GetNextBucket(int min_bucket_num, int max_bucket_num,
- disk_cache::IndexBucket* table,
- disk_cache::IndexBucket** bucket) {
- if (!(*bucket)->next)
- return 0;
-
- int bucket_num = (*bucket)->next / disk_cache::kCellsPerBucket;
- if (bucket_num < min_bucket_num || bucket_num > max_bucket_num) {
-    // The next bucket must fall within the extra table. Note that this is not
-    // an uncommon path, as growing the table may not clean up the link from
-    // the main table to the extra table; that cleanup is performed here when
-    // accessing that bucket for the first time. This behavior has to change
-    // if the tables are ever shrunk.
- (*bucket)->next = 0;
- return 0;
- }
- *bucket = &table[bucket_num - min_bucket_num];
- return bucket_num;
-}
-
-// Updates the |iterator| with the current |cell|. This cell may cause all
-// previous cells to be deleted (when a new target timestamp is found), may be
-// added to the list (if it matches the target timestamp), or may be ignored.
-void UpdateIterator(const disk_cache::EntryCell& cell,
- int limit_time,
- IndexIterator* iterator) {
- int time = cell.GetTimestamp();
-  // Skip times that are not interesting.
- if (iterator->forward && time <= limit_time)
- return;
- if (!iterator->forward && time >= limit_time)
- return;
-
- if ((iterator->forward && time < iterator->timestamp) ||
- (!iterator->forward && time > iterator->timestamp)) {
- // This timestamp is better than the one we had.
- iterator->timestamp = time;
- iterator->cells.clear();
- }
- if (time == iterator->timestamp) {
- CellInfo cell_info = { cell.hash(), cell.GetAddress() };
- iterator->cells.push_back(cell_info);
- }
-}
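UpdateIterator() above is a one-pass "best timestamp wins" reduction: a strictly better time resets the candidate list, an equal time appends, anything else is ignored. The same shape over plain ints, as a hedged sketch (forward case only; |best| starts at INT_MAX, as InitIterator() does below):

    #include <vector>

    void UpdateOldest(int time, int limit_time, int* best,
                      std::vector<int>* ties) {
      if (time <= limit_time)
        return;             // Not interesting for a forward iteration.
      if (time < *best) {   // Strictly better: restart the candidate list.
        *best = time;
        ties->clear();
      }
      if (time == *best)
        ties->push_back(time);
    }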
-
-void InitIterator(IndexIterator* iterator) {
- iterator->cells.clear();
- iterator->timestamp =
- iterator->forward ? std::numeric_limits<int32_t>::max() : 0;
-}
-
-} // namespace
-
-namespace disk_cache {
-
-EntryCell::~EntryCell() {
-}
-
-bool EntryCell::IsValid() const {
- return GetCellLocation(cell_) != 0;
-}
-
-// This code has to map the cell address (up to 22 bits) to a general cache Addr
-// (up to 24 bits of general addressing). It also sets the implied file_number
-// in the case of small tables. See also the comment by the definition of
-// kEntriesFile.
-Addr EntryCell::GetAddress() const {
- uint32_t location = GetLocation();
- int file_number = FileNumberFromLocation(location);
- if (small_table_) {
- DCHECK_EQ(0, file_number);
- file_number = (GetGroup() == ENTRY_EVICTED) ? kEvictedEntriesFile :
- kEntriesFile;
- }
- DCHECK_NE(0, file_number);
- FileType file_type = (GetGroup() == ENTRY_EVICTED) ? BLOCK_EVICTED :
- BLOCK_ENTRIES;
- return Addr(file_type, 1, file_number, StartBlockFromLocation(location));
-}
-
-EntryState EntryCell::GetState() const {
- return static_cast<EntryState>(GetCellState(cell_));
-}
-
-EntryGroup EntryCell::GetGroup() const {
- return static_cast<EntryGroup>(GetCellGroup(cell_));
-}
-
-int EntryCell::GetReuse() const {
- return GetCellReuse(cell_);
-}
-
-int EntryCell::GetTimestamp() const {
- return GetCellTimestamp(cell_);
-}
-
-void EntryCell::SetState(EntryState state) {
- SetCellState(&cell_, state);
-}
-
-void EntryCell::SetGroup(EntryGroup group) {
- SetCellGroup(&cell_, group);
-}
-
-void EntryCell::SetReuse(int count) {
- SetCellReuse(&cell_, count);
-}
-
-void EntryCell::SetTimestamp(int timestamp) {
- SetCellTimestamp(&cell_, timestamp);
-}
-
-// Static.
-EntryCell EntryCell::GetEntryCellForTest(int32_t cell_num,
- uint32_t hash,
- Addr address,
- IndexCell* cell,
- bool small_table) {
- if (cell) {
- EntryCell entry_cell(cell_num, hash, *cell, small_table);
- return entry_cell;
- }
-
- return EntryCell(cell_num, hash, address, small_table);
-}
-
-void EntryCell::SerializaForTest(IndexCell* destination) {
- FixSum();
- Serialize(destination);
-}
-
-EntryCell::EntryCell() : cell_num_(0), hash_(0), small_table_(false) {
- cell_.Clear();
-}
-
-EntryCell::EntryCell(int32_t cell_num,
- uint32_t hash,
- Addr address,
- bool small_table)
- : cell_num_(cell_num), hash_(hash), small_table_(small_table) {
- DCHECK(IsValidAddress(address) || !address.value());
-
- cell_.Clear();
- SetCellState(&cell_, ENTRY_NEW);
- SetCellGroup(&cell_, ENTRY_NO_USE);
- if (small_table) {
- DCHECK(address.FileNumber() == kEntriesFile ||
- address.FileNumber() == kEvictedEntriesFile);
- SetCellSmallTableLocation(&cell_, address.start_block());
- SetCellSmallTableId(&cell_, hash >> kSmallTableHashShift);
- } else {
- uint32_t location = address.FileNumber() << 16 | address.start_block();
- SetCellLocation(&cell_, location);
- SetCellId(&cell_, hash >> kHashShift);
- }
-}
-
-EntryCell::EntryCell(int32_t cell_num,
- uint32_t hash,
- const IndexCell& cell,
- bool small_table)
- : cell_num_(cell_num),
- hash_(hash),
- cell_(cell),
- small_table_(small_table) {}
-
-void EntryCell::FixSum() {
- SetCellSum(&cell_, CalculateCellSum(cell_));
-}
-
-uint32_t EntryCell::GetLocation() const {
- if (small_table_)
- return GetCellSmallTableLocation(cell_);
-
- return GetCellLocation(cell_);
-}
-
-uint32_t EntryCell::RecomputeHash() {
- if (small_table_) {
- hash_ &= (1 << kSmallTableHashShift) - 1;
- hash_ |= GetCellSmallTableId(cell_) << kSmallTableHashShift;
- return hash_;
- }
-
- hash_ &= (1 << kHashShift) - 1;
- hash_ |= GetCellId(cell_) << kHashShift;
- return hash_;
-}
-
-void EntryCell::Serialize(IndexCell* destination) const {
- *destination = cell_;
-}
-
-EntrySet::EntrySet() : evicted_count(0), current(0) {
-}
-
-EntrySet::EntrySet(const EntrySet& other) = default;
-
-EntrySet::~EntrySet() {
-}
-
-IndexIterator::IndexIterator() {
-}
-
-IndexIterator::~IndexIterator() {
-}
-
-IndexTableInitData::IndexTableInitData() {
-}
-
-IndexTableInitData::~IndexTableInitData() {
-}
-
-// -----------------------------------------------------------------------
-
-IndexTable::IndexTable(IndexTableBackend* backend)
- : backend_(backend),
- header_(NULL),
- main_table_(NULL),
- extra_table_(NULL),
- modified_(false),
- small_table_(false) {
-}
-
-IndexTable::~IndexTable() {
-}
-
-// For a general description of the index tables see:
-// http://www.chromium.org/developers/design-documents/network-stack/disk-cache/disk-cache-v3#TOC-Index
-//
-// The index is split between two tables: the main_table_ and the extra_table_.
-// The main table can grow only by doubling its number of cells, while the
-// extra table can grow slowly, because it only contains cells that overflow
-// from the main table. In order to locate a given cell, part of the hash is
-// used directly as an index into the main table; once that bucket is located,
-// all cells with that partial hash (i.e., belonging to that bucket) are
-// inspected, and if present, the next bucket (located on the extra table) is
-// then located. For more information on bucket chaining see:
-// http://www.chromium.org/developers/design-documents/network-stack/disk-cache/disk-cache-v3#TOC-Buckets
-//
-// There are two cases when increasing the size:
-// - Doubling the size of the main table
-// - Adding more entries to the extra table
-//
-// For example, consider a 64k main table with 8k cells on the extra table (for
-// a total of 72k cells). Init can be called to add another 8k cells at the end
-// (grow to 80k cells). When the size of the extra table approaches 64k, Init
-// can be called to double the main table (to 128k) and go back to a small extra
-// table.
-void IndexTable::Init(IndexTableInitData* params) {
- bool growing = header_ != NULL;
- scoped_ptr<IndexBucket[]> old_extra_table;
- header_ = &params->index_bitmap->header;
-
- if (params->main_table) {
- if (main_table_) {
- // This is doubling the size of main table.
- DCHECK_EQ(base::bits::Log2Floor(header_->table_len),
- base::bits::Log2Floor(backup_header_->table_len) + 1);
- int extra_size = (header()->max_bucket - mask_) * kCellsPerBucket;
- DCHECK_GE(extra_size, 0);
-
- // Doubling the size implies deleting the extra table and moving as many
- // cells as we can to the main table, so we first copy the old one. This
- // is not required when just growing the extra table because we don't
- // move any cell in that case.
- old_extra_table.reset(new IndexBucket[extra_size]);
- memcpy(old_extra_table.get(), extra_table_,
- extra_size * sizeof(IndexBucket));
- memset(params->extra_table, 0, extra_size * sizeof(IndexBucket));
- }
- main_table_ = params->main_table;
- }
- DCHECK(main_table_);
- extra_table_ = params->extra_table;
-
- // extra_bits_ is really measured against table-size specific values.
- const int kMaxAbsoluteExtraBits = 12; // From smallest to largest table.
- const int kMaxExtraBitsSmallTable = 6; // From smallest to 64K table.
-
- extra_bits_ = base::bits::Log2Floor(header_->table_len) -
- base::bits::Log2Floor(kBaseTableLen);
- DCHECK_GE(extra_bits_, 0);
- DCHECK_LT(extra_bits_, kMaxAbsoluteExtraBits);
-
-  // Note that, following the code above, the constants could be derived as
- // kMaxAbsoluteExtraBits = base::bits::Log2Floor(max table len) -
- // base::bits::Log2Floor(kBaseTableLen);
- // = 22 - base::bits::Log2Floor(1024) = 22 - 10;
- // kMaxExtraBitsSmallTable = base::bits::Log2Floor(max 16 bit table) - 10.
-
- mask_ = ((kBaseTableLen / kCellsPerBucket) << extra_bits_) - 1;
- small_table_ = extra_bits_ < kMaxExtraBitsSmallTable;
- if (!small_table_)
- extra_bits_ -= kMaxExtraBitsSmallTable;
-
- // table_len keeps the max number of cells stored by the index. We need a
- // bitmap with 1 bit per cell, and that bitmap has num_words 32-bit words.
- int num_words = (header_->table_len + 31) / 32;
-
- if (old_extra_table) {
-    // All the cells from the extra table are moving to the new tables, so
-    // before creating the bitmaps, clear the part of the bitmap that refers to
-    // the extra table.
- int old_main_table_bit_words = ((mask_ >> 1) + 1) * kCellsPerBucket / 32;
- DCHECK_GT(num_words, old_main_table_bit_words);
- memset(params->index_bitmap->bitmap + old_main_table_bit_words, 0,
- (num_words - old_main_table_bit_words) * sizeof(int32_t));
-
- DCHECK(growing);
- int old_num_words = (backup_header_.get()->table_len + 31) / 32;
- DCHECK_GT(old_num_words, old_main_table_bit_words);
- memset(backup_bitmap_storage_.get() + old_main_table_bit_words, 0,
- (old_num_words - old_main_table_bit_words) * sizeof(int32_t));
- }
- bitmap_.reset(new Bitmap(params->index_bitmap->bitmap, header_->table_len,
- num_words));
-
- if (growing) {
- int old_num_words = (backup_header_.get()->table_len + 31) / 32;
- DCHECK_GE(num_words, old_num_words);
- scoped_ptr<uint32_t[]> storage(new uint32_t[num_words]);
- memcpy(storage.get(), backup_bitmap_storage_.get(),
- old_num_words * sizeof(int32_t));
- memset(storage.get() + old_num_words, 0,
- (num_words - old_num_words) * sizeof(int32_t));
-
- backup_bitmap_storage_.swap(storage);
- backup_header_->table_len = header_->table_len;
- } else {
- backup_bitmap_storage_.reset(params->backup_bitmap.release());
- backup_header_.reset(params->backup_header.release());
- }
-
- num_words = (backup_header_->table_len + 31) / 32;
- backup_bitmap_.reset(new Bitmap(backup_bitmap_storage_.get(),
- backup_header_->table_len, num_words));
- if (old_extra_table)
- MoveCells(old_extra_table.get());
-
- if (small_table_)
- DCHECK(header_->flags & SMALL_CACHE);
-
- // All tables and backups are needed for operation.
- DCHECK(main_table_);
- DCHECK(extra_table_);
- DCHECK(bitmap_.get());
-}
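To make the geometry arithmetic above concrete, the following standalone sketch re-derives extra_bits_, mask_ and small_table_ from table_len. It assumes kBaseTableLen == 1024 and kCellsPerBucket == 4, the values implied by the derivation comment inside Init(); treat it as an illustration, not the shipping code.

#include <cstdint>

// Hedged re-derivation of the table geometry computed by IndexTable::Init().
// kBaseTableLen (1024) and kCellsPerBucket (4) are assumed values.
struct Geometry {
  int extra_bits;
  uint32_t mask;
  bool small_table;
};

int Log2Floor(uint32_t n) {
  int result = -1;
  for (; n; n >>= 1)
    result++;
  return result;
}

Geometry Derive(uint32_t table_len) {
  const int kBaseTableLen = 1024;
  const int kCellsPerBucket = 4;
  const int kMaxExtraBitsSmallTable = 6;
  Geometry g;
  g.extra_bits = Log2Floor(table_len) - Log2Floor(kBaseTableLen);
  g.mask = ((kBaseTableLen / kCellsPerBucket) << g.extra_bits) - 1;
  g.small_table = g.extra_bits < kMaxExtraBitsSmallTable;
  if (!g.small_table)
    g.extra_bits -= kMaxExtraBitsSmallTable;
  // For table_len == 1536 (1024 main cells plus 512 extra, as in the unit
  // tests below): extra_bits == 0, mask == 255 (256 main buckets of 4 cells
  // each), small_table == true.
  return g;
}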
-
-void IndexTable::Shutdown() {
- header_ = NULL;
- main_table_ = NULL;
- extra_table_ = NULL;
- bitmap_.reset();
- backup_bitmap_.reset();
- backup_header_.reset();
- backup_bitmap_storage_.reset();
- modified_ = false;
-}
-
-// The general method for locating cells is to:
-// 1. Get the first bucket. This usually means directly indexing the table (as
-// this method does), or iterating through all possible buckets.
-// 2. Iterate through all the cells in that first bucket.
-// 3. If there is a linked bucket, locate it directly in the extra table.
-// 4. Go back to 2, as needed.
-//
-// One consequence of this pattern is that we never start looking at buckets in
-// the extra table, unless we are following a link from the main table.
-EntrySet IndexTable::LookupEntries(uint32_t hash) {
- EntrySet entries;
- int bucket_num = static_cast<int>(hash & mask_);
- IndexBucket* bucket = &main_table_[bucket_num];
- do {
- for (int i = 0; i < kCellsPerBucket; i++) {
- IndexCell* current_cell = &bucket->cells[i];
- if (!GetLocation(*current_cell))
- continue;
- if (!SanityCheck(*current_cell)) {
- NOTREACHED();
- int cell_num = bucket_num * kCellsPerBucket + i;
- current_cell->Clear();
- bitmap_->Set(cell_num, false);
- backup_bitmap_->Set(cell_num, false);
- modified_ = true;
- continue;
- }
- int cell_num = bucket_num * kCellsPerBucket + i;
- if (MisplacedHash(*current_cell, hash)) {
- HandleMisplacedCell(current_cell, cell_num, hash & mask_);
- } else if (IsHashMatch(*current_cell, hash)) {
- EntryCell entry_cell(cell_num, hash, *current_cell, small_table_);
- CheckState(entry_cell);
- if (entry_cell.GetState() != ENTRY_DELETED) {
- entries.cells.push_back(entry_cell);
- if (entry_cell.GetGroup() == ENTRY_EVICTED)
- entries.evicted_count++;
- }
- }
- }
- bucket_num = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_,
- &bucket);
- } while (bucket_num);
- return entries;
-}
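The same chain walk described in the comment above also drives FindEntryCellImpl(), WalkTables() and CheckBucketList(). Distilled into a hedged sketch, with Visit() standing in for a hypothetical per-cell callback (ForEachCellInChain is not a real member of IndexTable), the pattern is:

// Sketch only: Visit() and this method are hypothetical; the loop shape
// mirrors the lookup methods in this file.
void IndexTable::ForEachCellInChain(uint32_t hash) {
  int bucket_num = static_cast<int>(hash & mask_);
  IndexBucket* bucket = &main_table_[bucket_num];
  do {
    for (int i = 0; i < kCellsPerBucket; i++)
      Visit(&bucket->cells[i], bucket_num * kCellsPerBucket + i);
    // GetNextBucket() returns 0 when the chain ends; otherwise it repoints
    // |bucket| at the linked bucket inside extra_table_.
    bucket_num = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_,
                               &bucket);
  } while (bucket_num);
}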
-
-EntryCell IndexTable::CreateEntryCell(uint32_t hash, Addr address) {
- DCHECK(IsValidAddress(address));
- DCHECK(address.FileNumber() || address.start_block());
-
- int bucket_num = static_cast<int>(hash & mask_);
- int cell_num = 0;
- IndexBucket* bucket = &main_table_[bucket_num];
- IndexCell* current_cell = NULL;
- bool found = false;
- do {
- for (int i = 0; i < kCellsPerBucket && !found; i++) {
- current_cell = &bucket->cells[i];
- if (!GetLocation(*current_cell)) {
- cell_num = bucket_num * kCellsPerBucket + i;
- found = true;
- }
- }
- if (found)
- break;
- bucket_num = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_,
- &bucket);
- } while (bucket_num);
-
- if (!found) {
- bucket_num = NewExtraBucket();
- if (bucket_num) {
- cell_num = bucket_num * kCellsPerBucket;
- bucket->next = cell_num;
- bucket = &extra_table_[bucket_num - (mask_ + 1)];
- bucket->hash = hash & mask_;
- found = true;
- } else {
-      // Address 0 is a reserved value, and the caller interprets it as invalid.
- address.set_value(0);
- }
- }
-
- EntryCell entry_cell(cell_num, hash, address, small_table_);
- if (address.file_type() == BLOCK_EVICTED)
- entry_cell.SetGroup(ENTRY_EVICTED);
- else
- entry_cell.SetGroup(ENTRY_NO_USE);
- Save(&entry_cell);
-
- if (found) {
- bitmap_->Set(cell_num, true);
- backup_bitmap_->Set(cell_num, true);
- header()->used_cells++;
- modified_ = true;
- }
-
- return entry_cell;
-}
-
-EntryCell IndexTable::FindEntryCell(uint32_t hash, Addr address) {
- return FindEntryCellImpl(hash, address, false);
-}
-
-int IndexTable::CalculateTimestamp(Time time) {
- TimeDelta delta = time - Time::FromInternalValue(header_->base_time);
- return std::max(delta.InMinutes(), 0);
-}
-
-base::Time IndexTable::TimeFromTimestamp(int timestamp) {
- return Time::FromInternalValue(header_->base_time) +
- TimeDelta::FromMinutes(timestamp);
-}
-
-void IndexTable::SetState(uint32_t hash, Addr address, EntryState state) {
- EntryCell cell = FindEntryCellImpl(hash, address, state == ENTRY_FREE);
- if (!cell.IsValid()) {
- NOTREACHED();
- return;
- }
-
- EntryState old_state = cell.GetState();
- switch (state) {
- case ENTRY_FREE:
- DCHECK_EQ(old_state, ENTRY_DELETED);
- break;
- case ENTRY_NEW:
- DCHECK_EQ(old_state, ENTRY_FREE);
- break;
- case ENTRY_OPEN:
- DCHECK_EQ(old_state, ENTRY_USED);
- break;
- case ENTRY_MODIFIED:
- DCHECK_EQ(old_state, ENTRY_OPEN);
- break;
- case ENTRY_DELETED:
- DCHECK(old_state == ENTRY_NEW || old_state == ENTRY_OPEN ||
- old_state == ENTRY_MODIFIED);
- break;
- case ENTRY_USED:
- DCHECK(old_state == ENTRY_NEW || old_state == ENTRY_OPEN ||
- old_state == ENTRY_MODIFIED);
- break;
- case ENTRY_FIXING:
- break;
-  }
-
- modified_ = true;
- if (state == ENTRY_DELETED) {
- bitmap_->Set(cell.cell_num(), false);
- backup_bitmap_->Set(cell.cell_num(), false);
- } else if (state == ENTRY_FREE) {
- cell.Clear();
- Write(cell);
- header()->used_cells--;
- return;
- }
- cell.SetState(state);
-
- Save(&cell);
-}
-
-void IndexTable::UpdateTime(uint32_t hash, Addr address, base::Time current) {
- EntryCell cell = FindEntryCell(hash, address);
- if (!cell.IsValid())
- return;
-
- int minutes = CalculateTimestamp(current);
-
- // Keep about 3 months of headroom.
- const int kMaxTimestamp = (1 << 20) - 60 * 24 * 90;
- if (minutes > kMaxTimestamp) {
- // TODO(rvargas):
- // Update header->old_time and trigger a timer
- // Rebaseline timestamps and don't update sums
- // Start a timer (about 2 backups)
-    // Fix all checksums and trigger another timer
-    // Update header->old_time because rebaseline is done.
- minutes = std::min(minutes, (1 << 20) - 1);
- }
-
- cell.SetTimestamp(minutes);
- Save(&cell);
-}
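The headroom constant above reads more easily with the arithmetic spelled out; a minimal compile-time sketch matching the literals in UpdateTime():

// The timestamp field holds 20 bits of minutes since base_time. Keeping
// about 90 days (60 * 24 * 90 == 129,600 minutes) of headroom below the
// 2^20 == 1,048,576 ceiling leaves kMaxTimestamp == 918,976.
constexpr int kMaxTimestamp = (1 << 20) - 60 * 24 * 90;
static_assert(kMaxTimestamp == 918976, "about 3 months below the 20-bit cap");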
-
-void IndexTable::Save(EntryCell* cell) {
- cell->FixSum();
- Write(*cell);
-}
-
-void IndexTable::GetOldest(IndexIterator* no_use,
- IndexIterator* low_use,
- IndexIterator* high_use) {
- no_use->forward = true;
- low_use->forward = true;
- high_use->forward = true;
- InitIterator(no_use);
- InitIterator(low_use);
- InitIterator(high_use);
-
- WalkTables(-1, no_use, low_use, high_use);
-}
-
-bool IndexTable::GetNextCells(IndexIterator* iterator) {
- int current_time = iterator->timestamp;
- InitIterator(iterator);
-
- WalkTables(current_time, iterator, iterator, iterator);
- return !iterator->cells.empty();
-}
-
-void IndexTable::OnBackupTimer() {
- if (!modified_)
- return;
-
- int num_words = (header_->table_len + 31) / 32;
- int num_bytes = num_words * 4 + static_cast<int>(sizeof(*header_));
- scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(num_bytes));
- memcpy(buffer->data(), header_, sizeof(*header_));
- memcpy(buffer->data() + sizeof(*header_), backup_bitmap_storage_.get(),
- num_words * 4);
- backend_->SaveIndex(buffer.get(), num_bytes);
- modified_ = false;
-}
-
-// -----------------------------------------------------------------------
-
-EntryCell IndexTable::FindEntryCellImpl(uint32_t hash,
- Addr address,
- bool allow_deleted) {
- int bucket_num = static_cast<int>(hash & mask_);
- IndexBucket* bucket = &main_table_[bucket_num];
- do {
- for (int i = 0; i < kCellsPerBucket; i++) {
- IndexCell* current_cell = &bucket->cells[i];
- if (!GetLocation(*current_cell))
- continue;
- DCHECK(SanityCheck(*current_cell));
- if (IsHashMatch(*current_cell, hash)) {
- // We have a match.
- int cell_num = bucket_num * kCellsPerBucket + i;
- EntryCell entry_cell(cell_num, hash, *current_cell, small_table_);
- if (entry_cell.GetAddress() != address)
- continue;
-
- if (!allow_deleted && entry_cell.GetState() == ENTRY_DELETED)
- continue;
-
- return entry_cell;
- }
- }
- bucket_num = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_,
- &bucket);
- } while (bucket_num);
- return EntryCell();
-}
-
-void IndexTable::CheckState(const EntryCell& cell) {
- int current_state = cell.GetState();
- if (current_state != ENTRY_FIXING) {
- bool present = ((current_state & 3) != 0); // Look at the last two bits.
- if (present != bitmap_->Get(cell.cell_num()) ||
- present != backup_bitmap_->Get(cell.cell_num())) {
- // There's a mismatch.
- if (current_state == ENTRY_DELETED) {
- // We were in the process of deleting this entry. Finish now.
- backend_->DeleteCell(cell);
- } else {
- current_state = ENTRY_FIXING;
- EntryCell bad_cell(cell);
- bad_cell.SetState(ENTRY_FIXING);
- Save(&bad_cell);
- }
- }
- }
-
- if (current_state == ENTRY_FIXING)
- backend_->FixCell(cell);
-}
-
-void IndexTable::Write(const EntryCell& cell) {
- IndexBucket* bucket = NULL;
- int bucket_num = cell.cell_num() / kCellsPerBucket;
- if (bucket_num < static_cast<int32_t>(mask_ + 1)) {
- bucket = &main_table_[bucket_num];
- } else {
- DCHECK_LE(bucket_num, header()->max_bucket);
- bucket = &extra_table_[bucket_num - (mask_ + 1)];
- }
-
- int cell_number = cell.cell_num() % kCellsPerBucket;
- if (GetLocation(bucket->cells[cell_number]) && cell.GetLocation()) {
- DCHECK_EQ(cell.GetLocation(),
- GetLocation(bucket->cells[cell_number]));
- }
- cell.Serialize(&bucket->cells[cell_number]);
-}
-
-int IndexTable::NewExtraBucket() {
- int safe_window = (header()->table_len < kNumExtraBlocks * 2) ?
- kNumExtraBlocks / 4 : kNumExtraBlocks;
- if (header()->table_len - header()->max_bucket * kCellsPerBucket <
- safe_window) {
- backend_->GrowIndex();
- }
-
- if (header()->max_bucket * kCellsPerBucket ==
- header()->table_len - kCellsPerBucket) {
- return 0;
- }
-
- header()->max_bucket++;
- return header()->max_bucket;
-}
-
-void IndexTable::WalkTables(int limit_time,
- IndexIterator* no_use,
- IndexIterator* low_use,
- IndexIterator* high_use) {
- header_->num_no_use_entries = 0;
- header_->num_low_use_entries = 0;
- header_->num_high_use_entries = 0;
- header_->num_evicted_entries = 0;
-
- for (int i = 0; i < static_cast<int32_t>(mask_ + 1); i++) {
- int bucket_num = i;
- IndexBucket* bucket = &main_table_[i];
- do {
- UpdateFromBucket(bucket, i, limit_time, no_use, low_use, high_use);
-
- bucket_num = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_,
- &bucket);
- } while (bucket_num);
- }
- header_->num_entries = header_->num_no_use_entries +
- header_->num_low_use_entries +
- header_->num_high_use_entries +
- header_->num_evicted_entries;
- modified_ = true;
-}
-
-void IndexTable::UpdateFromBucket(IndexBucket* bucket, int bucket_hash,
- int limit_time,
- IndexIterator* no_use,
- IndexIterator* low_use,
- IndexIterator* high_use) {
- for (int i = 0; i < kCellsPerBucket; i++) {
- IndexCell& current_cell = bucket->cells[i];
- if (!GetLocation(current_cell))
- continue;
- DCHECK(SanityCheck(current_cell));
- if (!IsNormalState(current_cell))
- continue;
-
- EntryCell entry_cell(0, GetFullHash(current_cell, bucket_hash),
- current_cell, small_table_);
- switch (GetCellGroup(current_cell)) {
- case ENTRY_NO_USE:
- UpdateIterator(entry_cell, limit_time, no_use);
- header_->num_no_use_entries++;
- break;
- case ENTRY_LOW_USE:
- UpdateIterator(entry_cell, limit_time, low_use);
- header_->num_low_use_entries++;
- break;
- case ENTRY_HIGH_USE:
- UpdateIterator(entry_cell, limit_time, high_use);
- header_->num_high_use_entries++;
- break;
- case ENTRY_EVICTED:
- header_->num_evicted_entries++;
- break;
- default:
- NOTREACHED();
- }
- }
-}
-
-// This code is only called from Init() so the internal state of this object is
-// in flux (this method is performing the last steps of re-initialization). As
-// such, random methods are not supposed to work at this point, so whatever this
-// method calls should be relatively well controlled and it may require some
-// degree of "stable state faking".
-void IndexTable::MoveCells(IndexBucket* old_extra_table) {
- int max_hash = (mask_ + 1) / 2;
- int max_bucket = header()->max_bucket;
- header()->max_bucket = mask_;
- int used_cells = header()->used_cells;
-
- // Consider a large cache: a cell stores the upper 18 bits of the hash
-  // (h >> 14). If the table is, say, 8 times the original size (growing from
-  // 4x), the bit that we are interested in would be the 3rd bit of the stored
-  // value, in other words 'multiplier' >> 1.
- uint32_t new_bit = (1 << extra_bits_) >> 1;
-
- scoped_ptr<IndexBucket[]> old_main_table;
- IndexBucket* source_table = main_table_;
- bool upgrade_format = !extra_bits_;
- if (upgrade_format) {
- // This method should deal with migrating a small table to a big one. Given
- // that the first thing to do is read the old table, set small_table_ for
- // the size of the old table. Now, when moving a cell, the result cannot be
- // placed in the old table or we will end up reading it again and attempting
- // to move it, so we have to copy the whole table at once.
- DCHECK(!small_table_);
- small_table_ = true;
- old_main_table.reset(new IndexBucket[max_hash]);
- memcpy(old_main_table.get(), main_table_, max_hash * sizeof(IndexBucket));
- memset(main_table_, 0, max_hash * sizeof(IndexBucket));
- source_table = old_main_table.get();
- }
-
- for (int i = 0; i < max_hash; i++) {
- int bucket_num = i;
- IndexBucket* bucket = &source_table[i];
- do {
- for (int j = 0; j < kCellsPerBucket; j++) {
- IndexCell& current_cell = bucket->cells[j];
- if (!GetLocation(current_cell))
- continue;
- DCHECK(SanityCheck(current_cell));
- if (bucket_num == i) {
- if (upgrade_format || (GetHashValue(current_cell) & new_bit)) {
- // Move this cell to the upper half of the table.
- MoveSingleCell(&current_cell, bucket_num * kCellsPerBucket + j, i,
- true);
- }
- } else {
- // All cells on extra buckets have to move.
- MoveSingleCell(&current_cell, bucket_num * kCellsPerBucket + j, i,
- true);
- }
- }
-
-      // There is no need to clear the old bucket->next value because it falls
- // within the main table so it will be fixed when attempting to follow
- // the link.
- bucket_num = GetNextBucket(max_hash, max_bucket, old_extra_table,
- &bucket);
- } while (bucket_num);
- }
-
- DCHECK_EQ(header()->used_cells, used_cells);
-
- if (upgrade_format) {
- small_table_ = false;
- header()->flags &= ~SMALL_CACHE;
- }
-}
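A worked example of the new_bit computation in MoveCells(), as a compile-time sketch of the 4x-to-8x growth case mentioned in its comment:

// Growing from 4x to 8x the base size gives extra_bits_ == 3, so the bit
// deciding whether a cell moves to the upper half of the doubled table is
// (1 << 3) >> 1 == 4 -- the 3rd bit of the stored id, i.e. 'multiplier' >> 1.
constexpr int extra_bits = 3;
constexpr unsigned new_bit = (1u << extra_bits) >> 1;
static_assert(new_bit == 4, "third bit of the stored id");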
-
-void IndexTable::MoveSingleCell(IndexCell* current_cell, int cell_num,
- int main_table_index, bool growing) {
- uint32_t hash = GetFullHash(*current_cell, main_table_index);
- EntryCell old_cell(cell_num, hash, *current_cell, small_table_);
-
- // This method may be called when moving entries from a small table to a
- // normal table. In that case, the caller (MoveCells) has to read the old
- // table, so it needs small_table_ set to true, but this method needs to
-  // write to the new table, so small_table_ has to be set to false, and the
- // value restored to true before returning.
- bool upgrade_format = !extra_bits_ && growing;
- if (upgrade_format)
- small_table_ = false;
- EntryCell new_cell = CreateEntryCell(hash, old_cell.GetAddress());
-
- if (!new_cell.IsValid()) {
- // We'll deal with this entry later.
- if (upgrade_format)
- small_table_ = true;
- return;
- }
-
- new_cell.SetState(old_cell.GetState());
- new_cell.SetGroup(old_cell.GetGroup());
- new_cell.SetReuse(old_cell.GetReuse());
- new_cell.SetTimestamp(old_cell.GetTimestamp());
- Save(&new_cell);
- modified_ = true;
- if (upgrade_format)
- small_table_ = true;
-
- if (old_cell.GetState() == ENTRY_DELETED) {
- bitmap_->Set(new_cell.cell_num(), false);
- backup_bitmap_->Set(new_cell.cell_num(), false);
- }
-
- if (!growing || cell_num / kCellsPerBucket == main_table_index) {
- // Only delete entries that live on the main table.
- if (!upgrade_format) {
- old_cell.Clear();
- Write(old_cell);
- }
-
- if (cell_num != new_cell.cell_num()) {
- bitmap_->Set(old_cell.cell_num(), false);
- backup_bitmap_->Set(old_cell.cell_num(), false);
- }
- }
- header()->used_cells--;
-}
-
-void IndexTable::HandleMisplacedCell(IndexCell* current_cell, int cell_num,
- int main_table_index) {
- NOTREACHED(); // No unit tests yet.
-
- // The cell may be misplaced, or a duplicate cell exists with this data.
- uint32_t hash = GetFullHash(*current_cell, main_table_index);
- MoveSingleCell(current_cell, cell_num, main_table_index, false);
-
- // Now look for a duplicate cell.
- CheckBucketList(hash & mask_);
-}
-
-void IndexTable::CheckBucketList(int bucket_num) {
- typedef std::pair<int, EntryGroup> AddressAndGroup;
- std::set<AddressAndGroup> entries;
- IndexBucket* bucket = &main_table_[bucket_num];
- int bucket_hash = bucket_num;
- do {
- for (int i = 0; i < kCellsPerBucket; i++) {
- IndexCell* current_cell = &bucket->cells[i];
- if (!GetLocation(*current_cell))
- continue;
- if (!SanityCheck(*current_cell)) {
- NOTREACHED();
- current_cell->Clear();
- continue;
- }
- int cell_num = bucket_num * kCellsPerBucket + i;
- EntryCell cell(cell_num, GetFullHash(*current_cell, bucket_hash),
- *current_cell, small_table_);
- if (!entries.insert(std::make_pair(cell.GetAddress().value(),
- cell.GetGroup())).second) {
- current_cell->Clear();
- continue;
- }
- CheckState(cell);
- }
-
- bucket_num = GetNextBucket(mask_ + 1, header()->max_bucket, extra_table_,
- &bucket);
- } while (bucket_num);
-}
-
-uint32_t IndexTable::GetLocation(const IndexCell& cell) {
- if (small_table_)
- return GetCellSmallTableLocation(cell);
-
- return GetCellLocation(cell);
-}
-
-uint32_t IndexTable::GetHashValue(const IndexCell& cell) {
- if (small_table_)
- return GetCellSmallTableId(cell);
-
- return GetCellId(cell);
-}
-
-uint32_t IndexTable::GetFullHash(const IndexCell& cell, uint32_t lower_part) {
- // It is OK for the high order bits of lower_part to overlap with the stored
- // part of the hash.
- if (small_table_)
- return (GetCellSmallTableId(cell) << kSmallTableHashShift) | lower_part;
-
- return (GetCellId(cell) << kHashShift) | lower_part;
-}
-
-// All the bits stored in the cell should match the provided hash.
-bool IndexTable::IsHashMatch(const IndexCell& cell, uint32_t hash) {
- hash = small_table_ ? hash >> kSmallTableHashShift : hash >> kHashShift;
- return GetHashValue(cell) == hash;
-}
-
-bool IndexTable::MisplacedHash(const IndexCell& cell, uint32_t hash) {
- if (!extra_bits_)
- return false;
-
- uint32_t mask = (1 << extra_bits_) - 1;
- hash = small_table_ ? hash >> kSmallTableHashShift : hash >> kHashShift;
- return (GetHashValue(cell) & mask) != (hash & mask);
-}
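To see how the helpers above split and re-join a hash, here is a hedged round-trip sketch for the large-table case. kHashShift == 14 is an assumption, taken from the "upper 18 bits of the hash (h >> 14)" comment in MoveCells():

#include <cstdint>

// Illustrative round trip: the cell stores the id (upper bits) while the
// bucket index supplies the lower bits; GetFullHash() re-joins them.
constexpr int kHashShift = 14;  // Assumed value.
constexpr uint32_t kHash = 0x55aa6699;
constexpr uint32_t kStoredId = kHash >> kHashShift;            // In the cell.
constexpr uint32_t kLowerPart = kHash & ((1u << kHashShift) - 1);  // Bucket.
static_assert(((kStoredId << kHashShift) | kLowerPart) == kHash,
              "GetFullHash() recovers the original hash");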
-
-} // namespace disk_cache
diff --git a/chromium/net/disk_cache/blockfile/index_table_v3.h b/chromium/net/disk_cache/blockfile/index_table_v3.h
deleted file mode 100644
index 001b43811f5..00000000000
--- a/chromium/net/disk_cache/blockfile/index_table_v3.h
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef NET_DISK_CACHE_BLOCKFILE_INDEX_TABLE_V3_H_
-#define NET_DISK_CACHE_BLOCKFILE_INDEX_TABLE_V3_H_
-
-// The IndexTable class is in charge of handling all the details about the main
-// index table of the cache. It provides methods to locate entries in the cache,
-// create new entries and modify existing entries. It hides the fact that the
-// table is backed up across multiple physical files, and that the files can
-// grow and be remapped while the cache is in use. However, note that this class
-// doesn't do any direct management of the backing files, and it operates only
-// with the tables in memory.
-//
-// When the current index needs to grow, the backend is notified so that files
-// are extended and remapped as needed. After that, the IndexTable should be
-// re-initialized with the new structures. Note that the IndexTable instance is
-// still functional while the backend performs file IO.
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <vector>
-
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
-#include "net/base/net_export.h"
-#include "net/disk_cache/blockfile/addr.h"
-#include "net/disk_cache/blockfile/bitmap.h"
-#include "net/disk_cache/blockfile/disk_format_v3.h"
-
-namespace net {
-class IOBuffer;
-}
-
-namespace disk_cache {
-
-class BackendImplV3;
-struct InitResult;
-
-// An EntryCell represents a single entity stored by the index table. Users are
-// expected to handle and store EntryCells on their own to track operations that
-// they are performing with a given entity, as opposed to dealing with pointers
-// to individual positions on the table, because the whole table can be moved to
-// another place, which would invalidate any pointers to individual cells in
-// the table.
-// However, note that it is also possible for an entity to be moved from one
-// position to another, so an EntryCell may be invalid by the time a long
-// operation completes. In that case, the caller should consult the table again
-// using FindEntryCell().
-class NET_EXPORT_PRIVATE EntryCell {
- public:
- ~EntryCell();
-
- bool IsValid() const;
-
- int32_t cell_num() const { return cell_num_; }
- uint32_t hash() const { return hash_; }
-
- Addr GetAddress() const;
- EntryState GetState() const;
- EntryGroup GetGroup() const;
- int GetReuse() const;
- int GetTimestamp() const;
-
- void SetState(EntryState state);
- void SetGroup(EntryGroup group);
- void SetReuse(int count);
- void SetTimestamp(int timestamp);
-
- static EntryCell GetEntryCellForTest(int32_t cell_num,
- uint32_t hash,
- Addr address,
- IndexCell* cell,
- bool small_table);
-  void SerializeForTest(IndexCell* destination);
-
- private:
- friend class IndexTable;
- friend class CacheDumperHelper;
-
- EntryCell();
- EntryCell(int32_t cell_num, uint32_t hash, Addr address, bool small_table);
- EntryCell(int32_t cell_num,
- uint32_t hash,
- const IndexCell& cell,
- bool small_table);
-
- void Clear() { cell_.Clear(); }
- void FixSum();
-
- // Returns the raw value stored on the index table.
- uint32_t GetLocation() const;
-
- // Recalculates hash_ assuming that only the low order bits are valid and the
- // rest come from cell_.
- uint32_t RecomputeHash();
-
- void Serialize(IndexCell* destination) const;
-
- int32_t cell_num_;
- uint32_t hash_;
- IndexCell cell_;
- bool small_table_;
-};
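The staleness caveat in the class comment above suggests a usage pattern along these lines; |index|, |hash| and |address| are hypothetical locals in some caller:

// After a slow operation, re-locate the cell instead of trusting the copy.
EntryCell cell = index->FindEntryCell(hash, address);
if (!cell.IsValid()) {
  // The entry moved or was deleted in the meantime; consult
  // LookupEntries(hash) again, or abandon the operation.
}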
-
-// Keeps a collection of EntryCells in order to be processed.
-struct NET_EXPORT_PRIVATE EntrySet {
- EntrySet();
- EntrySet(const EntrySet& other);
- ~EntrySet();
-
-  int evicted_count;  // The number of evicted entries in this set.
- size_t current; // The number of the cell that is being processed.
- std::vector<EntryCell> cells;
-};
-
-// A given entity referenced by the index table is uniquely identified by the
-// combination of hash and address.
-struct CellInfo {
- uint32_t hash;
- Addr address;
-};
-typedef std::vector<CellInfo> CellList;
-
-// An index iterator is used to get a group of cells that share the same
-// timestamp. When this structure is passed to GetNextCells(), the caller sets
-// the initial timestamp and direction; when it is used with GetOldest, the
-// initial values are ignored.
-struct NET_EXPORT_PRIVATE IndexIterator {
- IndexIterator();
- ~IndexIterator();
-
- CellList cells;
- int timestamp; // The current low resolution timestamp for |cells|.
- bool forward; // The direction of the iteration, in time.
-};
-
-// Methods that the backend has to implement to support the table. Note that the
-// backend is expected to own all IndexTable instances, so it is expected to
-// outlive the table.
-class NET_EXPORT_PRIVATE IndexTableBackend {
- public:
- virtual ~IndexTableBackend() {}
-
- // The index has to grow.
- virtual void GrowIndex() = 0;
-
- // Save the index to the backup file.
- virtual void SaveIndex(net::IOBuffer* buffer, int buffer_len) = 0;
-
- // Deletes or fixes an invalid cell from the backend.
- virtual void DeleteCell(EntryCell cell) = 0;
- virtual void FixCell(EntryCell cell) = 0;
-};
-
-// The data required to initialize an index. Note that not all fields have to
-// be provided when growing the tables.
-struct NET_EXPORT_PRIVATE IndexTableInitData {
- IndexTableInitData();
- ~IndexTableInitData();
-
- IndexBitmap* index_bitmap;
- IndexBucket* main_table;
- IndexBucket* extra_table;
- scoped_ptr<IndexHeaderV3> backup_header;
- scoped_ptr<uint32_t[]> backup_bitmap;
-};
-
-// See the description at the top of this file.
-class NET_EXPORT_PRIVATE IndexTable {
- public:
- explicit IndexTable(IndexTableBackend* backend);
- ~IndexTable();
-
- // Initializes the object, or re-initializes it when the backing files grow.
-  // Note that the only supported way to initialize this object is using
- // pointers that come from the files being directly mapped in memory. If that
- // is not the case, it must be emulated in a convincing way, for example
- // making sure that the tables for re-init look the same as the tables to be
- // replaced.
- void Init(IndexTableInitData* params);
-
- // Releases the resources acquired during Init().
- void Shutdown();
-
-  // Locates a resource on the index. Returns a list of all resources that match
- // the provided hash.
- EntrySet LookupEntries(uint32_t hash);
-
- // Creates a new cell to store a new resource.
- EntryCell CreateEntryCell(uint32_t hash, Addr address);
-
- // Locates a particular cell. This method allows a caller to perform slow
- // operations with some entries while the index evolves, by returning the
- // current state of a cell. If the desired cell cannot be located, the return
- // object will be invalid.
- EntryCell FindEntryCell(uint32_t hash, Addr address);
-
- // Returns an IndexTable timestamp for a given absolute time. The actual
- // resolution of the timestamp should be considered an implementation detail,
-  // but it is certainly coarser than seconds. The important part is that a
-  // group of cells will share the same timestamp (see IndexIterator).
- int CalculateTimestamp(base::Time time);
-
- // Returns the equivalent time for a cell timestamp.
- base::Time TimeFromTimestamp(int timestamp);
-
- // Updates a particular cell.
-  void SetState(uint32_t hash, Addr address, EntryState state);
- void UpdateTime(uint32_t hash, Addr address, base::Time current);
-
- // Saves the contents of |cell| to the table.
- void Save(EntryCell* cell);
-
- // Returns the oldest entries for each group of entries. The initial values
- // for the provided iterators are ignored. Entries are assigned to iterators
- // according to their EntryGroup.
- void GetOldest(IndexIterator* no_use,
- IndexIterator* low_use,
- IndexIterator* high_use);
-
- // Returns the next group of entries for the provided iterator. This method
- // does not return the cells matching the initial iterator's timestamp,
- // but rather cells after (or before, depending on the iterator's |forward|
- // member) that timestamp.
- bool GetNextCells(IndexIterator* iterator);
-
- // Called each time the index should save the backup information. The caller
- // can assume that anything that needs to be saved is saved when this method
-  // is called, and that there is only one source of timing information, and
- // that source is controlled by the owner of this object.
- void OnBackupTimer();
-
- IndexHeaderV3* header() { return header_; }
- const IndexHeaderV3* header() const { return header_; }
-
- private:
- EntryCell FindEntryCellImpl(uint32_t hash, Addr address, bool allow_deleted);
- void CheckState(const EntryCell& cell);
- void Write(const EntryCell& cell);
- int NewExtraBucket();
- void WalkTables(int limit_time,
- IndexIterator* no_use,
- IndexIterator* low_use,
- IndexIterator* high_use);
- void UpdateFromBucket(IndexBucket* bucket, int bucket_hash,
- int limit_time,
- IndexIterator* no_use,
- IndexIterator* low_use,
- IndexIterator* high_use);
- void MoveCells(IndexBucket* old_extra_table);
- void MoveSingleCell(IndexCell* current_cell, int cell_num,
- int main_table_index, bool growing);
- void HandleMisplacedCell(IndexCell* current_cell, int cell_num,
- int main_table_index);
- void CheckBucketList(int bucket_id);
-
- uint32_t GetLocation(const IndexCell& cell);
- uint32_t GetHashValue(const IndexCell& cell);
- uint32_t GetFullHash(const IndexCell& cell, uint32_t lower_part);
- bool IsHashMatch(const IndexCell& cell, uint32_t hash);
- bool MisplacedHash(const IndexCell& cell, uint32_t hash);
-
- IndexTableBackend* backend_;
- IndexHeaderV3* header_;
- scoped_ptr<Bitmap> bitmap_;
- scoped_ptr<Bitmap> backup_bitmap_;
- scoped_ptr<uint32_t[]> backup_bitmap_storage_;
- scoped_ptr<IndexHeaderV3> backup_header_;
- IndexBucket* main_table_;
- IndexBucket* extra_table_;
- uint32_t mask_; // Binary mask to map a hash to the hash table.
- int extra_bits_; // How many bits are in mask_ above the default value.
- bool modified_;
- bool small_table_;
-
- DISALLOW_COPY_AND_ASSIGN(IndexTable);
-};
-
-} // namespace disk_cache
-
-#endif // NET_DISK_CACHE_BLOCKFILE_INDEX_TABLE_V3_H_
diff --git a/chromium/net/disk_cache/blockfile/index_table_v3_unittest.cc b/chromium/net/disk_cache/blockfile/index_table_v3_unittest.cc
deleted file mode 100644
index e2fca1063e4..00000000000
--- a/chromium/net/disk_cache/blockfile/index_table_v3_unittest.cc
+++ /dev/null
@@ -1,711 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/disk_cache/blockfile/index_table_v3.h"
-
-#include <stdint.h>
-#include <utility>
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "net/disk_cache/blockfile/addr.h"
-#include "net/disk_cache/blockfile/disk_format_v3.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using disk_cache::EntryCell;
-using disk_cache::IndexCell;
-using disk_cache::IndexTable;
-using disk_cache::IndexTableInitData;
-
-namespace {
-
-int GetChecksum(const IndexCell& source) {
- // Only the cell pointer is relevant.
- disk_cache::Addr addr;
- IndexCell* cell = const_cast<IndexCell*>(&source);
- EntryCell entry = EntryCell::GetEntryCellForTest(0, 0, addr, cell, false);
-
- IndexCell result;
-  entry.SerializeForTest(&result);
- return result.last_part >> 6;
-}
-
-class MockIndexBackend : public disk_cache::IndexTableBackend {
- public:
- MockIndexBackend() : grow_called_(false), buffer_len_(-1) {}
- ~MockIndexBackend() override {}
-
- bool grow_called() const { return grow_called_; }
- int buffer_len() const { return buffer_len_; }
-
- void GrowIndex() override { grow_called_ = true; }
- void SaveIndex(net::IOBuffer* buffer, int buffer_len) override {
- buffer_len_ = buffer_len;
- }
- void DeleteCell(EntryCell cell) override {}
- void FixCell(EntryCell cell) override {}
-
- private:
- bool grow_called_;
- int buffer_len_;
-};
-
-class TestCacheTables {
- public:
- // |num_entries| is the capacity of the main table. The extra table is half
- // the size of the main table.
- explicit TestCacheTables(int num_entries);
- ~TestCacheTables() {}
-
- void GetInitData(IndexTableInitData* result);
- void CopyFrom(const TestCacheTables& other);
- base::Time start_time() const { return start_time_; }
-
- private:
- scoped_ptr<uint64_t[]> main_bitmap_;
- scoped_ptr<disk_cache::IndexBucket[]> main_table_;
- scoped_ptr<disk_cache::IndexBucket[]> extra_table_;
- base::Time start_time_;
- int num_bitmap_bytes_;
-
- DISALLOW_COPY_AND_ASSIGN(TestCacheTables);
-};
-
-TestCacheTables::TestCacheTables(int num_entries) {
- DCHECK_GE(num_entries, 1024);
- DCHECK_EQ(num_entries, num_entries / 1024 * 1024);
- main_table_.reset(new disk_cache::IndexBucket[num_entries]);
- extra_table_.reset(new disk_cache::IndexBucket[num_entries / 2]);
- memset(main_table_.get(), 0, num_entries * sizeof(*main_table_.get()));
- memset(extra_table_.get(), 0, num_entries / 2 * sizeof(*extra_table_.get()));
-
-  // We allow an IndexBitmap smaller than a page because the code should not
-  // depend on the bitmap being page-sized.
- num_bitmap_bytes_ = (num_entries + num_entries / 2) / 8;
- size_t required_size = sizeof(disk_cache::IndexHeaderV3) + num_bitmap_bytes_;
- main_bitmap_.reset(new uint64_t[required_size / sizeof(uint64_t)]);
- memset(main_bitmap_.get(), 0, required_size);
-
- disk_cache::IndexHeaderV3* header =
- reinterpret_cast<disk_cache::IndexHeaderV3*>(main_bitmap_.get());
-
- header->magic = disk_cache::kIndexMagicV3;
- header->version = disk_cache::kVersion3;
- header->table_len = num_entries + num_entries / 2;
- header->max_bucket = num_entries / 4 - 1;
-
- start_time_ = base::Time::Now();
- header->create_time = start_time_.ToInternalValue();
- header->base_time =
- (start_time_ - base::TimeDelta::FromDays(20)).ToInternalValue();
-
- if (num_entries < 64 * 1024)
- header->flags = disk_cache::SMALL_CACHE;
-}
-
-void TestCacheTables::GetInitData(IndexTableInitData* result) {
- result->index_bitmap =
- reinterpret_cast<disk_cache::IndexBitmap*>(main_bitmap_.get());
-
- result->main_table = main_table_.get();
- result->extra_table = extra_table_.get();
-
- result->backup_header.reset(new disk_cache::IndexHeaderV3);
- memcpy(result->backup_header.get(), result->index_bitmap,
- sizeof(result->index_bitmap->header));
-
- result->backup_bitmap.reset(
- new uint32_t[num_bitmap_bytes_ / sizeof(uint32_t)]);
- memcpy(result->backup_bitmap.get(), result->index_bitmap->bitmap,
- num_bitmap_bytes_);
-}
-
-void TestCacheTables::CopyFrom(const TestCacheTables& other) {
- disk_cache::IndexBitmap* this_bitmap =
- reinterpret_cast<disk_cache::IndexBitmap*>(main_bitmap_.get());
- disk_cache::IndexBitmap* other_bitmap =
- reinterpret_cast<disk_cache::IndexBitmap*>(other.main_bitmap_.get());
-
- DCHECK_GE(this_bitmap->header.table_len, other_bitmap->header.table_len);
- DCHECK_GE(num_bitmap_bytes_, other.num_bitmap_bytes_);
-
- memcpy(this_bitmap->bitmap, other_bitmap->bitmap, other.num_bitmap_bytes_);
-
- int main_table_buckets = (other_bitmap->header.table_len * 2 / 3) / 4;
- int extra_table_buckets = (other_bitmap->header.table_len * 1 / 3) / 4;
- memcpy(main_table_.get(), other.main_table_.get(),
- main_table_buckets * sizeof(disk_cache::IndexBucket));
- memcpy(extra_table_.get(), other.extra_table_.get(),
- extra_table_buckets * sizeof(disk_cache::IndexBucket));
-
- this_bitmap->header.num_entries = other_bitmap->header.num_entries;
- this_bitmap->header.used_cells = other_bitmap->header.used_cells;
- this_bitmap->header.max_bucket = other_bitmap->header.max_bucket;
- this_bitmap->header.create_time = other_bitmap->header.create_time;
- this_bitmap->header.base_time = other_bitmap->header.base_time;
- this_bitmap->header.flags = other_bitmap->header.flags;
- start_time_ = other.start_time_;
-}
-
-} // namespace
-
-TEST(DiskCacheIndexTable, EntryCell) {
- uint32_t hash = 0x55aa6699;
- disk_cache::Addr addr(disk_cache::BLOCK_ENTRIES, 1, 5, 0x4531);
- bool small_table = true;
- int cell_num = 88;
- int reuse = 6;
- int timestamp = 123456;
- disk_cache::EntryState state = disk_cache::ENTRY_MODIFIED;
- disk_cache::EntryGroup group = disk_cache::ENTRY_HIGH_USE;
-
- for (int i = 0; i < 4; i++) {
- SCOPED_TRACE(i);
- EntryCell entry = EntryCell::GetEntryCellForTest(cell_num, hash, addr, NULL,
- small_table);
- EXPECT_EQ(disk_cache::ENTRY_NO_USE, entry.GetGroup());
- EXPECT_EQ(disk_cache::ENTRY_NEW, entry.GetState());
-
- entry.SetGroup(group);
- entry.SetState(state);
- entry.SetReuse(reuse);
- entry.SetTimestamp(timestamp);
-
- EXPECT_TRUE(entry.IsValid());
- EXPECT_EQ(hash, entry.hash());
- EXPECT_EQ(cell_num, entry.cell_num());
- EXPECT_EQ(addr.value(), entry.GetAddress().value());
-
- EXPECT_EQ(group, entry.GetGroup());
- EXPECT_EQ(state, entry.GetState());
- EXPECT_EQ(reuse, entry.GetReuse());
- EXPECT_EQ(timestamp, entry.GetTimestamp());
-
- // Store the data and read it again.
- IndexCell cell;
-    entry.SerializeForTest(&cell);
-
- EntryCell entry2 = EntryCell::GetEntryCellForTest(cell_num, hash, addr,
- &cell, small_table);
-
- EXPECT_EQ(addr.value(), entry2.GetAddress().value());
-
- EXPECT_EQ(group, entry2.GetGroup());
- EXPECT_EQ(state, entry2.GetState());
- EXPECT_EQ(reuse, entry2.GetReuse());
- EXPECT_EQ(timestamp, entry2.GetTimestamp());
-
- small_table = !small_table;
- if (i == 1) {
- hash = ~hash;
- cell_num *= 5;
- state = disk_cache::ENTRY_USED;
- group = disk_cache::ENTRY_EVICTED;
- addr = disk_cache::Addr(disk_cache::BLOCK_EVICTED, 1, 6, 0x18a5);
- reuse = 15; // 4 bits
- timestamp = 0xfffff; // 20 bits.
- }
- }
-}
-
-// Goes over some significant values for a cell's sum.
-TEST(DiskCacheIndexTable, EntryCellSum) {
- IndexCell source;
- source.Clear();
- EXPECT_EQ(0, GetChecksum(source));
-
- source.first_part++;
- EXPECT_EQ(1, GetChecksum(source));
-
- source.Clear();
- source.last_part = 0x80;
- EXPECT_EQ(0, GetChecksum(source));
-
- source.last_part = 0x55;
- EXPECT_EQ(3, GetChecksum(source));
-
- source.first_part = 0x555555;
- EXPECT_EQ(2, GetChecksum(source));
-
- source.last_part = 0;
- EXPECT_EQ(1, GetChecksum(source));
-
- source.first_part = UINT64_C(0x8000000080000000);
- EXPECT_EQ(0, GetChecksum(source));
-
- source.first_part = UINT64_C(0x4000000040000000);
- EXPECT_EQ(2, GetChecksum(source));
-
- source.first_part = UINT64_C(0x200000020000000);
- EXPECT_EQ(1, GetChecksum(source));
-
- source.first_part = UINT64_C(0x100000010010000);
- EXPECT_EQ(3, GetChecksum(source));
-
- source.first_part = 0x80008000;
- EXPECT_EQ(0, GetChecksum(source));
-
- source.first_part = UINT64_C(0x800000008000);
- EXPECT_EQ(1, GetChecksum(source));
-
- source.first_part = 0x8080;
- EXPECT_EQ(0, GetChecksum(source));
-
- source.first_part = 0x800080;
- EXPECT_EQ(1, GetChecksum(source));
-
- source.first_part = 0x88;
- EXPECT_EQ(0, GetChecksum(source));
-
- source.first_part = 0x808;
- EXPECT_EQ(1, GetChecksum(source));
-
- source.first_part = 0xA;
- EXPECT_EQ(0, GetChecksum(source));
-
- source.first_part = 0x22;
- EXPECT_EQ(1, GetChecksum(source));
-}
-
-TEST(DiskCacheIndexTable, Basics) {
- TestCacheTables cache(1024);
- IndexTableInitData init_data;
- cache.GetInitData(&init_data);
-
- IndexTable index(NULL);
- index.Init(&init_data);
-
- // Write some entries.
- disk_cache::CellList entries;
- for (int i = 0; i < 250; i++) {
- SCOPED_TRACE(i);
- uint32_t hash = i * i * 1111 + i * 11;
- disk_cache::Addr addr(disk_cache::BLOCK_ENTRIES, 1, 5, i * 13 + 1);
- EntryCell entry = index.CreateEntryCell(hash, addr);
- EXPECT_TRUE(entry.IsValid());
-
- disk_cache::CellInfo info = { hash, addr };
- entries.push_back(info);
- }
-
- // Read them back.
- for (size_t i = 0; i < entries.size(); i++) {
- SCOPED_TRACE(i);
- uint32_t hash = entries[i].hash;
- disk_cache::Addr addr = entries[i].address;
-
- disk_cache::EntrySet found_entries = index.LookupEntries(hash);
- ASSERT_EQ(1u, found_entries.cells.size());
- EXPECT_TRUE(found_entries.cells[0].IsValid());
- EXPECT_EQ(hash, found_entries.cells[0].hash());
- EXPECT_EQ(addr.value(), found_entries.cells[0].GetAddress().value());
-
- EntryCell entry = index.FindEntryCell(hash, addr);
- EXPECT_TRUE(entry.IsValid());
- EXPECT_EQ(hash, entry.hash());
- EXPECT_EQ(addr.value(), entry.GetAddress().value());
-
- // Delete the first 100 entries.
- if (i < 100)
-      index.SetState(hash, addr, disk_cache::ENTRY_DELETED);
- }
-
- // See what we have now.
- for (size_t i = 0; i < entries.size(); i++) {
- SCOPED_TRACE(i);
- uint32_t hash = entries[i].hash;
- disk_cache::Addr addr = entries[i].address;
-
- disk_cache::EntrySet found_entries = index.LookupEntries(hash);
- if (i < 100) {
- EXPECT_EQ(0u, found_entries.cells.size());
- } else {
- ASSERT_EQ(1u, found_entries.cells.size());
- EXPECT_TRUE(found_entries.cells[0].IsValid());
- EXPECT_EQ(hash, found_entries.cells[0].hash());
- EXPECT_EQ(addr.value(), found_entries.cells[0].GetAddress().value());
- }
- }
-}
-
-// Tests handling of multiple entries with the same hash.
-TEST(DiskCacheIndexTable, SameHash) {
- TestCacheTables cache(1024);
- IndexTableInitData init_data;
- cache.GetInitData(&init_data);
-
- IndexTable index(NULL);
- index.Init(&init_data);
-
- disk_cache::CellList entries;
- uint32_t hash = 0x55aa55bb;
- for (int i = 0; i < 6; i++) {
- SCOPED_TRACE(i);
- disk_cache::Addr addr(disk_cache::BLOCK_ENTRIES, 1, 5, i * 13 + 1);
- EntryCell entry = index.CreateEntryCell(hash, addr);
- EXPECT_TRUE(entry.IsValid());
-
- disk_cache::CellInfo info = { hash, addr };
- entries.push_back(info);
- }
-
- disk_cache::EntrySet found_entries = index.LookupEntries(hash);
- EXPECT_EQ(0, found_entries.evicted_count);
- ASSERT_EQ(6u, found_entries.cells.size());
-
- for (size_t i = 0; i < found_entries.cells.size(); i++) {
- SCOPED_TRACE(i);
- EXPECT_EQ(entries[i].address, found_entries.cells[i].GetAddress());
- }
-
- // Now verify handling of entries on different states.
-  index.SetState(hash, entries[0].address, disk_cache::ENTRY_DELETED);
-  index.SetState(hash, entries[1].address, disk_cache::ENTRY_DELETED);
-  index.SetState(hash, entries[2].address, disk_cache::ENTRY_USED);
-  index.SetState(hash, entries[3].address, disk_cache::ENTRY_USED);
-  index.SetState(hash, entries[4].address, disk_cache::ENTRY_USED);
-
- found_entries = index.LookupEntries(hash);
- EXPECT_EQ(0, found_entries.evicted_count);
- ASSERT_EQ(4u, found_entries.cells.size());
-
-  index.SetState(hash, entries[3].address, disk_cache::ENTRY_OPEN);
-  index.SetState(hash, entries[4].address, disk_cache::ENTRY_OPEN);
-
- found_entries = index.LookupEntries(hash);
- EXPECT_EQ(0, found_entries.evicted_count);
- ASSERT_EQ(4u, found_entries.cells.size());
-
-  index.SetState(hash, entries[4].address, disk_cache::ENTRY_MODIFIED);
-
- found_entries = index.LookupEntries(hash);
- EXPECT_EQ(0, found_entries.evicted_count);
- ASSERT_EQ(4u, found_entries.cells.size());
-
-  index.SetState(hash, entries[1].address, disk_cache::ENTRY_FREE);
-
- found_entries = index.LookupEntries(hash);
- EXPECT_EQ(0, found_entries.evicted_count);
- ASSERT_EQ(4u, found_entries.cells.size());
-
- // FindEntryCell should not see deleted entries.
- EntryCell entry = index.FindEntryCell(hash, entries[0].address);
- EXPECT_FALSE(entry.IsValid());
-
- // A free entry is gone.
- entry = index.FindEntryCell(hash, entries[1].address);
- EXPECT_FALSE(entry.IsValid());
-
- // Locate a used entry, and evict it. This is not really a correct operation
- // in that an existing cell doesn't transition to evicted; instead a new cell
- // for the evicted entry (on a different block file) should be created. Still,
- // at least evicted_count would be valid.
- entry = index.FindEntryCell(hash, entries[2].address);
- EXPECT_TRUE(entry.IsValid());
- entry.SetGroup(disk_cache::ENTRY_EVICTED);
- index.Save(&entry);
-
- found_entries = index.LookupEntries(hash);
- EXPECT_EQ(1, found_entries.evicted_count);
- ASSERT_EQ(4u, found_entries.cells.size());
-
- // Now use the proper way to get an evicted entry.
- disk_cache::Addr addr2(disk_cache::BLOCK_EVICTED, 1, 6, 6); // Any address.
- entry = index.CreateEntryCell(hash, addr2);
- EXPECT_TRUE(entry.IsValid());
- EXPECT_EQ(disk_cache::ENTRY_EVICTED, entry.GetGroup());
-
- found_entries = index.LookupEntries(hash);
- EXPECT_EQ(2, found_entries.evicted_count);
- ASSERT_EQ(5u, found_entries.cells.size());
-}
-
-TEST(DiskCacheIndexTable, Timestamps) {
- TestCacheTables cache(1024);
- IndexTableInitData init_data;
- cache.GetInitData(&init_data);
-
- IndexTable index(NULL);
- index.Init(&init_data);
-
- // The granularity should be 1 minute.
- int timestamp1 = index.CalculateTimestamp(cache.start_time());
- int timestamp2 = index.CalculateTimestamp(cache.start_time() +
- base::TimeDelta::FromSeconds(59));
- EXPECT_EQ(timestamp1, timestamp2);
-
- int timestamp3 = index.CalculateTimestamp(cache.start_time() +
- base::TimeDelta::FromSeconds(61));
- EXPECT_EQ(timestamp1 + 1, timestamp3);
-
- int timestamp4 = index.CalculateTimestamp(cache.start_time() +
- base::TimeDelta::FromSeconds(119));
- EXPECT_EQ(timestamp1 + 1, timestamp4);
-
- int timestamp5 = index.CalculateTimestamp(cache.start_time() +
- base::TimeDelta::FromSeconds(121));
- EXPECT_EQ(timestamp1 + 2, timestamp5);
-
- int timestamp6 = index.CalculateTimestamp(cache.start_time() -
- base::TimeDelta::FromSeconds(30));
- EXPECT_EQ(timestamp1 - 1, timestamp6);
-
- // The base should be 20 days in the past.
- int timestamp7 = index.CalculateTimestamp(cache.start_time() -
- base::TimeDelta::FromDays(20));
- int timestamp8 = index.CalculateTimestamp(cache.start_time() -
- base::TimeDelta::FromDays(35));
- EXPECT_EQ(timestamp7, timestamp8);
- EXPECT_EQ(0, timestamp8);
-
- int timestamp9 = index.CalculateTimestamp(cache.start_time() -
- base::TimeDelta::FromDays(19));
- EXPECT_NE(0, timestamp9);
-}
-
-// Tests GetOldest and GetNextCells.
-TEST(DiskCacheIndexTable, Iterations) {
- TestCacheTables cache(1024);
- IndexTableInitData init_data;
- cache.GetInitData(&init_data);
-
- IndexTable index(NULL);
- index.Init(&init_data);
-
- base::Time time = cache.start_time();
-
- // Write some entries.
- disk_cache::CellList entries;
- for (int i = 0; i < 44; i++) {
- SCOPED_TRACE(i);
- uint32_t hash = i; // The entries will be ordered on the table.
- disk_cache::Addr addr(disk_cache::BLOCK_ENTRIES, 1, 5, i * 13 + 1);
- if (i < 10 || i == 40)
- addr = disk_cache::Addr(disk_cache::BLOCK_EVICTED, 1, 6, i * 13 + 1);
-
- EntryCell entry = index.CreateEntryCell(hash, addr);
- EXPECT_TRUE(entry.IsValid());
-
- disk_cache::CellInfo info = { hash, addr };
- entries.push_back(info);
-
- if (i < 10 || i == 40) {
- // Do nothing. These are ENTRY_EVICTED by default.
- } else if (i < 20 || i == 41) {
- entry.SetGroup(disk_cache::ENTRY_HIGH_USE);
- index.Save(&entry);
- } else if (i < 30 || i == 42) {
- entry.SetGroup(disk_cache::ENTRY_LOW_USE);
- index.Save(&entry);
- }
-
- // Entries [30,39] and 43 are marked as ENTRY_NO_USE (the default).
-
- if (!(i % 10))
- time += base::TimeDelta::FromMinutes(1);
-
- index.UpdateTime(hash, addr, time);
- }
-
- // Get the oldest entries of each group.
- disk_cache::IndexIterator no_use, low_use, high_use;
- index.GetOldest(&no_use, &low_use, &high_use);
- ASSERT_EQ(10u, no_use.cells.size());
- ASSERT_EQ(10u, low_use.cells.size());
- ASSERT_EQ(10u, high_use.cells.size());
-
- EXPECT_EQ(entries[10].hash, high_use.cells[0].hash);
- EXPECT_EQ(entries[19].hash, high_use.cells[9].hash);
- EXPECT_EQ(entries[20].hash, low_use.cells[0].hash);
- EXPECT_EQ(entries[29].hash, low_use.cells[9].hash);
- EXPECT_EQ(entries[30].hash, no_use.cells[0].hash);
- EXPECT_EQ(entries[39].hash, no_use.cells[9].hash);
-
- // Now start an iteration from the head (most recent entry).
- disk_cache::IndexIterator iterator;
- iterator.timestamp = index.CalculateTimestamp(time) + 1;
- iterator.forward = false; // Back in time.
-
- ASSERT_TRUE(index.GetNextCells(&iterator));
- ASSERT_EQ(3u, iterator.cells.size());
- EXPECT_EQ(entries[41].hash, iterator.cells[0].hash);
- EXPECT_EQ(entries[42].hash, iterator.cells[1].hash);
- EXPECT_EQ(entries[43].hash, iterator.cells[2].hash);
-
- ASSERT_TRUE(index.GetNextCells(&iterator));
- ASSERT_EQ(10u, iterator.cells.size());
- EXPECT_EQ(entries[30].hash, iterator.cells[0].hash);
- EXPECT_EQ(entries[39].hash, iterator.cells[9].hash);
-
- ASSERT_TRUE(index.GetNextCells(&iterator));
- ASSERT_EQ(10u, iterator.cells.size());
- EXPECT_EQ(entries[20].hash, iterator.cells[0].hash);
- EXPECT_EQ(entries[29].hash, iterator.cells[9].hash);
-
- ASSERT_TRUE(index.GetNextCells(&iterator));
- ASSERT_EQ(10u, iterator.cells.size());
- EXPECT_EQ(entries[10].hash, iterator.cells[0].hash);
- EXPECT_EQ(entries[19].hash, iterator.cells[9].hash);
-
- ASSERT_FALSE(index.GetNextCells(&iterator));
-
- // Now start an iteration from the tail (oldest entry).
- iterator.timestamp = 0;
- iterator.forward = true;
-
- ASSERT_TRUE(index.GetNextCells(&iterator));
- ASSERT_EQ(10u, iterator.cells.size());
- EXPECT_EQ(entries[10].hash, iterator.cells[0].hash);
- EXPECT_EQ(entries[19].hash, iterator.cells[9].hash);
-
- ASSERT_TRUE(index.GetNextCells(&iterator));
- ASSERT_EQ(10u, iterator.cells.size());
- EXPECT_EQ(entries[20].hash, iterator.cells[0].hash);
- EXPECT_EQ(entries[29].hash, iterator.cells[9].hash);
-
- ASSERT_TRUE(index.GetNextCells(&iterator));
- ASSERT_EQ(10u, iterator.cells.size());
- EXPECT_EQ(entries[30].hash, iterator.cells[0].hash);
- EXPECT_EQ(entries[39].hash, iterator.cells[9].hash);
-
- ASSERT_TRUE(index.GetNextCells(&iterator));
- ASSERT_EQ(3u, iterator.cells.size());
- EXPECT_EQ(entries[41].hash, iterator.cells[0].hash);
- EXPECT_EQ(entries[42].hash, iterator.cells[1].hash);
- EXPECT_EQ(entries[43].hash, iterator.cells[2].hash);
-}
-
-// Tests doubling of the table.
-TEST(DiskCacheIndexTable, Doubling) {
- IndexTable index(NULL);
- int size = 1024;
- scoped_ptr<TestCacheTables> cache(new TestCacheTables(size));
- int entry_id = 0;
- disk_cache::CellList entries;
-
- // Go from 1024 to 256k cells.
- for (int resizes = 0; resizes <= 8; resizes++) {
- scoped_ptr<TestCacheTables> old_cache(std::move(cache));
- cache.reset(new TestCacheTables(size));
- cache.get()->CopyFrom(*old_cache.get());
-
- IndexTableInitData init_data;
- cache.get()->GetInitData(&init_data);
- index.Init(&init_data);
-
- // Write some entries.
- for (int i = 0; i < 250; i++, entry_id++) {
- SCOPED_TRACE(entry_id);
- uint32_t hash = entry_id * i * 321 + entry_id * 13;
- disk_cache::Addr addr(disk_cache::BLOCK_ENTRIES, 1, 5, entry_id * 17 + 1);
- EntryCell entry = index.CreateEntryCell(hash, addr);
- EXPECT_TRUE(entry.IsValid());
-
- disk_cache::CellInfo info = { hash, addr };
- entries.push_back(info);
- }
- size *= 2;
- }
-
- // Access all the entries.
- for (size_t i = 0; i < entries.size(); i++) {
- SCOPED_TRACE(i);
- disk_cache::EntrySet found_entries = index.LookupEntries(entries[i].hash);
- ASSERT_EQ(1u, found_entries.cells.size());
- EXPECT_TRUE(found_entries.cells[0].IsValid());
- }
-}
-
-// Tests bucket chaining when growing the index.
-TEST(DiskCacheIndexTable, BucketChains) {
- IndexTable index(NULL);
- int size = 1024;
- scoped_ptr<TestCacheTables> cache(new TestCacheTables(size));
- disk_cache::CellList entries;
-
- IndexTableInitData init_data;
- cache.get()->GetInitData(&init_data);
- index.Init(&init_data);
-
- // Write some entries.
- for (int i = 0; i < 8; i++) {
- SCOPED_TRACE(i);
- uint32_t hash = i * 256;
- disk_cache::Addr addr(disk_cache::BLOCK_ENTRIES, 1, 5, i * 7 + 1);
- EntryCell entry = index.CreateEntryCell(hash, addr);
- EXPECT_TRUE(entry.IsValid());
-
- disk_cache::CellInfo info = { hash, addr };
- entries.push_back(info);
- }
-
- // Double the size.
- scoped_ptr<TestCacheTables> old_cache(std::move(cache));
- cache.reset(new TestCacheTables(size * 2));
- cache.get()->CopyFrom(*old_cache.get());
-
- cache.get()->GetInitData(&init_data);
- index.Init(&init_data);
-
- // Write more entries, starting with the upper half of the table.
- for (int i = 9; i < 11; i++) {
- SCOPED_TRACE(i);
- uint32_t hash = i * 256;
- disk_cache::Addr addr(disk_cache::BLOCK_ENTRIES, 1, 5, i * 7 + 1);
- EntryCell entry = index.CreateEntryCell(hash, addr);
- EXPECT_TRUE(entry.IsValid());
-
- disk_cache::CellInfo info = { hash, addr };
- entries.push_back(info);
- }
-
- // Access all the entries.
- for (size_t i = 0; i < entries.size(); i++) {
- SCOPED_TRACE(i);
- disk_cache::EntrySet found_entries = index.LookupEntries(entries[i].hash);
- ASSERT_EQ(1u, found_entries.cells.size());
- EXPECT_TRUE(found_entries.cells[0].IsValid());
- }
-}
-
-// Tests that GrowIndex is called.
-TEST(DiskCacheIndexTable, GrowIndex) {
- TestCacheTables cache(1024);
- IndexTableInitData init_data;
- cache.GetInitData(&init_data);
- MockIndexBackend backend;
-
- IndexTable index(&backend);
- index.Init(&init_data);
-
- // Write some entries.
- for (int i = 0; i < 512; i++) {
- SCOPED_TRACE(i);
- uint32_t hash = 0;
- disk_cache::Addr addr(disk_cache::BLOCK_ENTRIES, 1, 5, i + 1);
- EntryCell entry = index.CreateEntryCell(hash, addr);
- EXPECT_TRUE(entry.IsValid());
- }
-
- EXPECT_TRUE(backend.grow_called());
-}
-
-TEST(DiskCacheIndexTable, SaveIndex) {
- TestCacheTables cache(1024);
- IndexTableInitData init_data;
- cache.GetInitData(&init_data);
- MockIndexBackend backend;
-
- IndexTable index(&backend);
- index.Init(&init_data);
-
- uint32_t hash = 0;
- disk_cache::Addr addr(disk_cache::BLOCK_ENTRIES, 1, 5, 6);
- EntryCell entry = index.CreateEntryCell(hash, addr);
- EXPECT_TRUE(entry.IsValid());
-
- index.OnBackupTimer();
- int expected = (1024 + 512) / 8 + sizeof(disk_cache::IndexHeaderV3);
- EXPECT_EQ(expected, backend.buffer_len());
-}
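
The expected length above appears to decompose as one backup bit per cell for the 1024-cell main table plus a 512-cell extra table (192 bytes of bitmap), plus the serialized header. A quick check of that arithmetic; the header size below is a stand-in, since the real value comes from sizeof(disk_cache::IndexHeaderV3) in disk_format_v3.h:

  #include <cstddef>

  // Stand-in for sizeof(disk_cache::IndexHeaderV3); illustrative only.
  const size_t kHeaderSize = 256;

  const int kMainCells = 1024;   // main table cells
  const int kExtraCells = 512;   // extra table cells

  // One backup bit per cell, packed into bytes, plus the header.
  const size_t kExpectedBackupLen =
      (kMainCells + kExtraCells) / 8 + kHeaderSize;

  static_assert((kMainCells + kExtraCells) / 8 == 192,
                "the bitmap portion is 192 bytes");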
diff --git a/chromium/net/disk_cache/blockfile/mapped_file.cc b/chromium/net/disk_cache/blockfile/mapped_file.cc
index d26cead89aa..571f21fed16 100644
--- a/chromium/net/disk_cache/blockfile/mapped_file.cc
+++ b/chromium/net/disk_cache/blockfile/mapped_file.cc
@@ -5,8 +5,7 @@
#include "net/disk_cache/blockfile/mapped_file.h"
#include <algorithm>
-
-#include "base/memory/scoped_ptr.h"
+#include <memory>
namespace disk_cache {
@@ -38,7 +37,7 @@ bool MappedFile::Store(const FileBlock* block,
bool MappedFile::Preload() {
size_t file_len = GetLength();
- scoped_ptr<char[]> buf(new char[file_len]);
+ std::unique_ptr<char[]> buf(new char[file_len]);
if (!Read(buf.get(), file_len, 0))
return false;
return true;
diff --git a/chromium/net/disk_cache/blockfile/mapped_file_posix.cc b/chromium/net/disk_cache/blockfile/mapped_file_posix.cc
index 5ff2824fca6..99cf49c1890 100644
--- a/chromium/net/disk_cache/blockfile/mapped_file_posix.cc
+++ b/chromium/net/disk_cache/blockfile/mapped_file_posix.cc
@@ -31,7 +31,7 @@ void* MappedFile::Init(const base::FilePath& name, size_t size) {
buffer_ = 0;
// Make sure we detect hardware failures reading the headers.
- scoped_ptr<char[]> temp(new char[temp_len]);
+ std::unique_ptr<char[]> temp(new char[temp_len]);
if (!Read(temp.get(), temp_len, 0))
return NULL;
diff --git a/chromium/net/disk_cache/blockfile/mapped_file_win.cc b/chromium/net/disk_cache/blockfile/mapped_file_win.cc
index d725238e32e..9a128a36d0e 100644
--- a/chromium/net/disk_cache/blockfile/mapped_file_win.cc
+++ b/chromium/net/disk_cache/blockfile/mapped_file_win.cc
@@ -4,9 +4,10 @@
#include "net/disk_cache/blockfile/mapped_file.h"
+#include <memory>
+
#include "base/files/file_path.h"
#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
#include "net/disk_cache/disk_cache.h"
namespace disk_cache {
@@ -29,7 +30,7 @@ void* MappedFile::Init(const base::FilePath& name, size_t size) {
// Make sure we detect hardware failures reading the headers.
size_t temp_len = size ? size : 4096;
- scoped_ptr<char[]> temp(new char[temp_len]);
+ std::unique_ptr<char[]> temp(new char[temp_len]);
if (!Read(temp.get(), temp_len, 0))
return NULL;
diff --git a/chromium/net/disk_cache/blockfile/rankings.cc b/chromium/net/disk_cache/blockfile/rankings.cc
index 387950f3567..c5593508ab4 100644
--- a/chromium/net/disk_cache/blockfile/rankings.cc
+++ b/chromium/net/disk_cache/blockfile/rankings.cc
@@ -202,9 +202,9 @@ Rankings::ScopedRankingsBlock::ScopedRankingsBlock() : rankings_(NULL) {}
Rankings::ScopedRankingsBlock::ScopedRankingsBlock(Rankings* rankings)
: rankings_(rankings) {}
-Rankings::ScopedRankingsBlock::ScopedRankingsBlock(
- Rankings* rankings, CacheRankingsBlock* node)
- : scoped_ptr<CacheRankingsBlock>(node), rankings_(rankings) {}
+Rankings::ScopedRankingsBlock::ScopedRankingsBlock(Rankings* rankings,
+ CacheRankingsBlock* node)
+ : std::unique_ptr<CacheRankingsBlock>(node), rankings_(rankings) {}
Rankings::Iterator::Iterator() {
memset(this, 0, sizeof(Iterator));
@@ -542,8 +542,8 @@ bool Rankings::SanityCheck(CacheRankingsBlock* node, bool from_list) const {
Addr next_addr(data->next);
Addr prev_addr(data->prev);
- if (!next_addr.SanityCheckV2() || next_addr.file_type() != RANKINGS ||
- !prev_addr.SanityCheckV2() || prev_addr.file_type() != RANKINGS)
+ if (!next_addr.SanityCheck() || next_addr.file_type() != RANKINGS ||
+ !prev_addr.SanityCheck() || prev_addr.file_type() != RANKINGS)
return false;
return true;
@@ -827,7 +827,7 @@ int Rankings::CheckListSection(List list, Addr end1, Addr end2, bool forward,
if (!current.SanityCheckForRankings())
return ERR_INVALID_HEAD;
- scoped_ptr<CacheRankingsBlock> node;
+ std::unique_ptr<CacheRankingsBlock> node;
Addr prev_addr(current);
do {
node.reset(new CacheRankingsBlock(backend_->File(current), current));
diff --git a/chromium/net/disk_cache/blockfile/rankings.h b/chromium/net/disk_cache/blockfile/rankings.h
index 637f6a564d6..66ba7b87603 100644
--- a/chromium/net/disk_cache/blockfile/rankings.h
+++ b/chromium/net/disk_cache/blockfile/rankings.h
@@ -8,9 +8,9 @@
#define NET_DISK_CACHE_BLOCKFILE_RANKINGS_H_
#include <list>
+#include <memory>
#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
#include "net/disk_cache/blockfile/addr.h"
#include "net/disk_cache/blockfile/mapped_file.h"
#include "net/disk_cache/blockfile/storage_block.h"
@@ -66,7 +66,7 @@ class Rankings {
  // This class provides a specialized version of std::unique_ptr that calls
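PLACEHOLDER_UNUSED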
// Rankings whenever a CacheRankingsBlock is deleted, to keep track of cache
// iterators that may go stale.
- class ScopedRankingsBlock : public scoped_ptr<CacheRankingsBlock> {
+ class ScopedRankingsBlock : public std::unique_ptr<CacheRankingsBlock> {
public:
ScopedRankingsBlock();
explicit ScopedRankingsBlock(Rankings* rankings);
@@ -84,7 +84,7 @@ class Rankings {
void reset(CacheRankingsBlock* p = NULL) {
if (p != get())
rankings_->FreeRankingsBlock(get());
- scoped_ptr<CacheRankingsBlock>::reset(p);
+ std::unique_ptr<CacheRankingsBlock>::reset(p);
}
private:
diff --git a/chromium/net/disk_cache/blockfile/sparse_control.cc b/chromium/net/disk_cache/blockfile/sparse_control.cc
index 1de0d60cd11..62c2996cf26 100644
--- a/chromium/net/disk_cache/blockfile/sparse_control.cc
+++ b/chromium/net/disk_cache/blockfile/sparse_control.cc
@@ -14,7 +14,7 @@
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
@@ -79,7 +79,7 @@ class ChildrenDeleter
std::string name_;
disk_cache::Bitmap children_map_;
int64_t signature_;
- scoped_ptr<char[]> buffer_;
+ std::unique_ptr<char[]> buffer_;
DISALLOW_COPY_AND_ASSIGN(ChildrenDeleter);
};
diff --git a/chromium/net/disk_cache/blockfile/sparse_control_v3.cc b/chromium/net/disk_cache/blockfile/sparse_control_v3.cc
deleted file mode 100644
index 82a322122a0..00000000000
--- a/chromium/net/disk_cache/blockfile/sparse_control_v3.cc
+++ /dev/null
@@ -1,873 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/disk_cache/blockfile/sparse_control.h"
-
-#include "base/bind.h"
-#include "base/format_macros.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/message_loop/message_loop.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/time/time.h"
-#include "net/base/io_buffer.h"
-#include "net/base/net_errors.h"
-#include "net/disk_cache/blockfile/backend_impl.h"
-#include "net/disk_cache/blockfile/entry_impl.h"
-#include "net/disk_cache/blockfile/file.h"
-#include "net/disk_cache/net_log_parameters.h"
-
-using base::Time;
-
-namespace {
-
-// Stream of the sparse data index.
-const int kSparseIndex = 2;
-
-// Stream of the sparse data.
-const int kSparseData = 1;
-
-// We can have up to 64k children.
-const int kMaxMapSize = 8 * 1024;
-
-// The maximum number of bytes that a child can store.
-const int kMaxEntrySize = 0x100000;
-
-// The size of each data block (tracked by the child allocation bitmap).
-const int kBlockSize = 1024;
-
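
These constants compose into the 64 GB ceiling enforced later in StartIO(): an 8 KB children map is 64Ki bits, one per child, and each child holds at most 1 MiB. Spelled out:

  #include <cstdint>

  const int kMaxMapSize = 8 * 1024;     // bytes in the children bitmap
  const int kMaxEntrySize = 0x100000;   // 1 MiB per child
  const int64_t kMaxChildren = int64_t(kMaxMapSize) * 8;
  const int64_t kMaxSparseBytes = kMaxChildren * kMaxEntrySize;

  static_assert(kMaxChildren == 64 * 1024, "64k children");
  // 65536 children * 1 MiB = 64 GiB, matching the
  // offset + buf_len < 0x1000000000LL check in StartIO().
  static_assert(kMaxSparseBytes == 0x1000000000LL, "64 GiB of sparse data");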
-// Returns the name of a child entry given the base_name and signature of the
-// parent and the child_id.
-// If the entry is called entry_name, child entries will be named something
-// like Range_entry_name:XXX:YYY where XXX is the entry signature and YYY is the
-// number of the particular child.
-std::string GenerateChildName(const std::string& base_name,
- int64_t signature,
- int64_t child_id) {
- return base::StringPrintf("Range_%s:%" PRIx64 ":%" PRIx64, base_name.c_str(),
- signature, child_id);
-}
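
For instance, a parent keyed "http://example.com/a" with signature 0x4b2 yields children named Range_http://example.com/a:4b2:0, Range_http://example.com/a:4b2:1, and so on. The same formatting with plain snprintf instead of base::StringPrintf, as a self-contained sketch:

  #include <cinttypes>
  #include <cstdio>
  #include <string>

  std::string ChildName(const std::string& base_name,
                        int64_t signature,
                        int64_t child_id) {
    char buf[256];  // ample for illustration; real keys can be longer
    snprintf(buf, sizeof(buf), "Range_%s:%" PRIx64 ":%" PRIx64,
             base_name.c_str(), signature, child_id);
    return buf;
  }

  int main() {
    // Prints "Range_http://example.com/a:4b2:3".
    printf("%s\n", ChildName("http://example.com/a", 0x4b2, 3).c_str());
    return 0;
  }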
-
-// This class deletes the children of a sparse entry.
-class ChildrenDeleter
- : public base::RefCounted<ChildrenDeleter>,
- public disk_cache::FileIOCallback {
- public:
- ChildrenDeleter(disk_cache::BackendImpl* backend, const std::string& name)
- : backend_(backend->GetWeakPtr()), name_(name), signature_(0) {}
-
- void OnFileIOComplete(int bytes_copied) override;
-
- // Two ways of deleting the children: if we have the children map, use Start()
- // directly, otherwise pass the data address to ReadData().
- void Start(char* buffer, int len);
- void ReadData(disk_cache::Addr address, int len);
-
- private:
- friend class base::RefCounted<ChildrenDeleter>;
- ~ChildrenDeleter() override {}
-
- void DeleteChildren();
-
- base::WeakPtr<disk_cache::BackendImpl> backend_;
- std::string name_;
- disk_cache::Bitmap children_map_;
- int64_t signature_;
- scoped_ptr<char[]> buffer_;
- DISALLOW_COPY_AND_ASSIGN(ChildrenDeleter);
-};
-
-// This is the callback of the file operation.
-void ChildrenDeleter::OnFileIOComplete(int bytes_copied) {
- char* buffer = buffer_.release();
- Start(buffer, bytes_copied);
-}
-
-void ChildrenDeleter::Start(char* buffer, int len) {
- buffer_.reset(buffer);
- if (len < static_cast<int>(sizeof(disk_cache::SparseData)))
- return Release();
-
- // Just copy the information from |buffer|, delete |buffer| and start deleting
- // the child entries.
- disk_cache::SparseData* data =
- reinterpret_cast<disk_cache::SparseData*>(buffer);
- signature_ = data->header.signature;
-
- int num_bits = (len - sizeof(disk_cache::SparseHeader)) * 8;
- children_map_.Resize(num_bits, false);
- children_map_.SetMap(data->bitmap, num_bits / 32);
- buffer_.reset();
-
- DeleteChildren();
-}
-
-void ChildrenDeleter::ReadData(disk_cache::Addr address, int len) {
- DCHECK(address.is_block_file());
- if (!backend_)
- return Release();
-
- disk_cache::File* file(backend_->File(address));
- if (!file)
- return Release();
-
- size_t file_offset = address.start_block() * address.BlockSize() +
- disk_cache::kBlockHeaderSize;
-
- buffer_.reset(new char[len]);
- bool completed;
- if (!file->Read(buffer_.get(), len, file_offset, this, &completed))
- return Release();
-
- if (completed)
- OnFileIOComplete(len);
-
- // And wait until OnFileIOComplete gets called.
-}
-
-void ChildrenDeleter::DeleteChildren() {
- int child_id = 0;
- if (!children_map_.FindNextSetBit(&child_id) || !backend_) {
- // We are done. Just delete this object.
- return Release();
- }
- std::string child_name = GenerateChildName(name_, signature_, child_id);
- backend_->SyncDoomEntry(child_name);
- children_map_.Set(child_id, false);
-
- // Post a task to delete the next child.
- base::MessageLoop::current()->PostTask(
- FROM_HERE, base::Bind(&ChildrenDeleter::DeleteChildren, this));
-}
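
Note the shape of DeleteChildren: doom one child, then re-post, so a sparse entry with thousands of children never blocks the cache thread for long. A standalone sketch of that self-reposting pattern, with a plain queue standing in for base::MessageLoop:

  #include <cstdio>
  #include <functional>
  #include <queue>
  #include <vector>

  std::queue<std::function<void()>> g_tasks;  // stand-in for the message loop

  void DeleteOneChildThenRepost(std::vector<int>* children) {
    if (children->empty())
      return;  // Done; the real code drops its last reference here.
    printf("dooming child %d\n", children->back());
    children->pop_back();
    // Re-post so unrelated cache work can interleave between deletions.
    g_tasks.push([children] { DeleteOneChildThenRepost(children); });
  }

  int main() {
    std::vector<int> children = {0, 1, 2, 3};
    g_tasks.push([&children] { DeleteOneChildThenRepost(&children); });
    while (!g_tasks.empty()) {  // drain the "loop"
      std::function<void()> task = std::move(g_tasks.front());
      g_tasks.pop();
      task();
    }
    return 0;
  }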
-
-// -----------------------------------------------------------------------
-
-// Returns the NetLog event type corresponding to a SparseOperation.
-net::NetLog::EventType GetSparseEventType(
- disk_cache::SparseControl::SparseOperation operation) {
- switch (operation) {
- case disk_cache::SparseControl::kReadOperation:
- return net::NetLog::TYPE_SPARSE_READ;
- case disk_cache::SparseControl::kWriteOperation:
- return net::NetLog::TYPE_SPARSE_WRITE;
- case disk_cache::SparseControl::kGetRangeOperation:
- return net::NetLog::TYPE_SPARSE_GET_RANGE;
- default:
- NOTREACHED();
- return net::NetLog::TYPE_CANCELLED;
- }
-}
-
-// Logs the end event for |operation| on a child entry. Range operations log
-// no events for each child they search through.
-void LogChildOperationEnd(const net::BoundNetLog& net_log,
- disk_cache::SparseControl::SparseOperation operation,
- int result) {
- if (net_log.IsCapturing()) {
- net::NetLog::EventType event_type;
- switch (operation) {
- case disk_cache::SparseControl::kReadOperation:
- event_type = net::NetLog::TYPE_SPARSE_READ_CHILD_DATA;
- break;
- case disk_cache::SparseControl::kWriteOperation:
- event_type = net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA;
- break;
- case disk_cache::SparseControl::kGetRangeOperation:
- return;
- default:
- NOTREACHED();
- return;
- }
- net_log.EndEventWithNetErrorCode(event_type, result);
- }
-}
-
-} // namespace
-
-namespace disk_cache {
-
-SparseControl::SparseControl(EntryImpl* entry)
- : entry_(entry),
- child_(NULL),
- operation_(kNoOperation),
- pending_(false),
- finished_(false),
- init_(false),
- range_found_(false),
- abort_(false),
- child_map_(child_data_.bitmap, kNumSparseBits, kNumSparseBits / 32),
- offset_(0),
- buf_len_(0),
- child_offset_(0),
- child_len_(0),
- result_(0) {
- memset(&sparse_header_, 0, sizeof(sparse_header_));
- memset(&child_data_, 0, sizeof(child_data_));
-}
-
-SparseControl::~SparseControl() {
- if (child_)
- CloseChild();
- if (init_)
- WriteSparseData();
-}
-
-bool SparseControl::CouldBeSparse() const {
- DCHECK(!init_);
-
- if (entry_->GetDataSize(kSparseData))
- return false;
-
- // We don't verify the data, just see if it could be there.
- return (entry_->GetDataSize(kSparseIndex) != 0);
-}
-
-int SparseControl::StartIO(SparseOperation op,
- int64_t offset,
- net::IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback) {
- DCHECK(init_);
- // We don't support simultaneous IO for sparse data.
- if (operation_ != kNoOperation)
- return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
-
- if (offset < 0 || buf_len < 0)
- return net::ERR_INVALID_ARGUMENT;
-
- // We only support up to 64 GB.
- if (offset + buf_len >= 0x1000000000LL || offset + buf_len < 0)
- return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
-
- DCHECK(!user_buf_);
- DCHECK(user_callback_.is_null());
-
- if (!buf && (op == kReadOperation || op == kWriteOperation))
- return 0;
-
- // Copy the operation parameters.
- operation_ = op;
- offset_ = offset;
- user_buf_ = buf ? new net::DrainableIOBuffer(buf, buf_len) : NULL;
- buf_len_ = buf_len;
- user_callback_ = callback;
-
- result_ = 0;
- pending_ = false;
- finished_ = false;
- abort_ = false;
-
- if (entry_->net_log().IsCapturing()) {
- entry_->net_log().BeginEvent(
- GetSparseEventType(operation_),
- CreateNetLogSparseOperationCallback(offset_, buf_len_));
- }
- DoChildrenIO();
-
- if (!pending_) {
- // Everything was done synchronously.
- operation_ = kNoOperation;
- user_buf_ = NULL;
- user_callback_.Reset();
- return result_;
- }
-
- return net::ERR_IO_PENDING;
-}
-
-int SparseControl::GetAvailableRange(int64_t offset, int len, int64_t* start) {
- DCHECK(init_);
- // We don't support simultaneous IO for sparse data.
- if (operation_ != kNoOperation)
- return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
-
- DCHECK(start);
-
- range_found_ = false;
- int result = StartIO(
- kGetRangeOperation, offset, NULL, len, CompletionCallback());
- if (range_found_) {
- *start = offset_;
- return result;
- }
-
- // This is a failure. We want to return a valid start value in any case.
- *start = offset;
- return result < 0 ? result : 0; // Don't mask error codes to the caller.
-}
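
The contract implemented here: on success, *start points at the first stored byte inside the queried window and the return value is the number of contiguous bytes available from there; on a miss, *start echoes offset and the result is 0 (or a negative error code). A toy model of that contract over a sorted list of stored ranges, illustrating the documented semantics rather than the real bitmap walk:

  #include <algorithm>
  #include <cstdint>
  #include <utility>
  #include <vector>

  // Each pair is a [begin, end) byte range present in the sparse entry;
  // the list is assumed sorted and non-overlapping.
  using Ranges = std::vector<std::pair<int64_t, int64_t>>;

  int GetAvailableRange(const Ranges& stored, int64_t offset, int len,
                        int64_t* start) {
    for (const std::pair<int64_t, int64_t>& r : stored) {
      int64_t begin = std::max(r.first, offset);
      int64_t end = std::min(r.second, offset + len);
      if (begin < end) {  // This range overlaps the queried window.
        *start = begin;
        return static_cast<int>(end - begin);
      }
    }
    *start = offset;  // Nothing found; still report a valid start.
    return 0;
  }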
-
-void SparseControl::CancelIO() {
- if (operation_ == kNoOperation)
- return;
- abort_ = true;
-}
-
-int SparseControl::ReadyToUse(const CompletionCallback& callback) {
- if (!abort_)
- return net::OK;
-
- // We'll grab another reference to keep this object alive because we just have
- // one extra reference due to the pending IO operation itself, but we'll
- // release that one before invoking user_callback_.
- entry_->AddRef(); // Balanced in DoAbortCallbacks.
- abort_callbacks_.push_back(callback);
- return net::ERR_IO_PENDING;
-}
-
-// Static
-void SparseControl::DeleteChildren(EntryImpl* entry) {
- DCHECK(entry->GetEntryFlags() & PARENT_ENTRY);
- int data_len = entry->GetDataSize(kSparseIndex);
- if (data_len < static_cast<int>(sizeof(SparseData)) ||
- entry->GetDataSize(kSparseData))
- return;
-
- int map_len = data_len - sizeof(SparseHeader);
- if (map_len > kMaxMapSize || map_len % 4)
- return;
-
- char* buffer;
- Addr address;
- entry->GetData(kSparseIndex, &buffer, &address);
- if (!buffer && !address.is_initialized())
- return;
-
- entry->net_log().AddEvent(net::NetLog::TYPE_SPARSE_DELETE_CHILDREN);
-
- DCHECK(entry->backend_);
- ChildrenDeleter* deleter = new ChildrenDeleter(entry->backend_.get(),
- entry->GetKey());
- // The object will self destruct when finished.
- deleter->AddRef();
-
- if (buffer) {
- base::MessageLoop::current()->PostTask(
- FROM_HERE,
- base::Bind(&ChildrenDeleter::Start, deleter, buffer, data_len));
- } else {
- base::MessageLoop::current()->PostTask(
- FROM_HERE,
- base::Bind(&ChildrenDeleter::ReadData, deleter, address, data_len));
- }
-}
-
-// -----------------------------------------------------------------------
-
-int SparseControl::Init() {
- DCHECK(!init_);
-
- // We should not have sparse data for the exposed entry.
- if (entry_->GetDataSize(kSparseData))
- return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
-
- // Now see if there is something where we store our data.
- int rv = net::OK;
- int data_len = entry_->GetDataSize(kSparseIndex);
- if (!data_len) {
- rv = CreateSparseEntry();
- } else {
- rv = OpenSparseEntry(data_len);
- }
-
- if (rv == net::OK)
- init_ = true;
- return rv;
-}
-
-// We are going to start using this entry to store sparse data, so we have to
-// initialize our control info.
-int SparseControl::CreateSparseEntry() {
- if (CHILD_ENTRY & entry_->GetEntryFlags())
- return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
-
- memset(&sparse_header_, 0, sizeof(sparse_header_));
- sparse_header_.signature = Time::Now().ToInternalValue();
- sparse_header_.magic = kIndexMagic;
- sparse_header_.parent_key_len = entry_->GetKey().size();
- children_map_.Resize(kNumSparseBits, true);
-
- // Save the header. The bitmap is saved in the destructor.
- scoped_refptr<net::IOBuffer> buf(
- new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_)));
-
- int rv = entry_->WriteData(kSparseIndex, 0, buf.get(), sizeof(sparse_header_),
- CompletionCallback(), false);
- if (rv != sizeof(sparse_header_)) {
- DLOG(ERROR) << "Unable to save sparse_header_";
- return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
- }
-
- entry_->SetEntryFlags(PARENT_ENTRY);
- return net::OK;
-}
-
-// We are opening an entry from disk. Make sure that our control data is there.
-int SparseControl::OpenSparseEntry(int data_len) {
- if (data_len < static_cast<int>(sizeof(SparseData)))
- return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
-
- if (entry_->GetDataSize(kSparseData))
- return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
-
- if (!(PARENT_ENTRY & entry_->GetEntryFlags()))
- return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
-
- // Don't go overboard with the bitmap. 8 KB gives us offsets up to 64 GB.
- int map_len = data_len - sizeof(sparse_header_);
- if (map_len > kMaxMapSize || map_len % 4)
- return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
-
- scoped_refptr<net::IOBuffer> buf(
- new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_)));
-
- // Read header.
- int rv = entry_->ReadData(kSparseIndex, 0, buf.get(), sizeof(sparse_header_),
- CompletionCallback());
- if (rv != static_cast<int>(sizeof(sparse_header_)))
- return net::ERR_CACHE_READ_FAILURE;
-
- // The real validation should be performed by the caller. This is just to
- // double check.
- if (sparse_header_.magic != kIndexMagic ||
- sparse_header_.parent_key_len !=
- static_cast<int>(entry_->GetKey().size()))
- return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
-
- // Read the actual bitmap.
- buf = new net::IOBuffer(map_len);
- rv = entry_->ReadData(kSparseIndex, sizeof(sparse_header_), buf.get(),
- map_len, CompletionCallback());
- if (rv != map_len)
- return net::ERR_CACHE_READ_FAILURE;
-
- // Grow the bitmap to the current size and copy the bits.
- children_map_.Resize(map_len * 8, false);
- children_map_.SetMap(reinterpret_cast<uint32_t*>(buf->data()), map_len);
- return net::OK;
-}
-
-bool SparseControl::OpenChild() {
- DCHECK_GE(result_, 0);
-
- std::string key = GenerateChildKey();
- if (child_) {
- // Keep using the same child or open another one?
- if (key == child_->GetKey())
- return true;
- CloseChild();
- }
-
- // See if we are tracking this child.
- if (!ChildPresent())
- return ContinueWithoutChild(key);
-
- if (!entry_->backend_)
- return false;
-
- child_ = entry_->backend_->OpenEntryImpl(key);
- if (!child_)
- return ContinueWithoutChild(key);
-
- EntryImpl* child = static_cast<EntryImpl*>(child_);
- if (!(CHILD_ENTRY & child->GetEntryFlags()) ||
- child->GetDataSize(kSparseIndex) <
- static_cast<int>(sizeof(child_data_)))
- return KillChildAndContinue(key, false);
-
- scoped_refptr<net::WrappedIOBuffer> buf(
- new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
-
- // Read signature.
- int rv = child_->ReadData(kSparseIndex, 0, buf.get(), sizeof(child_data_),
- CompletionCallback());
- if (rv != sizeof(child_data_))
- return KillChildAndContinue(key, true); // This is a fatal failure.
-
- if (child_data_.header.signature != sparse_header_.signature ||
- child_data_.header.magic != kIndexMagic)
- return KillChildAndContinue(key, false);
-
- if (child_data_.header.last_block_len < 0 ||
- child_data_.header.last_block_len > kBlockSize) {
- // Make sure these values are always within range.
- child_data_.header.last_block_len = 0;
- child_data_.header.last_block = -1;
- }
-
- return true;
-}
-
-void SparseControl::CloseChild() {
- scoped_refptr<net::WrappedIOBuffer> buf(
- new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
-
- // Save the allocation bitmap before closing the child entry.
- int rv = child_->WriteData(kSparseIndex, 0, buf.get(), sizeof(child_data_),
- CompletionCallback(),
- false);
- if (rv != sizeof(child_data_))
- DLOG(ERROR) << "Failed to save child data";
- child_->Release();
- child_ = NULL;
-}
-
-// We were not able to open this child; see what we can do.
-bool SparseControl::ContinueWithoutChild(const std::string& key) {
- if (kReadOperation == operation_)
- return false;
- if (kGetRangeOperation == operation_)
- return true;
-
- if (!entry_->backend_)
- return false;
-
- child_ = entry_->backend_->CreateEntryImpl(key);
- if (!child_) {
- child_ = NULL;
- result_ = net::ERR_CACHE_READ_FAILURE;
- return false;
- }
- // Write signature.
- InitChildData();
- return true;
-}
-
-void SparseControl::WriteSparseData() {
- scoped_refptr<net::IOBuffer> buf(new net::WrappedIOBuffer(
- reinterpret_cast<const char*>(children_map_.GetMap())));
-
- int len = children_map_.ArraySize() * 4;
- int rv = entry_->WriteData(kSparseIndex, sizeof(sparse_header_), buf.get(),
- len, CompletionCallback(), false);
- if (rv != len) {
- DLOG(ERROR) << "Unable to save sparse map";
- }
-}
-
-bool SparseControl::DoChildIO() {
- finished_ = true;
- if (!buf_len_ || result_ < 0)
- return false;
-
- if (!OpenChild())
- return false;
-
- if (!VerifyRange())
- return false;
-
- // We have more work to do. Let's not trigger a callback to the caller.
- finished_ = false;
- CompletionCallback callback;
- if (!user_callback_.is_null()) {
- callback =
- base::Bind(&SparseControl::OnChildIOCompleted, base::Unretained(this));
- }
-
- int rv = 0;
- switch (operation_) {
- case kReadOperation:
- if (entry_->net_log().IsCapturing()) {
- entry_->net_log().BeginEvent(
- net::NetLog::TYPE_SPARSE_READ_CHILD_DATA,
- CreateNetLogSparseReadWriteCallback(child_->net_log().source(),
- child_len_));
- }
- rv = child_->ReadDataImpl(kSparseData, child_offset_, user_buf_.get(),
- child_len_, callback);
- break;
- case kWriteOperation:
- if (entry_->net_log().IsCapturing()) {
- entry_->net_log().BeginEvent(
- net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA,
- CreateNetLogSparseReadWriteCallback(child_->net_log().source(),
- child_len_));
- }
- rv = child_->WriteDataImpl(kSparseData, child_offset_, user_buf_.get(),
- child_len_, callback, false);
- break;
- case kGetRangeOperation:
- rv = DoGetAvailableRange();
- break;
- default:
- NOTREACHED();
- }
-
- if (rv == net::ERR_IO_PENDING) {
- if (!pending_) {
- pending_ = true;
- // The child will protect itself against closing the entry while IO is in
- // progress. However, this entry can still be closed, and that would not
- // be a good thing for us, so we increase the refcount until we're
- // finished doing sparse stuff.
- entry_->AddRef(); // Balanced in DoUserCallback.
- }
- return false;
- }
- if (!rv)
- return false;
-
- DoChildIOCompleted(rv);
- return true;
-}
-
-void SparseControl::DoChildIOCompleted(int result) {
- LogChildOperationEnd(entry_->net_log(), operation_, result);
- if (result < 0) {
- // We fail the whole operation if we encounter an error.
- result_ = result;
- return;
- }
-
- UpdateRange(result);
-
- result_ += result;
- offset_ += result;
- buf_len_ -= result;
-
- // We'll be reusing the user provided buffer for the next chunk.
- if (buf_len_ && user_buf_)
- user_buf_->DidConsume(result);
-}
-
-std::string SparseControl::GenerateChildKey() {
- return GenerateChildName(entry_->GetKey(), sparse_header_.signature,
- offset_ >> 20);
-}
-
-// We are deleting the child because something went wrong.
-bool SparseControl::KillChildAndContinue(const std::string& key, bool fatal) {
- SetChildBit(false);
- child_->DoomImpl();
- child_->Release();
- child_ = NULL;
- if (fatal) {
- result_ = net::ERR_CACHE_READ_FAILURE;
- return false;
- }
- return ContinueWithoutChild(key);
-}
-
-bool SparseControl::ChildPresent() {
- int child_bit = static_cast<int>(offset_ >> 20);
- if (children_map_.Size() <= child_bit)
- return false;
-
- return children_map_.Get(child_bit);
-}
-
-void SparseControl::SetChildBit(bool value) {
- int child_bit = static_cast<int>(offset_ >> 20);
-
- // We may have to increase the bitmap of child entries.
- if (children_map_.Size() <= child_bit)
- children_map_.Resize(Bitmap::RequiredArraySize(child_bit + 1) * 32, true);
-
- children_map_.Set(child_bit, value);
-}
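
ChildPresent and SetChildBit share one mapping: children cover 1 MiB apiece, so bit offset_ >> 20 names the child. A few spot checks:

  #include <cassert>
  #include <cstdint>

  int ChildIndexFor(int64_t offset) {
    return static_cast<int>(offset >> 20);  // 1 MiB (2^20 bytes) per child
  }

  int main() {
    assert(ChildIndexFor(0) == 0);
    assert(ChildIndexFor((1 << 20) - 1) == 0);  // last byte of child 0
    assert(ChildIndexFor(1 << 20) == 1);        // first byte of child 1
    assert(ChildIndexFor(int64_t(5) << 20) == 5);
    return 0;
  }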
-
-bool SparseControl::VerifyRange() {
- DCHECK_GE(result_, 0);
-
- child_offset_ = static_cast<int>(offset_) & (kMaxEntrySize - 1);
- child_len_ = std::min(buf_len_, kMaxEntrySize - child_offset_);
-
- // We can write to (or get info from) anywhere in this child.
- if (operation_ != kReadOperation)
- return true;
-
- // Check that there are no holes in this range.
- int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
- int start = child_offset_ >> 10;
- if (child_map_.FindNextBit(&start, last_bit, false)) {
- // Something is not here.
- DCHECK_GE(child_data_.header.last_block_len, 0);
- DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
- int partial_block_len = PartialBlockLength(start);
- if (start == child_offset_ >> 10) {
- // It looks like we don't have anything.
- if (partial_block_len <= (child_offset_ & (kBlockSize - 1)))
- return false;
- }
-
- // We have the first part.
- child_len_ = (start << 10) - child_offset_;
- if (partial_block_len) {
- // We may have a few extra bytes.
- child_len_ = std::min(child_len_ + partial_block_len, buf_len_);
- }
- // There is no need to read more after this one.
- buf_len_ = child_len_;
- }
- return true;
-}
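
The check above operates at 1 KiB granularity: every block bit between child_offset_ >> 10 and the rounded-up end must be set, otherwise the read is clipped at the first missing block (with a small allowance for a trailing partial block). A reduced sketch of the clipping over a plain bitmap, ignoring the partial-block bookkeeping:

  #include <vector>

  const int kBlockSize = 1024;

  // Returns how many of |len| bytes starting at child-local |offset| can be
  // read before hitting a block that is not present in |blocks|.
  int ClipToPresentBlocks(const std::vector<bool>& blocks,
                          int offset, int len) {
    int first = offset / kBlockSize;
    int last = (offset + len + kBlockSize - 1) / kBlockSize;  // exclusive
    for (int b = first; b < last; ++b) {
      if (b >= static_cast<int>(blocks.size()) || !blocks[b])
        return b == first ? 0 : b * kBlockSize - offset;
    }
    return len;  // No holes; the whole request is readable.
  }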
-
-void SparseControl::UpdateRange(int result) {
- if (result <= 0 || operation_ != kWriteOperation)
- return;
-
- DCHECK_GE(child_data_.header.last_block_len, 0);
- DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
-
- // Write the bitmap.
- int first_bit = child_offset_ >> 10;
- int block_offset = child_offset_ & (kBlockSize - 1);
- if (block_offset && (child_data_.header.last_block != first_bit ||
- child_data_.header.last_block_len < block_offset)) {
- // The first block is not completely filled; ignore it.
- first_bit++;
- }
-
- int last_bit = (child_offset_ + result) >> 10;
- block_offset = (child_offset_ + result) & (kBlockSize - 1);
-
- // This condition holds when all of the following are true:
- // 1. The first byte doesn't follow the last write.
- // 2. The first byte is in the middle of a block.
- // 3. The first byte and the last byte are in the same block.
- if (first_bit > last_bit)
- return;
-
- if (block_offset && !child_map_.Get(last_bit)) {
- // The last block is not completely filled; save it for later.
- child_data_.header.last_block = last_bit;
- child_data_.header.last_block_len = block_offset;
- } else {
- child_data_.header.last_block = -1;
- }
-
- child_map_.SetRange(first_bit, last_bit, true);
-}
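
Summarized: fully covered 1 KiB blocks get their bits set, a partially filled leading block is skipped unless it extends the previous write, and a partially filled trailing block is remembered in last_block / last_block_len so a later write can complete it. A compact model of the trailing-block bookkeeping (leading-block subtleties omitted):

  #include <vector>

  const int kBlockSize = 1024;

  struct ChildState {
    std::vector<bool> blocks = std::vector<bool>(1024, false);
    int last_block = -1;     // index of the trailing partial block, if any
    int last_block_len = 0;  // bytes stored in that block
  };

  void RecordWrite(ChildState* c, int offset, int written) {
    int first = (offset + kBlockSize - 1) / kBlockSize;  // skip a partial head
    int last = (offset + written) / kBlockSize;
    int tail = (offset + written) % kBlockSize;
    for (int b = first; b < last; ++b)
      c->blocks[b] = true;  // these blocks are now completely filled
    if (tail != 0 && !c->blocks[last]) {
      c->last_block = last;  // remember the partial tail for a later write
      c->last_block_len = tail;
    } else {
      c->last_block = -1;
    }
  }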
-
-int SparseControl::PartialBlockLength(int block_index) const {
- if (block_index == child_data_.header.last_block)
- return child_data_.header.last_block_len;
-
- // This may be the last stored index.
- int entry_len = child_->GetDataSize(kSparseData);
- if (block_index == entry_len >> 10)
- return entry_len & (kBlockSize - 1);
-
- // This is really empty.
- return 0;
-}
-
-void SparseControl::InitChildData() {
- // We know the real type of child_.
- EntryImpl* child = static_cast<EntryImpl*>(child_);
- child->SetEntryFlags(CHILD_ENTRY);
-
- memset(&child_data_, 0, sizeof(child_data_));
- child_data_.header = sparse_header_;
-
- scoped_refptr<net::WrappedIOBuffer> buf(
- new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
-
- int rv = child_->WriteData(kSparseIndex, 0, buf.get(), sizeof(child_data_),
- CompletionCallback(), false);
- if (rv != sizeof(child_data_))
- DLOG(ERROR) << "Failed to save child data";
- SetChildBit(true);
-}
-
-int SparseControl::DoGetAvailableRange() {
- if (!child_)
- return child_len_; // Move on to the next child.
-
- // Check that there are no holes in this range.
- int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
- int start = child_offset_ >> 10;
- int partial_start_bytes = PartialBlockLength(start);
- int found = start;
- int bits_found = child_map_.FindBits(&found, last_bit, true);
-
- // We don't care if there is a partial block in the middle of the range.
- int block_offset = child_offset_ & (kBlockSize - 1);
- if (!bits_found && partial_start_bytes <= block_offset)
- return child_len_;
-
- // We are done. Just break the loop and reset result_ to our real result.
- range_found_ = true;
-
- // |found| now points to the first set bit. Let's see if we have zeros before it.
- int empty_start = std::max((found << 10) - child_offset_, 0);
-
- int bytes_found = bits_found << 10;
- bytes_found += PartialBlockLength(found + bits_found);
-
- if (start == found)
- bytes_found -= block_offset;
-
- // If the user is searching past the end of this child, bits_found is the
- // right result; otherwise, we have some empty space at the start of this
- // query that we have to subtract from the range that we searched.
- result_ = std::min(bytes_found, child_len_ - empty_start);
-
- if (!bits_found) {
- result_ = std::min(partial_start_bytes - block_offset, child_len_);
- empty_start = 0;
- }
-
- // Only update offset_ when this query found zeros at the start.
- if (empty_start)
- offset_ += empty_start;
-
- // This will actually break the loop.
- buf_len_ = 0;
- return 0;
-}
-
-void SparseControl::DoUserCallback() {
- DCHECK(!user_callback_.is_null());
- CompletionCallback cb = user_callback_;
- user_callback_.Reset();
- user_buf_ = NULL;
- pending_ = false;
- operation_ = kNoOperation;
- int rv = result_;
- entry_->Release(); // Don't touch object after this line.
- cb.Run(rv);
-}
-
-void SparseControl::DoAbortCallbacks() {
- for (size_t i = 0; i < abort_callbacks_.size(); i++) {
- // Releasing all references to entry_ may result in the destruction of this
- // object so we should not be touching it after the last Release().
- CompletionCallback cb = abort_callbacks_[i];
- if (i == abort_callbacks_.size() - 1)
- abort_callbacks_.clear();
-
- entry_->Release(); // Don't touch object after this line.
- cb.Run(net::OK);
- }
-}
-
-void SparseControl::OnChildIOCompleted(int result) {
- DCHECK_NE(net::ERR_IO_PENDING, result);
- DoChildIOCompleted(result);
-
- if (abort_) {
- // We'll return the current result of the operation, which may be less than
- // the bytes to read or write, but the user cancelled the operation.
- abort_ = false;
- if (entry_->net_log().IsCapturing()) {
- entry_->net_log().AddEvent(net::NetLog::TYPE_CANCELLED);
- entry_->net_log().EndEvent(GetSparseEventType(operation_));
- }
- // We have an indirect reference to this object for every callback so if
- // there is only one callback, we may delete this object before reaching
- // DoAbortCallbacks.
- bool has_abort_callbacks = !abort_callbacks_.empty();
- DoUserCallback();
- if (has_abort_callbacks)
- DoAbortCallbacks();
- return;
- }
-
- // We are running a callback from the message loop. It's time to restart what
- // we were doing before.
- DoChildrenIO();
-}
-
-} // namespace disk_cache
diff --git a/chromium/net/disk_cache/blockfile/sparse_control_v3.h b/chromium/net/disk_cache/blockfile/sparse_control_v3.h
deleted file mode 100644
index a6c7d19c1cf..00000000000
--- a/chromium/net/disk_cache/blockfile/sparse_control_v3.h
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef NET_DISK_CACHE_BLOCKFILE_SPARSE_CONTROL_V3_H_
-#define NET_DISK_CACHE_BLOCKFILE_SPARSE_CONTROL_V3_H_
-
-#include <stdint.h>
-
-#include <string>
-#include <vector>
-
-#include "base/compiler_specific.h"
-#include "base/macros.h"
-#include "net/base/completion_callback.h"
-#include "net/disk_cache/blockfile/bitmap.h"
-#include "net/disk_cache/disk_format.h"
-
-namespace net {
-class IOBuffer;
-class DrainableIOBuffer;
-}
-
-namespace disk_cache {
-
-class Entry;
-class EntryImpl;
-
-// This class provides support for the sparse capabilities of the disk cache.
-// Basically, sparse IO is directed from EntryImpl to this class, and we split
-// the operation into multiple small pieces, sending each one to the
-// appropriate entry. An instance of this class is associated with each entry
-// used directly for sparse operations (the entry passed to the constructor).
-class SparseControl {
- public:
- typedef net::CompletionCallback CompletionCallback;
-
- // The operation to perform.
- enum SparseOperation {
- kNoOperation,
- kReadOperation,
- kWriteOperation,
- kGetRangeOperation
- };
-
- explicit SparseControl(EntryImpl* entry);
- ~SparseControl();
-
- // Performs a quick test to see if the entry is sparse or not, without
- // generating disk IO (so the answer provided is only a best effort).
- bool CouldBeSparse() const;
-
- // Performs an actual sparse read or write operation for this entry. |op| is
- // the operation to perform, |offset| is the desired sparse offset, |buf| and
- // |buf_len| specify the actual data to use and |callback| is the callback
- // to use for asynchronous operations. See the description of the Read /
- // WriteSparseData for details about the arguments. The return value is the
- // number of bytes read or written, or a net error code.
- int StartIO(SparseOperation op,
- int64_t offset,
- net::IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback);
-
- // Implements Entry::GetAvailableRange().
- int GetAvailableRange(int64_t offset, int len, int64_t* start);
-
- // Cancels the current sparse operation (if any).
- void CancelIO();
-
- // Returns OK if the entry can be used for new IO or ERR_IO_PENDING if we are
- // busy. If the entry is busy, we'll invoke the callback when we are ready
- // again. See disk_cache::Entry::ReadyToUse() for more info.
- int ReadyToUse(const CompletionCallback& completion_callback);
-
- // Deletes the children entries of |entry|.
- static void DeleteChildren(EntryImpl* entry);
-
- private:
- // Initializes the object for the current entry. If this entry already stores
- // sparse data, or can be used to do it, it updates the relevant information
- // on disk and returns net::OK. Otherwise it returns a net error code.
- int Init();
-
- // Creates a new sparse entry or opens an already created entry from disk.
- // These methods just read / write the required info from disk for the current
- // entry, and verify that everything is correct. The return value is a net
- // error code.
- int CreateSparseEntry();
- int OpenSparseEntry(int data_len);
-
- // Opens and closes a child entry. A child entry is a regular EntryImpl object
- // with a key derived from the key of the resource to store and the range
- // stored by that child.
- bool OpenChild();
- void CloseChild();
-
- // Continues the current operation (open) without a current child.
- bool ContinueWithoutChild(const std::string& key);
-
- // Writes to disk the tracking information for this entry.
- void WriteSparseData();
-
- // Performs a single operation with the current child. Returns true when we
- // should move on to the next child and false when we should interrupt our
- // work.
- bool DoChildIO();
-
- // Performs the required work after a single IO operations finishes.
- void DoChildIOCompleted(int result);
-
- std::string GenerateChildKey();
-
- // Deletes the current child and continues the current operation (open).
- bool KillChildAndContinue(const std::string& key, bool fatal);
-
- // Returns true if the required child is tracked by the parent entry, i.e. it
- // was already created.
- bool ChildPresent();
-
- // Sets the bit for the current child to the provided |value|. In other words,
- // starts or stops tracking this child.
- void SetChildBit(bool value);
-
- // Verify that the range to be accessed for the current child is appropriate.
- // Returns false if an error is detected or there is no need to perform the
- // current IO operation (for instance if the required range is not stored by
- // the child).
- bool VerifyRange();
-
- // Updates the contents bitmap for the current range, based on the result of
- // the current operation.
- void UpdateRange(int result);
-
- // Returns the number of bytes stored at |block_index|, if its allocation-bit
- // is off (because it is not completely filled).
- int PartialBlockLength(int block_index) const;
-
- // Initializes the sparse info for the current child.
- void InitChildData();
-
- // Performs the required work for GetAvailableRange for one child.
- int DoGetAvailableRange();
-
- // Reports to the user that we are done.
- void DoUserCallback();
- void DoAbortCallbacks();
-
- // Invoked by the callback of asynchronous operations.
- void OnChildIOCompleted(int result);
-
- EntryImpl* entry_; // The sparse entry.
- EntryImpl* child_; // The current child entry.
- SparseOperation operation_;
- bool pending_; // True if any child IO operation returned pending.
- bool finished_;
- bool init_;
- bool range_found_; // True if GetAvailableRange found something.
- bool abort_; // True if we should abort the current operation ASAP.
-
- SparseHeader sparse_header_; // Data about the children of entry_.
- Bitmap children_map_; // The actual bitmap of children.
- SparseData child_data_; // Parent and allocation map of child_.
- Bitmap child_map_; // The allocation map as a bitmap.
-
- CompletionCallback user_callback_;
- std::vector<CompletionCallback> abort_callbacks_;
- int64_t offset_; // Current sparse offset.
- scoped_refptr<net::DrainableIOBuffer> user_buf_;
- int buf_len_; // Bytes to read or write.
- int child_offset_; // Offset to use for the current child.
- int child_len_; // Bytes to read or write for this child.
- int result_;
-
- DISALLOW_COPY_AND_ASSIGN(SparseControl);
-};
-
-} // namespace disk_cache
-
-#endif // NET_DISK_CACHE_BLOCKFILE_SPARSE_CONTROL_V3_H_
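
The "multiple small pieces" in the class comment above is a walk over 1 MiB children: each iteration clips the request at the next child boundary, the same child_offset_ / child_len_ computation that VerifyRange() performs. A sketch of the decomposition:

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  const int kMaxEntrySize = 0x100000;  // 1 MiB stored per child

  // Prints the (child, child_offset, child_len) pieces that a sparse request
  // of |len| bytes at |offset| decomposes into.
  void SplitRequest(int64_t offset, int len) {
    while (len > 0) {
      int64_t child = offset >> 20;
      int child_offset = static_cast<int>(offset & (kMaxEntrySize - 1));
      int child_len = std::min(len, kMaxEntrySize - child_offset);
      printf("child %lld: offset %d, len %d\n",
             static_cast<long long>(child), child_offset, child_len);
      offset += child_len;
      len -= child_len;
    }
  }

  int main() {
    SplitRequest((1 << 20) - 100, 300);  // straddles children 0 and 1
    return 0;
  }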
diff --git a/chromium/net/disk_cache/blockfile/stats_unittest.cc b/chromium/net/disk_cache/blockfile/stats_unittest.cc
index fe47bdd4cf6..22ef4622360 100644
--- a/chromium/net/disk_cache/blockfile/stats_unittest.cc
+++ b/chromium/net/disk_cache/blockfile/stats_unittest.cc
@@ -4,7 +4,8 @@
#include "net/disk_cache/blockfile/stats.h"
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
#include "testing/gtest/include/gtest/gtest.h"
TEST(DiskCacheStatsTest, Init) {
@@ -16,7 +17,7 @@ TEST(DiskCacheStatsTest, Init) {
TEST(DiskCacheStatsTest, InitWithEmptyBuffer) {
disk_cache::Stats stats;
int required_len = stats.StorageSize();
- scoped_ptr<char[]> storage(new char[required_len]);
+ std::unique_ptr<char[]> storage(new char[required_len]);
memset(storage.get(), 0, required_len);
ASSERT_TRUE(stats.Init(storage.get(), required_len, disk_cache::Addr()));
@@ -26,7 +27,7 @@ TEST(DiskCacheStatsTest, InitWithEmptyBuffer) {
TEST(DiskCacheStatsTest, FailsInit) {
disk_cache::Stats stats;
int required_len = stats.StorageSize();
- scoped_ptr<char[]> storage(new char[required_len]);
+ std::unique_ptr<char[]> storage(new char[required_len]);
memset(storage.get(), 0, required_len);
// Try a small buffer.
@@ -40,7 +41,7 @@ TEST(DiskCacheStatsTest, FailsInit) {
}
TEST(DiskCacheStatsTest, SaveRestore) {
- scoped_ptr<disk_cache::Stats> stats(new disk_cache::Stats);
+ std::unique_ptr<disk_cache::Stats> stats(new disk_cache::Stats);
disk_cache::Addr addr(5);
ASSERT_TRUE(stats->Init(nullptr, 0, addr));
@@ -51,7 +52,7 @@ TEST(DiskCacheStatsTest, SaveRestore) {
stats->OnEvent(disk_cache::Stats::DOOM_RECENT);
int required_len = stats->StorageSize();
- scoped_ptr<char[]> storage(new char[required_len]);
+ std::unique_ptr<char[]> storage(new char[required_len]);
disk_cache::Addr out_addr;
int real_len = stats->SerializeStats(storage.get(), required_len, &out_addr);
EXPECT_GE(required_len, real_len);
diff --git a/chromium/net/disk_cache/cache_creator.cc b/chromium/net/disk_cache/cache_creator.cc
index 54338c1fb08..5245fe83896 100644
--- a/chromium/net/disk_cache/cache_creator.cc
+++ b/chromium/net/disk_cache/cache_creator.cc
@@ -32,7 +32,7 @@ class CacheCreator {
uint32_t flags,
const scoped_refptr<base::SingleThreadTaskRunner>& thread,
net::NetLog* net_log,
- scoped_ptr<disk_cache::Backend>* backend,
+ std::unique_ptr<disk_cache::Backend>* backend,
const net::CompletionCallback& callback);
// Creates the backend.
@@ -53,9 +53,9 @@ class CacheCreator {
net::BackendType backend_type_;
uint32_t flags_;
scoped_refptr<base::SingleThreadTaskRunner> thread_;
- scoped_ptr<disk_cache::Backend>* backend_;
+ std::unique_ptr<disk_cache::Backend>* backend_;
net::CompletionCallback callback_;
- scoped_ptr<disk_cache::Backend> created_cache_;
+ std::unique_ptr<disk_cache::Backend> created_cache_;
net::NetLog* net_log_;
DISALLOW_COPY_AND_ASSIGN(CacheCreator);
@@ -70,7 +70,7 @@ CacheCreator::CacheCreator(
uint32_t flags,
const scoped_refptr<base::SingleThreadTaskRunner>& thread,
net::NetLog* net_log,
- scoped_ptr<disk_cache::Backend>* backend,
+ std::unique_ptr<disk_cache::Backend>* backend,
const net::CompletionCallback& callback)
: path_(path),
force_(force),
@@ -164,7 +164,7 @@ int CreateCacheBackend(
bool force,
const scoped_refptr<base::SingleThreadTaskRunner>& thread,
net::NetLog* net_log,
- scoped_ptr<Backend>* backend,
+ std::unique_ptr<Backend>* backend,
const net::CompletionCallback& callback) {
DCHECK(!callback.is_null());
if (type == net::MEMORY_CACHE) {
diff --git a/chromium/net/disk_cache/disk_cache.h b/chromium/net/disk_cache/disk_cache.h
index 5f99c325058..f71bc81964b 100644
--- a/chromium/net/disk_cache/disk_cache.h
+++ b/chromium/net/disk_cache/disk_cache.h
@@ -10,11 +10,11 @@
#include <stdint.h>
+#include <memory>
#include <string>
#include <vector>
#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
#include "base/strings/string_split.h"
#include "base/time/time.h"
#include "net/base/cache_type.h"
@@ -59,7 +59,7 @@ NET_EXPORT int CreateCacheBackend(
bool force,
const scoped_refptr<base::SingleThreadTaskRunner>& thread,
net::NetLog* net_log,
- scoped_ptr<Backend>* backend,
+ std::unique_ptr<Backend>* backend,
const net::CompletionCallback& callback);
// The root interface for a disk cache instance.
@@ -154,7 +154,7 @@ class NET_EXPORT Backend {
// Returns an iterator which will enumerate all entries of the cache in an
// undefined order.
- virtual scoped_ptr<Iterator> CreateIterator() = 0;
+ virtual std::unique_ptr<Iterator> CreateIterator() = 0;
// Return a list of cache statistics.
virtual void GetStats(base::StringPairs* stats) = 0;
@@ -333,7 +333,7 @@ struct EntryDeleter {
};
// Automatically closes an entry when it goes out of scope.
-typedef scoped_ptr<Entry, EntryDeleter> ScopedEntryPtr;
+typedef std::unique_ptr<Entry, EntryDeleter> ScopedEntryPtr;
} // namespace disk_cache
diff --git a/chromium/net/disk_cache/disk_cache_perftest.cc b/chromium/net/disk_cache/disk_cache_perftest.cc
new file mode 100644
index 00000000000..959143b884d
--- /dev/null
+++ b/chromium/net/disk_cache/disk_cache_perftest.cc
@@ -0,0 +1,304 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+#include <string>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/files/file_enumerator.h"
+#include "base/files/file_path.h"
+#include "base/hash.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/string_util.h"
+#include "base/test/perf_time_logger.h"
+#include "base/test/test_file_util.h"
+#include "base/threading/thread.h"
+#include "net/base/cache_type.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/base/test_completion_callback.h"
+#include "net/disk_cache/blockfile/backend_impl.h"
+#include "net/disk_cache/blockfile/block_files.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+using base::Time;
+
+namespace {
+
+size_t MaybeGetMaxFds() {
+#if defined(OS_POSIX)
+ return base::GetMaxFds();
+#else
+ return std::numeric_limits<size_t>::max();
+#endif
+}
+
+void MaybeSetFdLimit(unsigned int max_descriptors) {
+#if defined(OS_POSIX)
+ base::SetFdLimit(max_descriptors);
+#endif
+}
+
+struct TestEntry {
+ std::string key;
+ int data_len;
+};
+
+class DiskCachePerfTest : public DiskCacheTestWithCache {
+ public:
+ DiskCachePerfTest() : saved_fd_limit_(MaybeGetMaxFds()) {
+ if (saved_fd_limit_ < kFdLimitForCacheTests)
+ MaybeSetFdLimit(kFdLimitForCacheTests);
+ }
+
+ ~DiskCachePerfTest() override {
+ if (saved_fd_limit_ < kFdLimitForCacheTests)
+      MaybeSetFdLimit(saved_fd_limit_);  // Restore the limit we saved.
+ }
+
+ protected:
+ enum class WhatToRead {
+ HEADERS_ONLY,
+ HEADERS_AND_BODY,
+ };
+
+ // Helper methods for constructing tests.
+ bool TimeWrite();
+ bool TimeRead(WhatToRead what_to_read, const char* timer_message);
+ void ResetAndEvictSystemDiskCache();
+
+ // Complete perf tests.
+ void CacheBackendPerformance();
+
+ const size_t kFdLimitForCacheTests = 8192;
+
+ const int kNumEntries = 1000;
+ const int kHeadersSize = 200;
+ const int kBodySize = 16 * 1024 - 1;
+
+ std::vector<TestEntry> entries_;
+
+ private:
+ const size_t saved_fd_limit_;
+};
+
+// Creates kNumEntries entries in the cache, and writes kHeadersSize bytes of
+// metadata and up to kBodySize of data to each entry.
+bool DiskCachePerfTest::TimeWrite() {
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kHeadersSize));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kBodySize));
+
+ CacheTestFillBuffer(buffer1->data(), kHeadersSize, false);
+ CacheTestFillBuffer(buffer2->data(), kBodySize, false);
+
+ int expected = 0;
+
+ MessageLoopHelper helper;
+ CallbackTest callback(&helper, true);
+
+ base::PerfTimeLogger timer("Write disk cache entries");
+
+ for (int i = 0; i < kNumEntries; i++) {
+ TestEntry entry;
+ entry.key = GenerateKey(true);
+ entry.data_len = rand() % kBodySize;
+ entries_.push_back(entry);
+
+ disk_cache::Entry* cache_entry;
+ net::TestCompletionCallback cb;
+ int rv = cache_->CreateEntry(entry.key, &cache_entry, cb.callback());
+ if (net::OK != cb.GetResult(rv))
+ break;
+ int ret = cache_entry->WriteData(
+ 0, 0, buffer1.get(), kHeadersSize,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback)), false);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (kHeadersSize != ret)
+ break;
+
+ ret = cache_entry->WriteData(
+ 1, 0, buffer2.get(), entry.data_len,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback)), false);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (entry.data_len != ret)
+ break;
+ cache_entry->Close();
+ }
+
+ helper.WaitUntilCacheIoFinished(expected);
+ timer.Done();
+
+ return expected == helper.callbacks_called();
+}
+
+// Reads the data and metadata from each entry listed in |entries_|.
+bool DiskCachePerfTest::TimeRead(WhatToRead what_to_read,
+ const char* timer_message) {
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kHeadersSize));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kBodySize));
+
+ CacheTestFillBuffer(buffer1->data(), kHeadersSize, false);
+ CacheTestFillBuffer(buffer2->data(), kBodySize, false);
+
+ int expected = 0;
+
+ MessageLoopHelper helper;
+ CallbackTest callback(&helper, true);
+
+ base::PerfTimeLogger timer(timer_message);
+
+ for (int i = 0; i < kNumEntries; i++) {
+ disk_cache::Entry* cache_entry;
+ net::TestCompletionCallback cb;
+ int rv = cache_->OpenEntry(entries_[i].key, &cache_entry, cb.callback());
+ if (net::OK != cb.GetResult(rv))
+ break;
+ int ret = cache_entry->ReadData(
+ 0, 0, buffer1.get(), kHeadersSize,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback)));
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (kHeadersSize != ret)
+ break;
+
+ if (what_to_read == WhatToRead::HEADERS_AND_BODY) {
+ ret = cache_entry->ReadData(
+ 1, 0, buffer2.get(), entries_[i].data_len,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback)));
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (entries_[i].data_len != ret)
+ break;
+ }
+
+ cache_entry->Close();
+ }
+
+ helper.WaitUntilCacheIoFinished(expected);
+ timer.Done();
+
+ return (expected == helper.callbacks_called());
+}
+
+TEST_F(DiskCachePerfTest, BlockfileHashes) {
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ base::PerfTimeLogger timer("Hash disk cache keys");
+ for (int i = 0; i < 300000; i++) {
+ std::string key = GenerateKey(true);
+ base::Hash(key);
+ }
+ timer.Done();
+}
+
+void DiskCachePerfTest::ResetAndEvictSystemDiskCache() {
+ base::MessageLoop::current()->RunUntilIdle();
+ cache_.reset();
+
+ // Flush all files in the cache out of system memory.
+ const base::FilePath::StringType file_pattern = FILE_PATH_LITERAL("*");
+ base::FileEnumerator enumerator(cache_path_, true /* recursive */,
+ base::FileEnumerator::FILES, file_pattern);
+ for (base::FilePath file_path = enumerator.Next(); !file_path.empty();
+ file_path = enumerator.Next()) {
+ ASSERT_TRUE(base::EvictFileFromSystemCache(file_path));
+ }
+#if defined(OS_LINUX)
+ // And, cache directories, on platforms where the eviction utility supports
+ // this (currently Linux only).
+ if (simple_cache_mode_) {
+ ASSERT_TRUE(
+ base::EvictFileFromSystemCache(cache_path_.AppendASCII("index-dir")));
+ }
+ ASSERT_TRUE(base::EvictFileFromSystemCache(cache_path_));
+#endif
+
+ DisableFirstCleanup();
+ InitCache();
+}
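
Dropping every cache file from the OS page cache is what makes the following timing pass a genuine cold-read measurement. On Linux, base::EvictFileFromSystemCache is understood to flush the file and then ask the kernel to discard its pages; a hedged sketch of that mechanism (an assumption about the underlying syscalls, POSIX-only):

  #include <fcntl.h>
  #include <unistd.h>

  // Best-effort eviction of one file's pages from the page cache.
  bool EvictFromPageCache(const char* path) {
    int fd = open(path, O_RDONLY);
    if (fd < 0)
      return false;
    fsync(fd);  // write back dirty pages so DONTNEED can actually drop them
    bool ok = posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED) == 0;
    close(fd);
    return ok;
  }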
+
+void DiskCachePerfTest::CacheBackendPerformance() {
+ InitCache();
+ EXPECT_TRUE(TimeWrite());
+
+ ResetAndEvictSystemDiskCache();
+ EXPECT_TRUE(TimeRead(WhatToRead::HEADERS_ONLY,
+ "Read disk cache headers only (cold)"));
+ EXPECT_TRUE(TimeRead(WhatToRead::HEADERS_ONLY,
+ "Read disk cache headers only (warm)"));
+ base::MessageLoop::current()->RunUntilIdle();
+
+ ResetAndEvictSystemDiskCache();
+ EXPECT_TRUE(
+ TimeRead(WhatToRead::HEADERS_AND_BODY, "Read disk cache entries (cold)"));
+ EXPECT_TRUE(
+ TimeRead(WhatToRead::HEADERS_AND_BODY, "Read disk cache entries (warm)"));
+ base::MessageLoop::current()->RunUntilIdle();
+}
+
+TEST_F(DiskCachePerfTest, CacheBackendPerformance) {
+ CacheBackendPerformance();
+}
+
+TEST_F(DiskCachePerfTest, SimpleCacheBackendPerformance) {
+ SetSimpleCacheMode();
+ CacheBackendPerformance();
+}
+
+int BlockSize() {
+  // We can use from 1 to 4 blocks.
+ return (rand() & 0x3) + 1;
+}
+
+// Creating and deleting "entries" on a block-file is something quite frequent
+// (after all, almost everything is stored on block files). The operation is
+// almost free when the file is empty, but can be expensive if the file gets
+// fragmented, or if we have multiple files. This test measures that scenario
+// by using multiple, highly fragmented files.
+TEST_F(DiskCachePerfTest, BlockFilesPerformance) {
+ ASSERT_TRUE(CleanupCacheDir());
+
+ disk_cache::BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ const int kNumBlocks = 60000;
+ disk_cache::Addr address[kNumBlocks];
+
+ base::PerfTimeLogger timer1("Fill three block-files");
+
+ // Fill up the 32-byte block file (use three files).
+ for (int i = 0; i < kNumBlocks; i++) {
+ EXPECT_TRUE(
+ files.CreateBlock(disk_cache::RANKINGS, BlockSize(), &address[i]));
+ }
+
+ timer1.Done();
+ base::PerfTimeLogger timer2("Create and delete blocks");
+
+ for (int i = 0; i < 200000; i++) {
+    int entry = rand() % kNumBlocks;  // Uniform pick; the scaled form degenerates when RAND_MAX is large.
+
+ files.DeleteBlock(address[entry], false);
+ EXPECT_TRUE(
+ files.CreateBlock(disk_cache::RANKINGS, BlockSize(), &address[entry]));
+ }
+
+ timer2.Done();
+ base::MessageLoop::current()->RunUntilIdle();
+}
+
+} // namespace
diff --git a/chromium/net/disk_cache/disk_cache_test_base.cc b/chromium/net/disk_cache/disk_cache_test_base.cc
index d3a21257325..7480cf45848 100644
--- a/chromium/net/disk_cache/disk_cache_test_base.cc
+++ b/chromium/net/disk_cache/disk_cache_test_base.cc
@@ -10,8 +10,8 @@
#include "base/path_service.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
#include "base/threading/platform_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
@@ -55,7 +55,7 @@ void DiskCacheTest::TearDown() {
}
DiskCacheTestWithCache::TestIterator::TestIterator(
- scoped_ptr<disk_cache::Backend::Iterator> iterator)
+ std::unique_ptr<disk_cache::Backend::Iterator> iterator)
: iterator_(std::move(iterator)) {}
DiskCacheTestWithCache::TestIterator::~TestIterator() {}
@@ -174,9 +174,10 @@ int DiskCacheTestWithCache::CalculateSizeOfAllEntries() {
return cb.GetResult(rv);
}
-scoped_ptr<DiskCacheTestWithCache::TestIterator>
- DiskCacheTestWithCache::CreateIterator() {
- return scoped_ptr<TestIterator>(new TestIterator(cache_->CreateIterator()));
+std::unique_ptr<DiskCacheTestWithCache::TestIterator>
+DiskCacheTestWithCache::CreateIterator() {
+ return std::unique_ptr<TestIterator>(
+ new TestIterator(cache_->CreateIterator()));
}
void DiskCacheTestWithCache::FlushQueueForTest() {
@@ -312,9 +313,9 @@ void DiskCacheTestWithCache::CreateBackend(uint32_t flags,
if (simple_cache_mode_) {
net::TestCompletionCallback cb;
- scoped_ptr<disk_cache::SimpleBackendImpl> simple_backend(
- new disk_cache::SimpleBackendImpl(
- cache_path_, size_, type_, runner, NULL));
+ std::unique_ptr<disk_cache::SimpleBackendImpl> simple_backend(
+ new disk_cache::SimpleBackendImpl(cache_path_, size_, type_, runner,
+ NULL));
int rv = simple_backend->Init(cb.callback());
ASSERT_EQ(net::OK, cb.GetResult(rv));
simple_cache_impl_ = simple_backend.get();
diff --git a/chromium/net/disk_cache/disk_cache_test_base.h b/chromium/net/disk_cache/disk_cache_test_base.h
index 713bba0e6a2..c94f5ac82ed 100644
--- a/chromium/net/disk_cache/disk_cache_test_base.h
+++ b/chromium/net/disk_cache/disk_cache_test_base.h
@@ -7,10 +7,11 @@
#include <stdint.h>
+#include <memory>
+
#include "base/files/file_path.h"
#include "base/files/scoped_temp_dir.h"
#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
#include "base/threading/thread.h"
#include "net/base/cache_type.h"
#include "net/disk_cache/disk_cache.h"
@@ -54,7 +55,7 @@ class DiskCacheTest : public PlatformTest {
private:
base::ScopedTempDir temp_dir_;
- scoped_ptr<base::MessageLoop> message_loop_;
+ std::unique_ptr<base::MessageLoop> message_loop_;
};
// Provides basic support for cache related tests.
@@ -62,13 +63,14 @@ class DiskCacheTestWithCache : public DiskCacheTest {
protected:
class TestIterator {
public:
- explicit TestIterator(scoped_ptr<disk_cache::Backend::Iterator> iterator);
+ explicit TestIterator(
+ std::unique_ptr<disk_cache::Backend::Iterator> iterator);
~TestIterator();
int OpenNextEntry(disk_cache::Entry** next_entry);
private:
- scoped_ptr<disk_cache::Backend::Iterator> iterator_;
+ std::unique_ptr<disk_cache::Backend::Iterator> iterator_;
};
DiskCacheTestWithCache();
@@ -130,7 +132,7 @@ class DiskCacheTestWithCache : public DiskCacheTest {
const base::Time end_time);
int CalculateSizeOfAllEntries();
int DoomEntriesSince(const base::Time initial_time);
- scoped_ptr<TestIterator> CreateIterator();
+ std::unique_ptr<TestIterator> CreateIterator();
void FlushQueueForTest();
void RunTaskForTest(const base::Closure& closure);
int ReadData(disk_cache::Entry* entry, int index, int offset,
@@ -163,7 +165,7 @@ class DiskCacheTestWithCache : public DiskCacheTest {
// cache_ will always have a valid object, regardless of how the cache was
// initialized. The implementation pointers can be NULL.
- scoped_ptr<disk_cache::Backend> cache_;
+ std::unique_ptr<disk_cache::Backend> cache_;
disk_cache::BackendImpl* cache_impl_;
disk_cache::SimpleBackendImpl* simple_cache_impl_;
disk_cache::MemBackendImpl* mem_cache_;
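
The test-base changes above are representative of the mechanical migration running through this whole patch: base/memory/scoped_ptr.h is replaced by <memory>, scoped_ptr<T> by std::unique_ptr<T>, and (as a later hunk in simple_backend_impl.cc shows) make_scoped_ptr by base::WrapUnique from base/memory/ptr_util.h. A minimal sketch of the pattern, with Widget standing in for any concrete type:

    #include <memory>
    #include "base/memory/ptr_util.h"

    // Before: scoped_ptr<Widget> w(new Widget());
    //         return make_scoped_ptr(new Widget());
    // After:
    std::unique_ptr<Widget> MakeWidget() {
      // base::WrapUnique fills the role std::make_unique later would.
      return base::WrapUnique(new Widget());
    }

Ownership semantics are unchanged; only the spelling moves from the base library to the standard one.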
diff --git a/chromium/net/disk_cache/disk_cache_test_util.cc b/chromium/net/disk_cache/disk_cache_test_util.cc
index 0a2a27ecc8b..64e0a61c84b 100644
--- a/chromium/net/disk_cache/disk_cache_test_util.cc
+++ b/chromium/net/disk_cache/disk_cache_test_util.cc
@@ -7,7 +7,7 @@
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/logging.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/file.h"
@@ -61,7 +61,7 @@ bool DeleteCache(const base::FilePath& path) {
bool CheckCacheIntegrity(const base::FilePath& path,
bool new_eviction,
uint32_t mask) {
- scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
+ std::unique_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
path, mask, base::ThreadTaskRunnerHandle::Get(), NULL));
if (!cache.get())
return false;
diff --git a/chromium/net/disk_cache/entry_unittest.cc b/chromium/net/disk_cache/entry_unittest.cc
index 22f793bb88b..07e7b2ae671 100644
--- a/chromium/net/disk_cache/entry_unittest.cc
+++ b/chromium/net/disk_cache/entry_unittest.cc
@@ -9,6 +9,7 @@
#include "base/files/file.h"
#include "base/files/file_util.h"
#include "base/macros.h"
+#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/threading/platform_thread.h"
@@ -21,6 +22,7 @@
#include "net/disk_cache/disk_cache_test_base.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/memory/mem_entry_impl.h"
+#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_entry_impl.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
@@ -1625,7 +1627,7 @@ TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
parent_entry->Close();
// Perform the enumerations.
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
disk_cache::Entry* entry = NULL;
int count = 0;
while (iter->OpenNextEntry(&entry) == net::OK) {
@@ -2185,7 +2187,8 @@ TEST_F(DiskCacheEntryTest, MemoryOnlyDoomSparseEntry) {
// way to simulate a race is to execute what we want on the callback.
class SparseTestCompletionCallback: public net::TestCompletionCallback {
public:
- explicit SparseTestCompletionCallback(scoped_ptr<disk_cache::Backend> cache)
+ explicit SparseTestCompletionCallback(
+ std::unique_ptr<disk_cache::Backend> cache)
: cache_(std::move(cache)) {}
private:
@@ -2194,7 +2197,7 @@ class SparseTestCompletionCallback: public net::TestCompletionCallback {
TestCompletionCallback::SetResult(result);
}
- scoped_ptr<disk_cache::Backend> cache_;
+ std::unique_ptr<disk_cache::Backend> cache_;
DISALLOW_COPY_AND_ASSIGN(SparseTestCompletionCallback);
};
@@ -2351,7 +2354,7 @@ TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
entry->Close();
EXPECT_EQ(4, cache_->GetEntryCount());
- scoped_ptr<TestIterator> iter = CreateIterator();
+ std::unique_ptr<TestIterator> iter = CreateIterator();
int count = 0;
std::string child_key[2];
while (iter->OpenNextEntry(&entry) == net::OK) {
@@ -2707,7 +2710,7 @@ TEST_F(DiskCacheEntryTest, SimpleCacheNoEOF) {
SetSimpleCacheMode();
InitCache();
- const char key[] = "the first key";
+ const std::string key("the first key");
disk_cache::Entry* entry = NULL;
ASSERT_EQ(net::OK, CreateEntry(key, &entry));
@@ -2727,9 +2730,8 @@ TEST_F(DiskCacheEntryTest, SimpleCacheNoEOF) {
int kTruncationBytes = -static_cast<int>(sizeof(disk_cache::SimpleFileEOF));
const base::FilePath entry_path = cache_path_.AppendASCII(
disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
- const int64_t invalid_size =
- disk_cache::simple_util::GetFileSizeFromKeyAndDataSize(key,
- kTruncationBytes);
+ const int64_t invalid_size = disk_cache::simple_util::GetFileSizeFromDataSize(
+ key.size(), kTruncationBytes);
EXPECT_TRUE(TruncatePath(entry_path, invalid_size));
EXPECT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
DisableIntegrityCheck();
@@ -3687,7 +3689,7 @@ TEST_F(DiskCacheEntryTest, SimpleCacheStream1SizeChanges) {
SetSimpleCacheMode();
InitCache();
disk_cache::Entry* entry = NULL;
- const char key[] = "the key";
+ const std::string key("the key");
const int kSize = 100;
scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
scoped_refptr<net::IOBuffer> buffer_read(new net::IOBuffer(kSize));
@@ -3725,7 +3727,7 @@ TEST_F(DiskCacheEntryTest, SimpleCacheStream1SizeChanges) {
int sparse_data_size = 0;
disk_cache::SimpleEntryStat entry_stat(
base::Time::Now(), base::Time::Now(), data_size, sparse_data_size);
- int eof_offset = entry_stat.GetEOFOffsetInFile(key, 0);
+ int eof_offset = entry_stat.GetEOFOffsetInFile(key.size(), 0);
disk_cache::SimpleFileEOF eof_record;
ASSERT_EQ(static_cast<int>(sizeof(eof_record)),
entry_file0.Read(eof_offset, reinterpret_cast<char*>(&eof_record),
@@ -4164,3 +4166,102 @@ TEST_F(DiskCacheEntryTest, SimpleCacheTruncateLargeSparseFile) {
entry->Close();
}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheReadWithoutKeySHA256) {
+ // This test runs as APP_CACHE to make operations more synchronous.
+ SetCacheType(net::APP_CACHE);
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* entry;
+ std::string key("a key");
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const std::string stream_0_data = "data for stream zero";
+ scoped_refptr<net::IOBuffer> stream_0_iobuffer(
+ new net::StringIOBuffer(stream_0_data));
+ EXPECT_EQ(static_cast<int>(stream_0_data.size()),
+ WriteData(entry, 0, 0, stream_0_iobuffer.get(),
+ stream_0_data.size(), false));
+ const std::string stream_1_data = "FOR STREAM ONE, QUITE DIFFERENT THINGS";
+ scoped_refptr<net::IOBuffer> stream_1_iobuffer(
+ new net::StringIOBuffer(stream_1_data));
+ EXPECT_EQ(static_cast<int>(stream_1_data.size()),
+ WriteData(entry, 1, 0, stream_1_iobuffer.get(),
+ stream_1_data.size(), false));
+ entry->Close();
+
+ base::RunLoop().RunUntilIdle();
+ disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_TRUE(
+ disk_cache::simple_util::RemoveKeySHA256FromEntry(key, cache_path_));
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ ScopedEntryPtr entry_closer(entry);
+
+ EXPECT_EQ(static_cast<int>(stream_0_data.size()), entry->GetDataSize(0));
+ scoped_refptr<net::IOBuffer> check_stream_0_data(
+ new net::IOBuffer(stream_0_data.size()));
+ EXPECT_EQ(
+ static_cast<int>(stream_0_data.size()),
+ ReadData(entry, 0, 0, check_stream_0_data.get(), stream_0_data.size()));
+ EXPECT_EQ(0, stream_0_data.compare(0, std::string::npos,
+ check_stream_0_data->data(),
+ stream_0_data.size()));
+
+ EXPECT_EQ(static_cast<int>(stream_1_data.size()), entry->GetDataSize(1));
+ scoped_refptr<net::IOBuffer> check_stream_1_data(
+ new net::IOBuffer(stream_1_data.size()));
+ EXPECT_EQ(
+ static_cast<int>(stream_1_data.size()),
+ ReadData(entry, 1, 0, check_stream_1_data.get(), stream_1_data.size()));
+ EXPECT_EQ(0, stream_1_data.compare(0, std::string::npos,
+ check_stream_1_data->data(),
+ stream_1_data.size()));
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheDoubleOpenWithoutKeySHA256) {
+ // This test runs as APP_CACHE to make operations more synchronous.
+ SetCacheType(net::APP_CACHE);
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* entry;
+ std::string key("a key");
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ entry->Close();
+
+ base::RunLoop().RunUntilIdle();
+ disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_TRUE(
+ disk_cache::simple_util::RemoveKeySHA256FromEntry(key, cache_path_));
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ entry->Close();
+
+ base::RunLoop().RunUntilIdle();
+ disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
+ base::RunLoop().RunUntilIdle();
+
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheReadCorruptKeySHA256) {
+ // This test runs as APP_CACHE to make operations more synchronous.
+ SetCacheType(net::APP_CACHE);
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* entry;
+ std::string key("a key");
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ entry->Close();
+
+ base::RunLoop().RunUntilIdle();
+ disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_TRUE(
+ disk_cache::simple_util::CorruptKeySHA256FromEntry(key, cache_path_));
+ EXPECT_NE(net::OK, OpenEntry(key, &entry));
+}
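
The three new SimpleCache tests (SimpleCacheReadWithoutKeySHA256, SimpleCacheDoubleOpenWithoutKeySHA256, SimpleCacheReadCorruptKeySHA256) share one synchronization idiom before touching entry files behind the cache's back: drain the IO thread, flush the simple cache's worker pool, then drain the IO thread again so the replies the pool posted back have run. Only after that is it safe for helpers such as RemoveKeySHA256FromEntry or CorruptKeySHA256FromEntry to rewrite the files on disk:

    // Drain pattern used verbatim by the tests above.
    base::RunLoop().RunUntilIdle();
    disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
    base::RunLoop().RunUntilIdle();

Running as net::APP_CACHE, per the comment in each test, makes the operations more synchronous, which keeps this drain deterministic.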
diff --git a/chromium/net/disk_cache/memory/mem_backend_impl.cc b/chromium/net/disk_cache/memory/mem_backend_impl.cc
index 028c022f035..0ba2d3fef1c 100644
--- a/chromium/net/disk_cache/memory/mem_backend_impl.cc
+++ b/chromium/net/disk_cache/memory/mem_backend_impl.cc
@@ -49,9 +49,9 @@ MemBackendImpl::~MemBackendImpl() {
}
// static
-scoped_ptr<Backend> MemBackendImpl::CreateBackend(int max_bytes,
- net::NetLog* net_log) {
- scoped_ptr<MemBackendImpl> cache(new MemBackendImpl(net_log));
+std::unique_ptr<Backend> MemBackendImpl::CreateBackend(int max_bytes,
+ net::NetLog* net_log) {
+ std::unique_ptr<MemBackendImpl> cache(new MemBackendImpl(net_log));
cache->SetMaxSize(max_bytes);
if (cache->Init())
return std::move(cache);
@@ -242,8 +242,8 @@ class MemBackendImpl::MemIterator final : public Backend::Iterator {
base::LinkNode<MemEntryImpl>* current_;
};
-scoped_ptr<Backend::Iterator> MemBackendImpl::CreateIterator() {
- return scoped_ptr<Backend::Iterator>(
+std::unique_ptr<Backend::Iterator> MemBackendImpl::CreateIterator() {
+ return std::unique_ptr<Backend::Iterator>(
new MemIterator(weak_factory_.GetWeakPtr()));
}
diff --git a/chromium/net/disk_cache/memory/mem_backend_impl.h b/chromium/net/disk_cache/memory/mem_backend_impl.h
index 8abcca5b7a2..f6ebbf7b0ce 100644
--- a/chromium/net/disk_cache/memory/mem_backend_impl.h
+++ b/chromium/net/disk_cache/memory/mem_backend_impl.h
@@ -10,9 +10,9 @@
#include <stdint.h>
#include <string>
+#include <unordered_map>
#include "base/compiler_specific.h"
-#include "base/containers/hash_tables.h"
#include "base/containers/linked_list.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
@@ -39,7 +39,8 @@ class NET_EXPORT_PRIVATE MemBackendImpl final : public Backend {
// size the cache can grow to. If zero is passed in as max_bytes, the cache
// will determine the value to use based on the available memory. The returned
// pointer can be NULL if a fatal error is found.
- static scoped_ptr<Backend> CreateBackend(int max_bytes, net::NetLog* net_log);
+ static std::unique_ptr<Backend> CreateBackend(int max_bytes,
+ net::NetLog* net_log);
// Performs general initialization for this current instance of the cache.
bool Init();
@@ -89,7 +90,7 @@ class NET_EXPORT_PRIVATE MemBackendImpl final : public Backend {
int DoomEntriesSince(base::Time initial_time,
const CompletionCallback& callback) override;
int CalculateSizeOfAllEntries(const CompletionCallback& callback) override;
- scoped_ptr<Iterator> CreateIterator() override;
+ std::unique_ptr<Iterator> CreateIterator() override;
void GetStats(base::StringPairs* stats) override {}
void OnExternalCacheHit(const std::string& key) override;
@@ -97,7 +98,7 @@ class NET_EXPORT_PRIVATE MemBackendImpl final : public Backend {
class MemIterator;
friend class MemIterator;
- typedef base::hash_map<std::string, MemEntryImpl*> EntryMap;
+ using EntryMap = std::unordered_map<std::string, MemEntryImpl*>;
// Deletes entries from the cache until the current size is below the limit.
void EvictIfNeeded();
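
With CreateBackend now returning std::unique_ptr, a caller sketch looks like this (per the header comment above, a max_bytes of zero lets the cache size itself from available memory, and a null return signals a fatal error during Init):

    std::unique_ptr<disk_cache::Backend> backend =
        disk_cache::MemBackendImpl::CreateBackend(0 /* max_bytes */, nullptr);
    if (!backend) {
      // Fatal error during initialization; no usable in-memory cache.
    }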
diff --git a/chromium/net/disk_cache/memory/mem_entry_impl.cc b/chromium/net/disk_cache/memory/mem_entry_impl.cc
index bcbebb3cf2f..f1efa334af4 100644
--- a/chromium/net/disk_cache/memory/mem_entry_impl.cc
+++ b/chromium/net/disk_cache/memory/mem_entry_impl.cc
@@ -50,10 +50,10 @@ std::string GenerateChildName(const std::string& base_name, int child_id) {
// Returns NetLog parameters for the creation of a MemEntryImpl. A separate
// function is needed because child entries don't store their key().
-scoped_ptr<base::Value> NetLogEntryCreationCallback(
+std::unique_ptr<base::Value> NetLogEntryCreationCallback(
const MemEntryImpl* entry,
net::NetLogCaptureMode /* capture_mode */) {
- scoped_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
+ std::unique_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
std::string key;
switch (entry->type()) {
case MemEntryImpl::PARENT_ENTRY:
diff --git a/chromium/net/disk_cache/memory/mem_entry_impl.h b/chromium/net/disk_cache/memory/mem_entry_impl.h
index 11634e6ab9a..b366065be18 100644
--- a/chromium/net/disk_cache/memory/mem_entry_impl.h
+++ b/chromium/net/disk_cache/memory/mem_entry_impl.h
@@ -7,14 +7,14 @@
#include <stdint.h>
+#include <memory>
#include <string>
+#include <unordered_map>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/containers/linked_list.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "net/disk_cache/disk_cache.h"
#include "net/log/net_log.h"
@@ -133,7 +133,7 @@ class NET_EXPORT_PRIVATE MemEntryImpl final
MemEntryImpl* parent,
net::NetLog* net_log);
- typedef base::hash_map<int, MemEntryImpl*> EntryMap;
+ using EntryMap = std::unordered_map<int, MemEntryImpl*>;
static const int kNumStreams = 3;
@@ -172,7 +172,7 @@ class NET_EXPORT_PRIVATE MemEntryImpl final
// entry.
// Pointer to the parent entry, or nullptr if this entry is a parent entry.
MemEntryImpl* parent_;
- scoped_ptr<EntryMap> children_;
+ std::unique_ptr<EntryMap> children_;
base::Time last_modified_;
base::Time last_used_;
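
mem_entry_impl.h shows the second recurring migration in this patch: base/containers/hash_tables.h types give way to the standard unordered containers, and typedefs become using aliases. In sketch form:

    // Before:
    //   #include "base/containers/hash_tables.h"
    //   typedef base::hash_map<int, MemEntryImpl*> EntryMap;
    // After:
    #include <unordered_map>
    using EntryMap = std::unordered_map<int, MemEntryImpl*>;

The same swap (including base::hash_set to std::unordered_set) repeats in simple_backend_impl and simple_index below.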
diff --git a/chromium/net/disk_cache/net_log_parameters.cc b/chromium/net/disk_cache/net_log_parameters.cc
index 842ce25e638..9ab448ff92c 100644
--- a/chromium/net/disk_cache/net_log_parameters.cc
+++ b/chromium/net/disk_cache/net_log_parameters.cc
@@ -15,23 +15,23 @@
namespace {
-scoped_ptr<base::Value> NetLogEntryCreationCallback(
+std::unique_ptr<base::Value> NetLogEntryCreationCallback(
const disk_cache::Entry* entry,
bool created,
net::NetLogCaptureMode /* capture_mode */) {
- scoped_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
+ std::unique_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
dict->SetString("key", entry->GetKey());
dict->SetBoolean("created", created);
return std::move(dict);
}
-scoped_ptr<base::Value> NetLogReadWriteDataCallback(
+std::unique_ptr<base::Value> NetLogReadWriteDataCallback(
int index,
int offset,
int buf_len,
bool truncate,
net::NetLogCaptureMode /* capture_mode */) {
- scoped_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
+ std::unique_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
dict->SetInteger("index", index);
dict->SetInteger("offset", offset);
dict->SetInteger("buf_len", buf_len);
@@ -40,11 +40,11 @@ scoped_ptr<base::Value> NetLogReadWriteDataCallback(
return std::move(dict);
}
-scoped_ptr<base::Value> NetLogReadWriteCompleteCallback(
+std::unique_ptr<base::Value> NetLogReadWriteCompleteCallback(
int bytes_copied,
net::NetLogCaptureMode /* capture_mode */) {
DCHECK_NE(bytes_copied, net::ERR_IO_PENDING);
- scoped_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
+ std::unique_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
if (bytes_copied < 0) {
dict->SetInteger("net_error", bytes_copied);
} else {
@@ -53,11 +53,11 @@ scoped_ptr<base::Value> NetLogReadWriteCompleteCallback(
return std::move(dict);
}
-scoped_ptr<base::Value> NetLogSparseOperationCallback(
+std::unique_ptr<base::Value> NetLogSparseOperationCallback(
int64_t offset,
int buf_len,
net::NetLogCaptureMode /* capture_mode */) {
- scoped_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
+ std::unique_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
// Values can only be created with at most 32-bit integers. Using a string
// instead circumvents that restriction.
dict->SetString("offset", base::Int64ToString(offset));
@@ -65,21 +65,21 @@ scoped_ptr<base::Value> NetLogSparseOperationCallback(
return std::move(dict);
}
-scoped_ptr<base::Value> NetLogSparseReadWriteCallback(
+std::unique_ptr<base::Value> NetLogSparseReadWriteCallback(
const net::NetLog::Source& source,
int child_len,
net::NetLogCaptureMode /* capture_mode */) {
- scoped_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
+ std::unique_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
source.AddToEventParameters(dict.get());
dict->SetInteger("child_len", child_len);
return std::move(dict);
}
-scoped_ptr<base::Value> NetLogGetAvailableRangeResultCallback(
+std::unique_ptr<base::Value> NetLogGetAvailableRangeResultCallback(
int64_t start,
int result,
net::NetLogCaptureMode /* capture_mode */) {
- scoped_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
+ std::unique_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
if (result > 0) {
dict->SetInteger("length", result);
dict->SetString("start", base::Int64ToString(start));
diff --git a/chromium/net/disk_cache/simple/simple_backend_impl.cc b/chromium/net/disk_cache/simple/simple_backend_impl.cc
index 00f6d15e129..4587bf9a3f8 100644
--- a/chromium/net/disk_cache/simple/simple_backend_impl.cc
+++ b/chromium/net/disk_cache/simple/simple_backend_impl.cc
@@ -8,6 +8,8 @@
#include <cstdlib>
#include <functional>
+#include "base/memory/ptr_util.h"
+
#if defined(OS_POSIX)
#include <sys/resource.h>
#endif
@@ -24,8 +26,8 @@
#include "base/single_thread_task_runner.h"
#include "base/sys_info.h"
#include "base/task_runner_util.h"
-#include "base/thread_task_runner_handle.h"
#include "base/threading/sequenced_worker_pool.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
@@ -209,11 +211,11 @@ class SimpleBackendImpl::ActiveEntryProxy
}
}
- static scoped_ptr<SimpleEntryImpl::ActiveEntryProxy> Create(
+ static std::unique_ptr<SimpleEntryImpl::ActiveEntryProxy> Create(
int64_t entry_hash,
SimpleBackendImpl* backend) {
- scoped_ptr<SimpleEntryImpl::ActiveEntryProxy>
- proxy(new ActiveEntryProxy(entry_hash, backend));
+ std::unique_ptr<SimpleEntryImpl::ActiveEntryProxy> proxy(
+ new ActiveEntryProxy(entry_hash, backend));
return proxy;
}
@@ -243,18 +245,16 @@ SimpleBackendImpl::SimpleBackendImpl(
}
SimpleBackendImpl::~SimpleBackendImpl() {
- index_->WriteToDisk();
+ index_->WriteToDisk(SimpleIndex::INDEX_WRITE_REASON_SHUTDOWN);
}
int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) {
worker_pool_ = g_sequenced_worker_pool.Get().GetTaskRunner();
index_.reset(new SimpleIndex(
- base::ThreadTaskRunnerHandle::Get(),
- this,
- cache_type_,
- make_scoped_ptr(new SimpleIndexFile(
- cache_thread_, worker_pool_.get(), cache_type_, path_))));
+ base::ThreadTaskRunnerHandle::Get(), this, cache_type_,
+ base::WrapUnique(new SimpleIndexFile(cache_thread_, worker_pool_.get(),
+ cache_type_, path_))));
index_->ExecuteWhenReady(
base::Bind(&RecordIndexLoad, cache_type_, base::TimeTicks::Now()));
@@ -289,7 +289,7 @@ void SimpleBackendImpl::OnDoomStart(uint64_t entry_hash) {
void SimpleBackendImpl::OnDoomComplete(uint64_t entry_hash) {
DCHECK_EQ(1u, entries_pending_doom_.count(entry_hash));
- base::hash_map<uint64_t, std::vector<Closure>>::iterator it =
+ std::unordered_map<uint64_t, std::vector<Closure>>::iterator it =
entries_pending_doom_.find(entry_hash);
std::vector<Closure> to_run_closures;
to_run_closures.swap(it->second);
@@ -301,7 +301,7 @@ void SimpleBackendImpl::OnDoomComplete(uint64_t entry_hash) {
void SimpleBackendImpl::DoomEntries(std::vector<uint64_t>* entry_hashes,
const net::CompletionCallback& callback) {
- scoped_ptr<std::vector<uint64_t>> mass_doom_entry_hashes(
+ std::unique_ptr<std::vector<uint64_t>> mass_doom_entry_hashes(
new std::vector<uint64_t>());
mass_doom_entry_hashes->swap(*entry_hashes);
@@ -378,7 +378,7 @@ int SimpleBackendImpl::OpenEntry(const std::string& key,
// TODO(gavinp): Factor out this (not quite completely) repetitive code
// block from OpenEntry/CreateEntry/DoomEntry.
- base::hash_map<uint64_t, std::vector<Closure>>::iterator it =
+ std::unordered_map<uint64_t, std::vector<Closure>>::iterator it =
entries_pending_doom_.find(entry_hash);
if (it != entries_pending_doom_.end()) {
Callback<int(const net::CompletionCallback&)> operation =
@@ -390,14 +390,7 @@ int SimpleBackendImpl::OpenEntry(const std::string& key,
}
scoped_refptr<SimpleEntryImpl> simple_entry =
CreateOrFindActiveEntry(entry_hash, key);
- CompletionCallback backend_callback =
- base::Bind(&SimpleBackendImpl::OnEntryOpenedFromKey,
- AsWeakPtr(),
- key,
- entry,
- simple_entry,
- callback);
- return simple_entry->OpenEntry(entry, backend_callback);
+ return simple_entry->OpenEntry(entry, callback);
}
int SimpleBackendImpl::CreateEntry(const std::string& key,
@@ -406,7 +399,7 @@ int SimpleBackendImpl::CreateEntry(const std::string& key,
DCHECK_LT(0u, key.size());
const uint64_t entry_hash = simple_util::GetEntryHashKey(key);
- base::hash_map<uint64_t, std::vector<Closure>>::iterator it =
+ std::unordered_map<uint64_t, std::vector<Closure>>::iterator it =
entries_pending_doom_.find(entry_hash);
if (it != entries_pending_doom_.end()) {
Callback<int(const net::CompletionCallback&)> operation =
@@ -425,7 +418,7 @@ int SimpleBackendImpl::DoomEntry(const std::string& key,
const net::CompletionCallback& callback) {
const uint64_t entry_hash = simple_util::GetEntryHashKey(key);
- base::hash_map<uint64_t, std::vector<Closure>>::iterator it =
+ std::unordered_map<uint64_t, std::vector<Closure>>::iterator it =
entries_pending_doom_.find(entry_hash);
if (it != entries_pending_doom_.end()) {
Callback<int(const net::CompletionCallback&)> operation =
@@ -530,12 +523,12 @@ class SimpleBackendImpl::SimpleIterator final : public Iterator {
private:
base::WeakPtr<SimpleBackendImpl> backend_;
- scoped_ptr<std::vector<uint64_t>> hashes_to_enumerate_;
+ std::unique_ptr<std::vector<uint64_t>> hashes_to_enumerate_;
base::WeakPtrFactory<SimpleIterator> weak_factory_;
};
-scoped_ptr<Backend::Iterator> SimpleBackendImpl::CreateIterator() {
- return scoped_ptr<Iterator>(new SimpleIterator(AsWeakPtr()));
+std::unique_ptr<Backend::Iterator> SimpleBackendImpl::CreateIterator() {
+ return std::unique_ptr<Iterator>(new SimpleIterator(AsWeakPtr()));
}
void SimpleBackendImpl::GetStats(base::StringPairs* stats) {
@@ -566,7 +559,7 @@ void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
callback.Run(result);
return;
}
- scoped_ptr<std::vector<uint64_t>> removed_key_hashes(
+ std::unique_ptr<std::vector<uint64_t>> removed_key_hashes(
index_->GetEntriesBetween(initial_time, end_time).release());
DoomEntries(removed_key_hashes.get(), callback);
}
@@ -631,7 +624,7 @@ scoped_refptr<SimpleEntryImpl> SimpleBackendImpl::CreateOrFindActiveEntry(
int SimpleBackendImpl::OpenEntryFromHash(uint64_t entry_hash,
Entry** entry,
const CompletionCallback& callback) {
- base::hash_map<uint64_t, std::vector<Closure>>::iterator it =
+ std::unordered_map<uint64_t, std::vector<Closure>>::iterator it =
entries_pending_doom_.find(entry_hash);
if (it != entries_pending_doom_.end()) {
Callback<int(const net::CompletionCallback&)> operation =
@@ -658,9 +651,9 @@ int SimpleBackendImpl::OpenEntryFromHash(uint64_t entry_hash,
int SimpleBackendImpl::DoomEntryFromHash(uint64_t entry_hash,
const CompletionCallback& callback) {
Entry** entry = new Entry*();
- scoped_ptr<Entry*> scoped_entry(entry);
+ std::unique_ptr<Entry*> scoped_entry(entry);
- base::hash_map<uint64_t, std::vector<Closure>>::iterator pending_it =
+ std::unordered_map<uint64_t, std::vector<Closure>>::iterator pending_it =
entries_pending_doom_.find(entry_hash);
if (pending_it != entries_pending_doom_.end()) {
Callback<int(const net::CompletionCallback&)> operation =
@@ -713,31 +706,8 @@ void SimpleBackendImpl::OnEntryOpenedFromHash(
}
}
-void SimpleBackendImpl::OnEntryOpenedFromKey(
- const std::string key,
- Entry** entry,
- const scoped_refptr<SimpleEntryImpl>& simple_entry,
- const CompletionCallback& callback,
- int error_code) {
- int final_code = error_code;
- if (final_code == net::OK) {
- bool key_matches = key.compare(simple_entry->key()) == 0;
- if (!key_matches) {
- // TODO(clamy): Add a unit test to check this code path.
- DLOG(WARNING) << "Key mismatch on open.";
- simple_entry->Doom();
- simple_entry->Close();
- final_code = net::ERR_FAILED;
- } else {
- DCHECK_EQ(simple_entry->entry_hash(), simple_util::GetEntryHashKey(key));
- }
- SIMPLE_CACHE_UMA(BOOLEAN, "KeyMatchedOnOpen", cache_type_, key_matches);
- }
- callback.Run(final_code);
-}
-
void SimpleBackendImpl::DoomEntriesComplete(
- scoped_ptr<std::vector<uint64_t>> entry_hashes,
+ std::unique_ptr<std::vector<uint64_t>> entry_hashes,
const net::CompletionCallback& callback,
int result) {
for (const uint64_t& entry_hash : *entry_hashes)
diff --git a/chromium/net/disk_cache/simple/simple_backend_impl.h b/chromium/net/disk_cache/simple/simple_backend_impl.h
index 6db40163761..e491359392b 100644
--- a/chromium/net/disk_cache/simple/simple_backend_impl.h
+++ b/chromium/net/disk_cache/simple/simple_backend_impl.h
@@ -7,16 +7,16 @@
#include <stdint.h>
+#include <memory>
#include <string>
+#include <unordered_map>
#include <utility>
#include <vector>
#include "base/callback_forward.h"
#include "base/compiler_specific.h"
-#include "base/containers/hash_tables.h"
#include "base/files/file_path.h"
#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/strings/string_split.h"
#include "base/task_runner.h"
@@ -108,7 +108,7 @@ class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend,
int DoomEntriesSince(base::Time initial_time,
const CompletionCallback& callback) override;
int CalculateSizeOfAllEntries(const CompletionCallback& callback) override;
- scoped_ptr<Iterator> CreateIterator() override;
+ std::unique_ptr<Iterator> CreateIterator() override;
void GetStats(base::StringPairs* stats) override;
void OnExternalCacheHit(const std::string& key) override;
@@ -116,10 +116,10 @@ class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend,
class SimpleIterator;
friend class SimpleIterator;
- typedef base::hash_map<uint64_t, SimpleEntryImpl*> EntryMap;
+ using EntryMap = std::unordered_map<uint64_t, SimpleEntryImpl*>;
- typedef base::Callback<void(base::Time mtime, uint64_t max_size, int result)>
- InitializeIndexCallback;
+ using InitializeIndexCallback =
+ base::Callback<void(base::Time mtime, uint64_t max_size, int result)>;
class ActiveEntryProxy;
friend class ActiveEntryProxy;
@@ -191,13 +191,13 @@ class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend,
// A callback thunk used by DoomEntries to clear the |entries_pending_doom_|
// after a mass doom.
- void DoomEntriesComplete(scoped_ptr<std::vector<uint64_t>> entry_hashes,
+ void DoomEntriesComplete(std::unique_ptr<std::vector<uint64_t>> entry_hashes,
const CompletionCallback& callback,
int result);
const base::FilePath path_;
const net::CacheType cache_type_;
- scoped_ptr<SimpleIndex> index_;
+ std::unique_ptr<SimpleIndex> index_;
const scoped_refptr<base::SingleThreadTaskRunner> cache_thread_;
scoped_refptr<base::TaskRunner> worker_pool_;
@@ -210,7 +210,8 @@ class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend,
// these entries cannot have Doom/Create/Open operations run until the doom
// is complete. The base::Closure map target is used to store deferred
// operations to be run at the completion of the Doom.
- base::hash_map<uint64_t, std::vector<base::Closure>> entries_pending_doom_;
+ std::unordered_map<uint64_t, std::vector<base::Closure>>
+ entries_pending_doom_;
net::NetLog* const net_log_;
};
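
The comment on entries_pending_doom_ describes a deferral protocol that the OpenEntry/CreateEntry/DoomEntry hunks above all repeat: an operation targeting a hash that is mid-doom is captured as a closure and queued on the map, to be replayed when OnDoomComplete fires. A condensed sketch (RunAfterDoom is an illustrative name for the replay thunk, not necessarily the file's actual helper):

    auto it = entries_pending_doom_.find(entry_hash);
    if (it != entries_pending_doom_.end()) {
      // Defer: re-issue this operation once the in-flight doom finishes.
      base::Callback<int(const net::CompletionCallback&)> operation =
          base::Bind(&SimpleBackendImpl::OpenEntry, base::Unretained(this),
                     key, entry);
      it->second.push_back(base::Bind(&RunAfterDoom, operation, callback));
      return net::ERR_IO_PENDING;
    }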
diff --git a/chromium/net/disk_cache/simple/simple_backend_version.h b/chromium/net/disk_cache/simple/simple_backend_version.h
index d6ddf9b61ca..604d0e1a596 100644
--- a/chromium/net/disk_cache/simple/simple_backend_version.h
+++ b/chromium/net/disk_cache/simple/simple_backend_version.h
@@ -18,7 +18,7 @@ namespace disk_cache {
// |kSimpleVersion - 1| then the whole cache directory will be cleared.
// * Dropping cache data on disk or some of its parts can be a valid way to
// Upgrade.
-const uint32_t kSimpleVersion = 6;
+const uint32_t kSimpleVersion = 7;
// The version of the entry file(s) as written to disk. Must be updated iff the
// entry format changes with the overall backend version update.
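
Bumping kSimpleVersion to 7 covers both format changes in this patch: the optional key SHA256 appended to stream 0 and the write reason appended to the index metadata. Per the upgrade rule stated in this header, only the current and the immediately previous version are accepted on disk; anything else clears the cache directory. As a sketch of that rule (the real logic lives in the simple cache's version-upgrade code):

    bool VersionIsUsable(uint32_t disk_version) {
      // kSimpleVersion - 1 can be upgraded in place; older formats are wiped.
      return disk_version == kSimpleVersion ||
             disk_version == kSimpleVersion - 1;
    }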
diff --git a/chromium/net/disk_cache/simple/simple_entry_format.h b/chromium/net/disk_cache/simple/simple_entry_format.h
index aec7162f2c2..30a369358fb 100644
--- a/chromium/net/disk_cache/simple/simple_entry_format.h
+++ b/chromium/net/disk_cache/simple/simple_entry_format.h
@@ -25,7 +25,15 @@ const uint64_t kSimpleSparseRangeMagicNumber = UINT64_C(0xeb97bf016553676b);
// - the data from stream 1.
// - a SimpleFileEOF record for stream 1.
// - the data from stream 0.
+// - (optionally) the SHA256 of the key.
// - a SimpleFileEOF record for stream 0.
+//
+// Because stream 0 data (typically HTTP headers) is on the critical path of
+// requests, on open, the cache reads the end of the record and does not
+// read the SimpleFileHeader. If the key can be validated with a SHA256, then
+// the stream 0 data can be returned to the caller without reading the
+// SimpleFileHeader. If the key SHA256 is not present, then the cache must
+// read the SimpleFileHeader to confirm key equality.
// A file containing stream 2 in the Simple cache consists of:
// - a SimpleFileHeader.
@@ -47,6 +55,7 @@ struct NET_EXPORT_PRIVATE SimpleFileHeader {
struct NET_EXPORT_PRIVATE SimpleFileEOF {
enum Flags {
FLAG_HAS_CRC32 = (1U << 0),
+ FLAG_HAS_KEY_SHA256 = (1U << 1), // Preceding the record if present.
};
SimpleFileEOF();
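
The layout comment above is the heart of this change: the stream 0 EOF record sits last in the file, with the optional SHA256 of the key immediately before it, so an open can validate the key and return the stream 0 data from reads at the tail of the file alone. A sketch of the tail arithmetic this implies (assuming, as the fast-open description requires, that the EOF record tells the reader where stream 0 starts; ReadRecordAt is a hypothetical helper):

    const int64_t eof0_offset =
        file_size - static_cast<int64_t>(sizeof(SimpleFileEOF));
    SimpleFileEOF eof0 = ReadRecordAt(eof0_offset);  // hypothetical helper
    int64_t stream0_end = eof0_offset;
    if (eof0.flags & SimpleFileEOF::FLAG_HAS_KEY_SHA256)
      stream0_end -= 32;  // SHA-256 digests are 32 bytes.
    // Stream 0 data ends at stream0_end; only if the hash is absent must the
    // reader fall back to the SimpleFileHeader to confirm the key.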
diff --git a/chromium/net/disk_cache/simple/simple_entry_impl.cc b/chromium/net/disk_cache/simple/simple_entry_impl.cc
index 68f1b9e709a..8169b05a6b1 100644
--- a/chromium/net/disk_cache/simple/simple_entry_impl.cc
+++ b/chromium/net/disk_cache/simple/simple_entry_impl.cc
@@ -18,7 +18,7 @@
#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
#include "base/task_runner_util.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
@@ -81,8 +81,8 @@ void RecordWriteResult(net::CacheType cache_type, WriteResult result) {
"WriteResult2", cache_type, result, WRITE_RESULT_MAX);
}
-// TODO(ttuttle): Consider removing this once we have a good handle on header
-// size changes.
+// TODO(juliatuttle): Consider removing this once we have a good handle on
+// header size changes.
void RecordHeaderSizeChange(net::CacheType cache_type,
int old_size, int new_size) {
HeaderSizeChange size_change;
@@ -200,7 +200,7 @@ SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type,
}
void SimpleEntryImpl::SetActiveEntryProxy(
- scoped_ptr<ActiveEntryProxy> active_entry_proxy) {
+ std::unique_ptr<ActiveEntryProxy> active_entry_proxy) {
DCHECK(!active_entry_proxy_);
active_entry_proxy_.reset(active_entry_proxy.release());
}
@@ -521,7 +521,7 @@ int SimpleEntryImpl::GetAvailableRange(int64_t offset,
bool SimpleEntryImpl::CouldBeSparse() const {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(ttuttle): Actually check.
+ // TODO(juliatuttle): Actually check.
return true;
}
@@ -600,7 +600,7 @@ void SimpleEntryImpl::RunNextOperationIfNeeded() {
"EntryOperationsPending", cache_type_,
pending_operations_.size(), 0, 100, 20);
if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
- scoped_ptr<SimpleEntryOperation> operation(
+ std::unique_ptr<SimpleEntryOperation> operation(
new SimpleEntryOperation(pending_operations_.front()));
pending_operations_.pop();
switch (operation->type()) {
@@ -693,23 +693,16 @@ void SimpleEntryImpl::OpenEntryInternal(bool have_index,
DCHECK(!synchronous_entry_);
state_ = STATE_IO_PENDING;
const base::TimeTicks start_time = base::TimeTicks::Now();
- scoped_ptr<SimpleEntryCreationResults> results(
- new SimpleEntryCreationResults(
- SimpleEntryStat(last_used_, last_modified_, data_size_,
- sparse_data_size_)));
- Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
- cache_type_,
- path_,
- entry_hash_,
- have_index,
- results.get());
- Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
- this,
- callback,
- start_time,
- base::Passed(&results),
- out_entry,
- net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
+ std::unique_ptr<SimpleEntryCreationResults> results(
+ new SimpleEntryCreationResults(SimpleEntryStat(
+ last_used_, last_modified_, data_size_, sparse_data_size_)));
+ Closure task =
+ base::Bind(&SimpleSynchronousEntry::OpenEntry, cache_type_, path_, key_,
+ entry_hash_, have_index, results.get());
+ Closure reply =
+ base::Bind(&SimpleEntryImpl::CreationOperationComplete, this, callback,
+ start_time, base::Passed(&results), out_entry,
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}
@@ -742,10 +735,9 @@ void SimpleEntryImpl::CreateEntryInternal(bool have_index,
have_written_[i] = true;
const base::TimeTicks start_time = base::TimeTicks::Now();
- scoped_ptr<SimpleEntryCreationResults> results(
- new SimpleEntryCreationResults(
- SimpleEntryStat(last_used_, last_modified_, data_size_,
- sparse_data_size_)));
+ std::unique_ptr<SimpleEntryCreationResults> results(
+ new SimpleEntryCreationResults(SimpleEntryStat(
+ last_used_, last_modified_, data_size_, sparse_data_size_)));
Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
cache_type_,
path_,
@@ -766,8 +758,8 @@ void SimpleEntryImpl::CreateEntryInternal(bool have_index,
void SimpleEntryImpl::CloseInternal() {
DCHECK(io_thread_checker_.CalledOnValidThread());
typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
- scoped_ptr<std::vector<CRCRecord> >
- crc32s_to_write(new std::vector<CRCRecord>());
+ std::unique_ptr<std::vector<CRCRecord>> crc32s_to_write(
+ new std::vector<CRCRecord>());
net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);
@@ -868,11 +860,10 @@ void SimpleEntryImpl::ReadDataInternal(int stream_index,
if (!doomed_ && backend_.get())
backend_->index()->UseIfExists(entry_hash_);
- scoped_ptr<uint32_t> read_crc32(new uint32_t());
- scoped_ptr<int> result(new int());
- scoped_ptr<SimpleEntryStat> entry_stat(
- new SimpleEntryStat(last_used_, last_modified_, data_size_,
- sparse_data_size_));
+ std::unique_ptr<uint32_t> read_crc32(new uint32_t());
+ std::unique_ptr<int> result(new int());
+ std::unique_ptr<SimpleEntryStat> entry_stat(new SimpleEntryStat(
+ last_used_, last_modified_, data_size_, sparse_data_size_));
Closure task = base::Bind(
&SimpleSynchronousEntry::ReadData, base::Unretained(synchronous_entry_),
SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
@@ -950,9 +941,8 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index,
AdvanceCrc(buf, offset, buf_len, stream_index);
// |entry_stat| needs to be initialized before modifying |data_size_|.
- scoped_ptr<SimpleEntryStat> entry_stat(
- new SimpleEntryStat(last_used_, last_modified_, data_size_,
- sparse_data_size_));
+ std::unique_ptr<SimpleEntryStat> entry_stat(new SimpleEntryStat(
+ last_used_, last_modified_, data_size_, sparse_data_size_));
if (truncate) {
data_size_[stream_index] = offset + buf_len;
} else {
@@ -970,7 +960,7 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index,
if (stream_index == 1)
have_written_[0] = true;
- scoped_ptr<int> result(new int());
+ std::unique_ptr<int> result(new int());
Closure task = base::Bind(
&SimpleSynchronousEntry::WriteData, base::Unretained(synchronous_entry_),
SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len,
@@ -1002,8 +992,8 @@ void SimpleEntryImpl::ReadSparseDataInternal(
DCHECK_EQ(STATE_READY, state_);
state_ = STATE_IO_PENDING;
- scoped_ptr<int> result(new int());
- scoped_ptr<base::Time> last_used(new base::Time());
+ std::unique_ptr<int> result(new int());
+ std::unique_ptr<base::Time> last_used(new base::Time());
Closure task = base::Bind(
&SimpleSynchronousEntry::ReadSparseData,
base::Unretained(synchronous_entry_),
@@ -1040,13 +1030,12 @@ void SimpleEntryImpl::WriteSparseDataInternal(
max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
}
- scoped_ptr<SimpleEntryStat> entry_stat(
- new SimpleEntryStat(last_used_, last_modified_, data_size_,
- sparse_data_size_));
+ std::unique_ptr<SimpleEntryStat> entry_stat(new SimpleEntryStat(
+ last_used_, last_modified_, data_size_, sparse_data_size_));
last_used_ = last_modified_ = base::Time::Now();
- scoped_ptr<int> result(new int());
+ std::unique_ptr<int> result(new int());
Closure task = base::Bind(
&SimpleSynchronousEntry::WriteSparseData,
base::Unretained(synchronous_entry_),
@@ -1072,7 +1061,7 @@ void SimpleEntryImpl::GetAvailableRangeInternal(
DCHECK_EQ(STATE_READY, state_);
state_ = STATE_IO_PENDING;
- scoped_ptr<int> result(new int());
+ std::unique_ptr<int> result(new int());
Closure task = base::Bind(&SimpleSynchronousEntry::GetAvailableRange,
base::Unretained(synchronous_entry_),
SimpleSynchronousEntry::EntryOperationData(
@@ -1119,7 +1108,7 @@ void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) {
void SimpleEntryImpl::CreationOperationComplete(
const CompletionCallback& completion_callback,
const base::TimeTicks& start_time,
- scoped_ptr<SimpleEntryCreationResults> in_results,
+ std::unique_ptr<SimpleEntryCreationResults> in_results,
Entry** out_entry,
net::NetLog::EventType end_event_type) {
DCHECK(io_thread_checker_.CalledOnValidThread());
@@ -1152,11 +1141,14 @@ void SimpleEntryImpl::CreationOperationComplete(
crc32s_[0] = in_results->stream_0_crc32;
crc32s_end_offset_[0] = in_results->entry_stat.data_size(0);
}
+ // If this entry was opened by hash, key_ could still be empty. If so, update
+ // it with the key read from the synchronous entry.
if (key_.empty()) {
SetKey(synchronous_entry_->key());
} else {
- // This should only be triggered when creating an entry. The key check in
- // the open case is handled in SimpleBackendImpl.
+ // This should only be triggered when creating an entry. In the open case
+ // the key is either copied from the arguments to open, or checked
+ // in the synchronous entry.
DCHECK_EQ(key_, synchronous_entry_->key());
}
UpdateDataFromEntryStat(in_results->entry_stat);
@@ -1172,7 +1164,7 @@ void SimpleEntryImpl::CreationOperationComplete(
void SimpleEntryImpl::EntryOperationComplete(
const CompletionCallback& completion_callback,
const SimpleEntryStat& entry_stat,
- scoped_ptr<int> result) {
+ std::unique_ptr<int> result) {
DCHECK(io_thread_checker_.CalledOnValidThread());
DCHECK(synchronous_entry_);
DCHECK_EQ(STATE_IO_PENDING, state_);
@@ -1196,9 +1188,9 @@ void SimpleEntryImpl::ReadOperationComplete(
int stream_index,
int offset,
const CompletionCallback& completion_callback,
- scoped_ptr<uint32_t> read_crc32,
- scoped_ptr<SimpleEntryStat> entry_stat,
- scoped_ptr<int> result) {
+ std::unique_ptr<uint32_t> read_crc32,
+ std::unique_ptr<SimpleEntryStat> entry_stat,
+ std::unique_ptr<int> result) {
DCHECK(io_thread_checker_.CalledOnValidThread());
DCHECK(synchronous_entry_);
DCHECK_EQ(STATE_IO_PENDING, state_);
@@ -1224,7 +1216,7 @@ void SimpleEntryImpl::ReadOperationComplete(
net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);
- scoped_ptr<int> new_result(new int());
+ std::unique_ptr<int> new_result(new int());
Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
base::Unretained(synchronous_entry_),
stream_index,
@@ -1266,8 +1258,8 @@ void SimpleEntryImpl::ReadOperationComplete(
void SimpleEntryImpl::WriteOperationComplete(
int stream_index,
const CompletionCallback& completion_callback,
- scoped_ptr<SimpleEntryStat> entry_stat,
- scoped_ptr<int> result) {
+ std::unique_ptr<SimpleEntryStat> entry_stat,
+ std::unique_ptr<int> result) {
if (*result >= 0)
RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
else
@@ -1286,8 +1278,8 @@ void SimpleEntryImpl::WriteOperationComplete(
void SimpleEntryImpl::ReadSparseOperationComplete(
const CompletionCallback& completion_callback,
- scoped_ptr<base::Time> last_used,
- scoped_ptr<int> result) {
+ std::unique_ptr<base::Time> last_used,
+ std::unique_ptr<int> result) {
DCHECK(io_thread_checker_.CalledOnValidThread());
DCHECK(synchronous_entry_);
DCHECK(result);
@@ -1304,8 +1296,8 @@ void SimpleEntryImpl::ReadSparseOperationComplete(
void SimpleEntryImpl::WriteSparseOperationComplete(
const CompletionCallback& completion_callback,
- scoped_ptr<SimpleEntryStat> entry_stat,
- scoped_ptr<int> result) {
+ std::unique_ptr<SimpleEntryStat> entry_stat,
+ std::unique_ptr<int> result) {
DCHECK(io_thread_checker_.CalledOnValidThread());
DCHECK(synchronous_entry_);
DCHECK(result);
@@ -1320,7 +1312,7 @@ void SimpleEntryImpl::WriteSparseOperationComplete(
void SimpleEntryImpl::GetAvailableRangeOperationComplete(
const CompletionCallback& completion_callback,
- scoped_ptr<int> result) {
+ std::unique_ptr<int> result) {
DCHECK(io_thread_checker_.CalledOnValidThread());
DCHECK(synchronous_entry_);
DCHECK(result);
@@ -1346,7 +1338,7 @@ void SimpleEntryImpl::ChecksumOperationComplete(
int orig_result,
int stream_index,
const CompletionCallback& completion_callback,
- scoped_ptr<int> result) {
+ std::unique_ptr<int> result) {
DCHECK(io_thread_checker_.CalledOnValidThread());
DCHECK(synchronous_entry_);
DCHECK_EQ(STATE_IO_PENDING, state_);
@@ -1408,7 +1400,7 @@ int64_t SimpleEntryImpl::GetDiskUsage() const {
int64_t file_size = 0;
for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
file_size +=
- simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
+ simple_util::GetFileSizeFromDataSize(key_.size(), data_size_[i]);
}
file_size += sparse_data_size_;
return file_size;
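
GetDiskUsage now sizes each file from the key length alone, via GetFileSizeFromDataSize(key_.size(), data_size), instead of passing the key string. Under the layout documented in simple_entry_format.h, the computation is plausibly the fixed records plus key plus data; a hedged sketch (the authoritative formula is in simple_util.h):

    int64_t FileSizeSketch(size_t key_size, int32_t data_size) {
      return static_cast<int64_t>(sizeof(disk_cache::SimpleFileHeader)) +
             key_size + data_size + sizeof(disk_cache::SimpleFileEOF);
    }

The point of the signature change is that only the key's length, not its bytes, affects the file size.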
diff --git a/chromium/net/disk_cache/simple/simple_entry_impl.h b/chromium/net/disk_cache/simple/simple_entry_impl.h
index 4d41edc00f2..3a0317c0ddb 100644
--- a/chromium/net/disk_cache/simple/simple_entry_impl.h
+++ b/chromium/net/disk_cache/simple/simple_entry_impl.h
@@ -7,12 +7,12 @@
#include <stdint.h>
+#include <memory>
#include <queue>
#include <string>
#include "base/files/file_path.h"
#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
#include "base/threading/thread_checker.h"
#include "net/base/cache_type.h"
#include "net/base/net_export.h"
@@ -65,7 +65,7 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
net::NetLog* net_log);
void SetActiveEntryProxy(
- scoped_ptr<ActiveEntryProxy> active_entry_proxy);
+ std::unique_ptr<ActiveEntryProxy> active_entry_proxy);
// Adds another reader/writer to this entry, if possible, returning |this| to
// |entry|.
@@ -79,6 +79,11 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
const std::string& key() const { return key_; }
uint64_t entry_hash() const { return entry_hash_; }
+
+ // The key is not a constructor parameter to the SimpleEntryImpl, because
+ // during cache iteration, it's necessary to open entries by their hash
+ // alone. In that case, the SimpleSynchronousEntry will read the key from disk
+ // and it will be set.
void SetKey(const std::string& key);
// From Entry:
@@ -219,7 +224,7 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
void CreationOperationComplete(
const CompletionCallback& completion_callback,
const base::TimeTicks& start_time,
- scoped_ptr<SimpleEntryCreationResults> in_results,
+ std::unique_ptr<SimpleEntryCreationResults> in_results,
Entry** out_entry,
net::NetLog::EventType end_event_type);
@@ -232,35 +237,35 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
// |completion_callback| after updating state and dooming on errors.
void EntryOperationComplete(const CompletionCallback& completion_callback,
const SimpleEntryStat& entry_stat,
- scoped_ptr<int> result);
+ std::unique_ptr<int> result);
// Called after an asynchronous read. Updates |crc32s_| if possible.
void ReadOperationComplete(int stream_index,
int offset,
const CompletionCallback& completion_callback,
- scoped_ptr<uint32_t> read_crc32,
- scoped_ptr<SimpleEntryStat> entry_stat,
- scoped_ptr<int> result);
+ std::unique_ptr<uint32_t> read_crc32,
+ std::unique_ptr<SimpleEntryStat> entry_stat,
+ std::unique_ptr<int> result);
// Called after an asynchronous write completes.
void WriteOperationComplete(int stream_index,
const CompletionCallback& completion_callback,
- scoped_ptr<SimpleEntryStat> entry_stat,
- scoped_ptr<int> result);
+ std::unique_ptr<SimpleEntryStat> entry_stat,
+ std::unique_ptr<int> result);
void ReadSparseOperationComplete(
const CompletionCallback& completion_callback,
- scoped_ptr<base::Time> last_used,
- scoped_ptr<int> result);
+ std::unique_ptr<base::Time> last_used,
+ std::unique_ptr<int> result);
void WriteSparseOperationComplete(
const CompletionCallback& completion_callback,
- scoped_ptr<SimpleEntryStat> entry_stat,
- scoped_ptr<int> result);
+ std::unique_ptr<SimpleEntryStat> entry_stat,
+ std::unique_ptr<int> result);
void GetAvailableRangeOperationComplete(
const CompletionCallback& completion_callback,
- scoped_ptr<int> result);
+ std::unique_ptr<int> result);
// Called after an asynchronous doom completes.
void DoomOperationComplete(const CompletionCallback& callback,
@@ -270,11 +275,10 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
// Called after validating the checksums on an entry. Passes through the
// original result if successful, propagates the error if the checksum does
// not validate.
- void ChecksumOperationComplete(
- int stream_index,
- int orig_result,
- const CompletionCallback& completion_callback,
- scoped_ptr<int> result);
+ void ChecksumOperationComplete(int stream_index,
+ int orig_result,
+ const CompletionCallback& completion_callback,
+ std::unique_ptr<int> result);
// Called after completion of asynchronous IO and receiving file metadata for
// the entry in |entry_stat|. Updates the metadata in the entry and in the
@@ -304,7 +308,7 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
int length,
int stream_index);
- scoped_ptr<ActiveEntryProxy> active_entry_proxy_;
+ std::unique_ptr<ActiveEntryProxy> active_entry_proxy_;
// All nonstatic SimpleEntryImpl methods should always be called on the IO
// thread, in all cases. |io_thread_checker_| documents and enforces this.
@@ -366,7 +370,7 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
net::BoundNetLog net_log_;
- scoped_ptr<SimpleEntryOperation> executing_operation_;
+ std::unique_ptr<SimpleEntryOperation> executing_operation_;
// Unlike other streams, stream 0 data is read from the disk when the entry is
// opened, and then kept in memory. All read/write operations on stream 0
diff --git a/chromium/net/disk_cache/simple/simple_index.cc b/chromium/net/disk_cache/simple/simple_index.cc
index f6ccd3eb1dd..8689b382fbd 100644
--- a/chromium/net/disk_cache/simple/simple_index.cc
+++ b/chromium/net/disk_cache/simple/simple_index.cc
@@ -144,7 +144,7 @@ SimpleIndex::SimpleIndex(
const scoped_refptr<base::SingleThreadTaskRunner>& io_thread,
SimpleIndexDelegate* delegate,
net::CacheType cache_type,
- scoped_ptr<SimpleIndexFile> index_file)
+ std::unique_ptr<SimpleIndexFile> index_file)
: delegate_(delegate),
cache_type_(cache_type),
cache_size_(0),
@@ -158,7 +158,9 @@ SimpleIndex::SimpleIndex(
io_thread_(io_thread),
// Creating the callback once so it is reused every time
// write_to_disk_timer_.Start() is called.
- write_to_disk_cb_(base::Bind(&SimpleIndex::WriteToDisk, AsWeakPtr())),
+ write_to_disk_cb_(base::Bind(&SimpleIndex::WriteToDisk,
+ AsWeakPtr(),
+ INDEX_WRITE_REASON_IDLE)),
app_on_background_(false) {}
SimpleIndex::~SimpleIndex() {
@@ -182,7 +184,7 @@ void SimpleIndex::Initialize(base::Time cache_mtime) {
#endif
SimpleIndexLoadResult* load_result = new SimpleIndexLoadResult();
- scoped_ptr<SimpleIndexLoadResult> load_result_scoped(load_result);
+ std::unique_ptr<SimpleIndexLoadResult> load_result_scoped(load_result);
base::Closure reply = base::Bind(
&SimpleIndex::MergeInitializingSet,
AsWeakPtr(),
@@ -208,8 +210,9 @@ int SimpleIndex::ExecuteWhenReady(const net::CompletionCallback& task) {
return net::ERR_IO_PENDING;
}
-scoped_ptr<SimpleIndex::HashList> SimpleIndex::GetEntriesBetween(
- base::Time initial_time, base::Time end_time) {
+std::unique_ptr<SimpleIndex::HashList> SimpleIndex::GetEntriesBetween(
+ base::Time initial_time,
+ base::Time end_time) {
DCHECK_EQ(true, initialized_);
if (!initial_time.is_null())
@@ -221,7 +224,7 @@ scoped_ptr<SimpleIndex::HashList> SimpleIndex::GetEntriesBetween(
const base::Time extended_end_time =
end_time.is_null() ? base::Time::Max() : end_time;
DCHECK(extended_end_time >= initial_time);
- scoped_ptr<HashList> ret_hashes(new HashList());
+ std::unique_ptr<HashList> ret_hashes(new HashList());
for (EntrySet::iterator it = entries_set_.begin(), end = entries_set_.end();
it != end; ++it) {
EntryMetadata& metadata = it->second;
@@ -232,7 +235,7 @@ scoped_ptr<SimpleIndex::HashList> SimpleIndex::GetEntriesBetween(
return ret_hashes;
}
-scoped_ptr<SimpleIndex::HashList> SimpleIndex::GetAllHashes() {
+std::unique_ptr<SimpleIndex::HashList> SimpleIndex::GetAllHashes() {
return GetEntriesBetween(base::Time(), base::Time());
}
@@ -395,13 +398,14 @@ void SimpleIndex::UpdateEntryIteratorSize(EntrySet::iterator* it,
}
void SimpleIndex::MergeInitializingSet(
- scoped_ptr<SimpleIndexLoadResult> load_result) {
+ std::unique_ptr<SimpleIndexLoadResult> load_result) {
DCHECK(io_thread_checker_.CalledOnValidThread());
DCHECK(load_result->did_load);
EntrySet* index_file_entries = &load_result->entries;
- for (base::hash_set<uint64_t>::const_iterator it = removed_entries_.begin();
+ for (std::unordered_set<uint64_t>::const_iterator it =
+ removed_entries_.begin();
it != removed_entries_.end(); ++it) {
index_file_entries->erase(*it);
}
@@ -431,7 +435,7 @@ void SimpleIndex::MergeInitializingSet(
// The actual IO is asynchronous, so calling WriteToDisk() shouldn't slow the
// merge down much.
if (load_result->flush_required)
- WriteToDisk();
+ WriteToDisk(INDEX_WRITE_REASON_STARTUP_MERGE);
SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
"IndexInitializationWaiters", cache_type_,
@@ -455,12 +459,12 @@ void SimpleIndex::OnApplicationStateChange(
} else if (state ==
base::android::APPLICATION_STATE_HAS_STOPPED_ACTIVITIES) {
app_on_background_ = true;
- WriteToDisk();
+ WriteToDisk(INDEX_WRITE_REASON_ANDROID_STOPPED);
}
}
#endif
-void SimpleIndex::WriteToDisk() {
+void SimpleIndex::WriteToDisk(IndexWriteToDiskReason reason) {
DCHECK(io_thread_checker_.CalledOnValidThread());
if (!initialized_)
return;
@@ -481,8 +485,8 @@ void SimpleIndex::WriteToDisk() {
}
last_write_to_disk_ = start;
- index_file_->WriteToDisk(entries_set_, cache_size_,
- start, app_on_background_, base::Closure());
+ index_file_->WriteToDisk(reason, entries_set_, cache_size_, start,
+ app_on_background_, base::Closure());
}
} // namespace disk_cache
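
WriteToDisk now takes a reason so it can be recorded in UMA, and the timer callback binds INDEX_WRITE_REASON_IDLE once at construction rather than at every timer fire; shutdown, the startup merge, and the Android background transition each pass their own reason at the call site. The bound callback, as in the constructor hunk above:

    // Built once; reused whenever write_to_disk_timer_.Start() fires.
    // AsWeakPtr() keeps a late timer fire from touching a destroyed index.
    write_to_disk_cb_ = base::Bind(&SimpleIndex::WriteToDisk, AsWeakPtr(),
                                   INDEX_WRITE_REASON_IDLE);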
diff --git a/chromium/net/disk_cache/simple/simple_index.h b/chromium/net/disk_cache/simple/simple_index.h
index ed37bd076f4..724891bfaeb 100644
--- a/chromium/net/disk_cache/simple/simple_index.h
+++ b/chromium/net/disk_cache/simple/simple_index.h
@@ -8,14 +8,15 @@
#include <stdint.h>
#include <list>
+#include <memory>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include "base/callback.h"
-#include "base/containers/hash_tables.h"
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_checker.h"
@@ -85,13 +86,21 @@ class NET_EXPORT_PRIVATE SimpleIndex
INITIALIZE_METHOD_NEWCACHE = 2,
INITIALIZE_METHOD_MAX = 3,
};
+ // Used in histograms. Please only add entries at the end.
+ enum IndexWriteToDiskReason {
+ INDEX_WRITE_REASON_SHUTDOWN = 0,
+ INDEX_WRITE_REASON_STARTUP_MERGE = 1,
+ INDEX_WRITE_REASON_IDLE = 2,
+ INDEX_WRITE_REASON_ANDROID_STOPPED = 3,
+ INDEX_WRITE_REASON_MAX = 4,
+ };
typedef std::vector<uint64_t> HashList;
SimpleIndex(const scoped_refptr<base::SingleThreadTaskRunner>& io_thread,
SimpleIndexDelegate* delegate,
net::CacheType cache_type,
- scoped_ptr<SimpleIndexFile> simple_index_file);
+ std::unique_ptr<SimpleIndexFile> simple_index_file);
virtual ~SimpleIndex();
@@ -110,14 +119,14 @@ class NET_EXPORT_PRIVATE SimpleIndex
// iff the entry exists in the index.
bool UseIfExists(uint64_t entry_hash);
- void WriteToDisk();
+ void WriteToDisk(IndexWriteToDiskReason reason);
// Update the size (in bytes) of an entry, in the metadata stored in the
// index. This should be the total disk-file size including all streams of the
// entry.
bool UpdateEntrySize(uint64_t entry_hash, int64_t entry_size);
- typedef base::hash_map<uint64_t, EntryMetadata> EntrySet;
+ using EntrySet = std::unordered_map<uint64_t, EntryMetadata>;
static void InsertInEntrySet(uint64_t entry_hash,
const EntryMetadata& entry_metadata,
@@ -130,11 +139,11 @@ class NET_EXPORT_PRIVATE SimpleIndex
// range between |initial_time| and |end_time| where open intervals are
// possible according to the definition given in |DoomEntriesBetween()| in the
// disk cache backend interface.
- scoped_ptr<HashList> GetEntriesBetween(const base::Time initial_time,
- const base::Time end_time);
+ std::unique_ptr<HashList> GetEntriesBetween(const base::Time initial_time,
+ const base::Time end_time);
// Returns the list of all entries key hash.
- scoped_ptr<HashList> GetAllHashes();
+ std::unique_ptr<HashList> GetAllHashes();
// Returns number of indexed entries.
int32_t GetEntryCount() const;
@@ -163,12 +172,13 @@ class NET_EXPORT_PRIVATE SimpleIndex
void UpdateEntryIteratorSize(EntrySet::iterator* it, int64_t entry_size);
// Must run on IO Thread.
- void MergeInitializingSet(scoped_ptr<SimpleIndexLoadResult> load_result);
+ void MergeInitializingSet(std::unique_ptr<SimpleIndexLoadResult> load_result);
#if defined(OS_ANDROID)
void OnApplicationStateChange(base::android::ApplicationState state);
- scoped_ptr<base::android::ApplicationStatusListener> app_status_listener_;
+ std::unique_ptr<base::android::ApplicationStatusListener>
+ app_status_listener_;
#endif
// The owner of |this| must ensure the |delegate_| outlives |this|.
@@ -186,11 +196,11 @@ class NET_EXPORT_PRIVATE SimpleIndex
// This stores all the entry_hash of entries that are removed during
// initialization.
- base::hash_set<uint64_t> removed_entries_;
+ std::unordered_set<uint64_t> removed_entries_;
bool initialized_;
IndexInitMethod init_method_;
- scoped_ptr<SimpleIndexFile> index_file_;
+ std::unique_ptr<SimpleIndexFile> index_file_;
scoped_refptr<base::SingleThreadTaskRunner> io_thread_;
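
IndexWriteToDiskReason values land in UMA, hence the "only add entries at the end" comment: histogram buckets are persisted by numeric value, and the _MAX sentinel doubles as the exclusive boundary passed to the macro, as the recording helper in simple_index_file.cc below shows:

    SIMPLE_CACHE_UMA(ENUMERATION, "IndexWriteReason", cache_type, reason,
                     SimpleIndex::INDEX_WRITE_REASON_MAX);

Any new reason therefore goes between INDEX_WRITE_REASON_ANDROID_STOPPED and INDEX_WRITE_REASON_MAX, with the sentinel renumbered upward.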
diff --git a/chromium/net/disk_cache/simple/simple_index_file.cc b/chromium/net/disk_cache/simple/simple_index_file.cc
index c23d64e6bd8..99eafc36fe2 100644
--- a/chromium/net/disk_cache/simple/simple_index_file.cc
+++ b/chromium/net/disk_cache/simple/simple_index_file.cc
@@ -33,7 +33,7 @@ namespace {
const int kEntryFilesHashLength = 16;
const int kEntryFilesSuffixLength = 2;
-const uint64_t kMaxEntiresInIndex = 100000000;
+const uint64_t kMaxEntriesInIndex = 100000000;
uint32_t CalculatePickleCRC(const base::Pickle& pickle) {
return crc32(crc32(0, Z_NULL, 0),
@@ -50,6 +50,14 @@ enum IndexFileState {
INDEX_STATE_MAX = 4,
};
+enum StaleIndexQuality {
+ STALE_INDEX_OK = 0,
+ STALE_INDEX_MISSED_ENTRIES = 1,
+ STALE_INDEX_EXTRA_ENTRIES = 2,
+ STALE_INDEX_BOTH_MISSED_AND_EXTRA_ENTRIES = 3,
+ STALE_INDEX_MAX = 4,
+};
+
void UmaRecordIndexFileState(IndexFileState state, net::CacheType cache_type) {
SIMPLE_CACHE_UMA(ENUMERATION,
"IndexFileStateOnLoad", cache_type, state, INDEX_STATE_MAX);
@@ -61,6 +69,39 @@ void UmaRecordIndexInitMethod(SimpleIndex::IndexInitMethod method,
SimpleIndex::INITIALIZE_METHOD_MAX);
}
+void UmaRecordIndexWriteReason(SimpleIndex::IndexWriteToDiskReason reason,
+ net::CacheType cache_type) {
+ SIMPLE_CACHE_UMA(ENUMERATION, "IndexWriteReason", cache_type, reason,
+ SimpleIndex::INDEX_WRITE_REASON_MAX);
+}
+
+void UmaRecordIndexWriteReasonAtLoad(SimpleIndex::IndexWriteToDiskReason reason,
+ net::CacheType cache_type) {
+ SIMPLE_CACHE_UMA(ENUMERATION, "IndexWriteReasonAtLoad", cache_type, reason,
+ SimpleIndex::INDEX_WRITE_REASON_MAX);
+}
+
+void UmaRecordStaleIndexQuality(int missed_entry_count,
+ int extra_entry_count,
+ net::CacheType cache_type) {
+ SIMPLE_CACHE_UMA(CUSTOM_COUNTS, "StaleIndexMissedEntryCount", cache_type,
+ missed_entry_count, 1, 100, 5);
+ SIMPLE_CACHE_UMA(CUSTOM_COUNTS, "StaleIndexExtraEntryCount", cache_type,
+ extra_entry_count, 1, 100, 5);
+
+ StaleIndexQuality quality;
+ if (missed_entry_count > 0 && extra_entry_count > 0)
+ quality = STALE_INDEX_BOTH_MISSED_AND_EXTRA_ENTRIES;
+ else if (missed_entry_count > 0)
+ quality = STALE_INDEX_MISSED_ENTRIES;
+ else if (extra_entry_count > 0)
+ quality = STALE_INDEX_EXTRA_ENTRIES;
+ else
+ quality = STALE_INDEX_OK;
+ SIMPLE_CACHE_UMA(ENUMERATION, "StaleIndexQuality", cache_type, quality,
+ STALE_INDEX_MAX);
+}
+
bool WritePickleFile(base::Pickle* pickle, const base::FilePath& file_name) {
File file(
file_name,
@@ -126,15 +167,17 @@ void ProcessEntryFile(SimpleIndex::EntrySet* entries,
} // namespace
-SimpleIndexLoadResult::SimpleIndexLoadResult() : did_load(false),
- flush_required(false) {
-}
+SimpleIndexLoadResult::SimpleIndexLoadResult()
+ : did_load(false),
+ index_write_reason(SimpleIndex::INDEX_WRITE_REASON_MAX),
+ flush_required(false) {}
SimpleIndexLoadResult::~SimpleIndexLoadResult() {
}
void SimpleIndexLoadResult::Reset() {
did_load = false;
+ index_write_reason = SimpleIndex::INDEX_WRITE_REASON_MAX;
flush_required = false;
entries.clear();
}
@@ -149,22 +192,27 @@ const char SimpleIndexFile::kTempIndexFileName[] = "temp-index";
SimpleIndexFile::IndexMetadata::IndexMetadata()
: magic_number_(kSimpleIndexMagicNumber),
version_(kSimpleVersion),
- number_of_entries_(0),
+ reason_(SimpleIndex::INDEX_WRITE_REASON_MAX),
+ entry_count_(0),
cache_size_(0) {}
-SimpleIndexFile::IndexMetadata::IndexMetadata(uint64_t number_of_entries,
- uint64_t cache_size)
+SimpleIndexFile::IndexMetadata::IndexMetadata(
+ SimpleIndex::IndexWriteToDiskReason reason,
+ uint64_t entry_count,
+ uint64_t cache_size)
: magic_number_(kSimpleIndexMagicNumber),
version_(kSimpleVersion),
- number_of_entries_(number_of_entries),
+ reason_(reason),
+ entry_count_(entry_count),
cache_size_(cache_size) {}
void SimpleIndexFile::IndexMetadata::Serialize(base::Pickle* pickle) const {
DCHECK(pickle);
pickle->WriteUInt64(magic_number_);
pickle->WriteUInt32(version_);
- pickle->WriteUInt64(number_of_entries_);
+ pickle->WriteUInt64(entry_count_);
pickle->WriteUInt64(cache_size_);
+ pickle->WriteUInt32(static_cast<uint32_t>(reason_));
}
// static
@@ -179,17 +227,26 @@ bool SimpleIndexFile::SerializeFinalData(base::Time cache_modified,
bool SimpleIndexFile::IndexMetadata::Deserialize(base::PickleIterator* it) {
DCHECK(it);
- return it->ReadUInt64(&magic_number_) &&
- it->ReadUInt32(&version_) &&
- it->ReadUInt64(&number_of_entries_)&&
- it->ReadUInt64(&cache_size_);
+
+ bool v6_format_index_read_results =
+ it->ReadUInt64(&magic_number_) && it->ReadUInt32(&version_) &&
+ it->ReadUInt64(&entry_count_) && it->ReadUInt64(&cache_size_);
+ if (!v6_format_index_read_results)
+ return false;
+ if (version_ >= 7) {
+ uint32_t tmp_reason;
+ if (!it->ReadUInt32(&tmp_reason))
+ return false;
+ reason_ = static_cast<SimpleIndex::IndexWriteToDiskReason>(tmp_reason);
+ }
+ return true;
}
void SimpleIndexFile::SyncWriteToDisk(net::CacheType cache_type,
const base::FilePath& cache_directory,
const base::FilePath& index_filename,
const base::FilePath& temp_index_filename,
- scoped_ptr<base::Pickle> pickle,
+ std::unique_ptr<base::Pickle> pickle,
const base::TimeTicks& start_time,
bool app_on_background) {
DCHECK_EQ(index_filename.DirName().value(),
@@ -236,9 +293,16 @@ void SimpleIndexFile::SyncWriteToDisk(net::CacheType cache_type,
}
bool SimpleIndexFile::IndexMetadata::CheckIndexMetadata() {
- return number_of_entries_ <= kMaxEntiresInIndex &&
- magic_number_ == kSimpleIndexMagicNumber &&
- version_ == kSimpleVersion;
+ if (entry_count_ > kMaxEntriesInIndex ||
+ magic_number_ != kSimpleIndexMagicNumber) {
+ return false;
+ }
+
+ static_assert(kSimpleVersion == 7, "index metadata reader out of date");
+ // No |reason_| is saved in the version 6 file format.
+ if (version_ == 6)
+ return reason_ == SimpleIndex::INDEX_WRITE_REASON_MAX;
+ return version_ == 7 && reason_ < SimpleIndex::INDEX_WRITE_REASON_MAX;
}
SimpleIndexFile::SimpleIndexFile(
@@ -268,13 +332,15 @@ void SimpleIndexFile::LoadIndexEntries(base::Time cache_last_modified,
worker_pool_->PostTaskAndReply(FROM_HERE, task, callback);
}
-void SimpleIndexFile::WriteToDisk(const SimpleIndex::EntrySet& entry_set,
+void SimpleIndexFile::WriteToDisk(SimpleIndex::IndexWriteToDiskReason reason,
+ const SimpleIndex::EntrySet& entry_set,
uint64_t cache_size,
const base::TimeTicks& start,
bool app_on_background,
const base::Closure& callback) {
- IndexMetadata index_metadata(entry_set.size(), cache_size);
- scoped_ptr<base::Pickle> pickle = Serialize(index_metadata, entry_set);
+ UmaRecordIndexWriteReason(reason, cache_type_);
+ IndexMetadata index_metadata(reason, entry_set.size(), cache_size);
+ std::unique_ptr<base::Pickle> pickle = Serialize(index_metadata, entry_set);
base::Closure task =
base::Bind(&SimpleIndexFile::SyncWriteToDisk,
cache_type_, cache_directory_, index_file_, temp_index_file_,
@@ -303,6 +369,11 @@ void SimpleIndexFile::SyncLoadIndexEntries(
UmaRecordIndexFileState(INDEX_STATE_CORRUPT, cache_type);
} else {
if (cache_last_modified <= last_cache_seen_by_index) {
+ if (out_result->index_write_reason !=
+ SimpleIndex::INDEX_WRITE_REASON_MAX) {
+ UmaRecordIndexWriteReasonAtLoad(out_result->index_write_reason,
+ cache_type);
+ }
base::Time latest_dir_mtime;
simple_util::GetMTime(cache_directory, &latest_dir_mtime);
if (LegacyIsIndexFileStale(latest_dir_mtime, index_file_path)) {
@@ -319,6 +390,8 @@ void SimpleIndexFile::SyncLoadIndexEntries(
}
// Reconstruct the index by scanning the disk for entries.
+ SimpleIndex::EntrySet entries_from_stale_index;
+ entries_from_stale_index.swap(out_result->entries);
const base::TimeTicks start = base::TimeTicks::Now();
SyncRestoreFromDisk(cache_directory, index_file_path, out_result);
SIMPLE_CACHE_UMA(MEDIUM_TIMES, "IndexRestoreTime", cache_type,
@@ -328,6 +401,18 @@ void SimpleIndexFile::SyncLoadIndexEntries(
if (index_file_existed) {
out_result->init_method = SimpleIndex::INITIALIZE_METHOD_RECOVERED;
+ int missed_entry_count = 0;
+ for (const auto& i : out_result->entries) {
+ if (entries_from_stale_index.count(i.first) == 0)
+ ++missed_entry_count;
+ }
+ int extra_entry_count = 0;
+ for (const auto& i : entries_from_stale_index) {
+ if (out_result->entries.count(i.first) == 0)
+ ++extra_entry_count;
+ }
+ UmaRecordStaleIndexQuality(missed_entry_count, extra_entry_count,
+ cache_type);
} else {
out_result->init_method = SimpleIndex::INITIALIZE_METHOD_NEWCACHE;
SIMPLE_CACHE_UMA(COUNTS,
@@ -365,10 +450,10 @@ void SimpleIndexFile::SyncLoadFromDisk(const base::FilePath& index_filename,
}
// static
-scoped_ptr<base::Pickle> SimpleIndexFile::Serialize(
+std::unique_ptr<base::Pickle> SimpleIndexFile::Serialize(
const SimpleIndexFile::IndexMetadata& index_metadata,
const SimpleIndex::EntrySet& entries) {
- scoped_ptr<base::Pickle> pickle(
+ std::unique_ptr<base::Pickle> pickle(
new base::Pickle(sizeof(SimpleIndexFile::PickleHeader)));
index_metadata.Serialize(pickle.get());
@@ -417,8 +502,8 @@ void SimpleIndexFile::Deserialize(const char* data, int data_len,
return;
}
- entries->reserve(index_metadata.GetNumberOfEntries() + kExtraSizeForMerge);
- while (entries->size() < index_metadata.GetNumberOfEntries()) {
+ entries->reserve(index_metadata.entry_count() + kExtraSizeForMerge);
+ while (entries->size() < index_metadata.entry_count()) {
uint64_t hash_key;
EntryMetadata entry_metadata;
if (!pickle_it.ReadUInt64(&hash_key) ||
@@ -438,6 +523,7 @@ void SimpleIndexFile::Deserialize(const char* data, int data_len,
DCHECK(out_cache_last_modified);
*out_cache_last_modified = base::Time::FromInternalValue(cache_last_modified);
+ out_result->index_write_reason = index_metadata.reason();
out_result->did_load = true;
}
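The Deserialize() change above is the standard forward-compatibility move: read the v6 prefix unconditionally, then read the appended field only when the stored version promises it, and otherwise leave the sentinel in place. A minimal standalone sketch of the same pattern in plain C++ (hypothetical names, and a memcpy reader standing in for base::Pickle):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

enum WriteReason : uint32_t { REASON_SHUTDOWN = 0, REASON_MAX = 5 };

struct Metadata {
  uint64_t magic = 0;
  uint32_t version = 0;
  uint64_t entry_count = 0;
  uint64_t cache_size = 0;
  WriteReason reason = REASON_MAX;  // Sentinel: the field is absent in v6.
};

class Reader {
 public:
  explicit Reader(const std::vector<unsigned char>& buf) : buf_(buf) {}
  template <typename T>
  bool Read(T* out) {
    if (pos_ + sizeof(T) > buf_.size())
      return false;
    std::memcpy(out, buf_.data() + pos_, sizeof(T));
    pos_ += sizeof(T);
    return true;
  }

 private:
  const std::vector<unsigned char>& buf_;
  std::size_t pos_ = 0;
};

// Read the v6 prefix unconditionally; read the v7 suffix only if present.
bool Deserialize(Reader* it, Metadata* out) {
  if (!it->Read(&out->magic) || !it->Read(&out->version) ||
      !it->Read(&out->entry_count) || !it->Read(&out->cache_size))
    return false;
  if (out->version >= 7) {
    uint32_t raw = 0;
    if (!it->Read(&raw))
      return false;
    out->reason = static_cast<WriteReason>(raw);
  }
  return true;  // A v6 input leaves |reason| at the REASON_MAX sentinel.
}

CheckIndexMetadata() then enforces the invariant per version: a v6 file must still carry the sentinel, while a v7 file must carry a real reason.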
diff --git a/chromium/net/disk_cache/simple/simple_index_file.h b/chromium/net/disk_cache/simple/simple_index_file.h
index a5db59af2bc..981a03dd7d1 100644
--- a/chromium/net/disk_cache/simple/simple_index_file.h
+++ b/chromium/net/disk_cache/simple/simple_index_file.h
@@ -7,15 +7,14 @@
#include <stdint.h>
+#include <memory>
#include <string>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
#include "base/pickle.h"
#include "net/base/cache_type.h"
#include "net/base/net_export.h"
@@ -37,16 +36,16 @@ struct NET_EXPORT_PRIVATE SimpleIndexLoadResult {
bool did_load;
SimpleIndex::EntrySet entries;
+ SimpleIndex::IndexWriteToDiskReason index_write_reason;
SimpleIndex::IndexInitMethod init_method;
bool flush_required;
};
-// Simple Index File format is a pickle serialized data of IndexMetadata and
-// EntryMetadata objects. The file format is as follows: one instance of
-// serialized |IndexMetadata| followed serialized |EntryMetadata| entries
-// repeated |number_of_entries| amount of times. To know more about the format,
-// see SimpleIndexFile::Serialize() and SeeSimpleIndexFile::LoadFromDisk()
-// methods.
+// Simple Index File format is a pickle of IndexMetadata and EntryMetadata
+// objects. The file format is as follows: one instance of |IndexMetadata|
+// followed by |EntryMetadata| repeated |entry_count| times. To learn more about
+// the format see |SimpleIndexFile::Serialize()| and
+// |SimpleIndexFile::LoadFromDisk()|.
//
// The non-static methods must run on the IO thread. All the real
// work is done in the static methods, which are run on the cache thread
@@ -57,22 +56,28 @@ class NET_EXPORT_PRIVATE SimpleIndexFile {
class NET_EXPORT_PRIVATE IndexMetadata {
public:
IndexMetadata();
- IndexMetadata(uint64_t number_of_entries, uint64_t cache_size);
+ IndexMetadata(SimpleIndex::IndexWriteToDiskReason reason,
+ uint64_t entry_count,
+ uint64_t cache_size);
- void Serialize(base::Pickle* pickle) const;
+ virtual void Serialize(base::Pickle* pickle) const;
bool Deserialize(base::PickleIterator* it);
bool CheckIndexMetadata();
- uint64_t GetNumberOfEntries() { return number_of_entries_; }
+ SimpleIndex::IndexWriteToDiskReason reason() const { return reason_; }
+ uint64_t entry_count() const { return entry_count_; }
private:
FRIEND_TEST_ALL_PREFIXES(IndexMetadataTest, Basics);
FRIEND_TEST_ALL_PREFIXES(IndexMetadataTest, Serialize);
+ FRIEND_TEST_ALL_PREFIXES(IndexMetadataTest, ReadV6Format);
+ friend class V6IndexMetadataForTest;
uint64_t magic_number_;
uint32_t version_;
- uint64_t number_of_entries_;
+ SimpleIndex::IndexWriteToDiskReason reason_;
+ uint64_t entry_count_;
uint64_t cache_size_; // Total cache storage size in bytes.
};
@@ -89,7 +94,8 @@ class NET_EXPORT_PRIVATE SimpleIndexFile {
SimpleIndexLoadResult* out_result);
// Write the specified set of entries to disk.
- virtual void WriteToDisk(const SimpleIndex::EntrySet& entry_set,
+ virtual void WriteToDisk(SimpleIndex::IndexWriteToDiskReason reason,
+ const SimpleIndex::EntrySet& entry_set,
uint64_t cache_size,
const base::TimeTicks& start,
bool app_on_background,
@@ -122,7 +128,7 @@ class NET_EXPORT_PRIVATE SimpleIndexFile {
// data to be written to a file. Note: the pickle is not in a consistent state
// immediately after calling this method; one needs to call
// SerializeFinalData to make it ready to write to a file.
- static scoped_ptr<base::Pickle> Serialize(
+ static std::unique_ptr<base::Pickle> Serialize(
const SimpleIndexFile::IndexMetadata& index_metadata,
const SimpleIndex::EntrySet& entries);
@@ -153,7 +159,7 @@ class NET_EXPORT_PRIVATE SimpleIndexFile {
const base::FilePath& cache_directory,
const base::FilePath& index_filename,
const base::FilePath& temp_index_filename,
- scoped_ptr<base::Pickle> pickle,
+ std::unique_ptr<base::Pickle> pickle,
const base::TimeTicks& start_time,
bool app_on_background);
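On the write side, the body of the file is exactly what the format comment above describes: one metadata block, then |entry_count| fixed-shape records. A schematic field-by-field writer under the same assumptions (hypothetical record layout, not the real Pickle framing):

#include <cstdint>
#include <vector>

struct EntryRecord {  // Stand-in for a hash key plus its EntryMetadata.
  uint64_t hash;
  uint64_t last_used;
  uint64_t entry_size;
};

template <typename T>
void Append(std::vector<unsigned char>* out, T value) {
  const unsigned char* p = reinterpret_cast<const unsigned char*>(&value);
  out->insert(out->end(), p, p + sizeof(T));
}

std::vector<unsigned char> SerializeIndex(
    uint64_t magic, uint32_t version, uint32_t reason,
    uint64_t cache_size, const std::vector<EntryRecord>& entries) {
  std::vector<unsigned char> out;
  Append(&out, magic);
  Append(&out, version);
  Append(&out, static_cast<uint64_t>(entries.size()));  // entry_count
  Append(&out, cache_size);
  Append(&out, reason);  // The v7-only trailing field.
  for (const EntryRecord& e : entries) {
    Append(&out, e.hash);
    Append(&out, e.last_used);
    Append(&out, e.entry_size);
  }
  return out;
}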
diff --git a/chromium/net/disk_cache/simple/simple_index_file_posix.cc b/chromium/net/disk_cache/simple/simple_index_file_posix.cc
index 586699d2a8e..e0dd3dd126a 100644
--- a/chromium/net/disk_cache/simple/simple_index_file_posix.cc
+++ b/chromium/net/disk_cache/simple/simple_index_file_posix.cc
@@ -9,10 +9,10 @@
#include <sys/types.h>
#include <unistd.h>
+#include <memory>
#include <string>
#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
namespace disk_cache {
namespace {
@@ -21,7 +21,7 @@ struct DirCloser {
void operator()(DIR* dir) { closedir(dir); }
};
-typedef scoped_ptr<DIR, DirCloser> ScopedDir;
+typedef std::unique_ptr<DIR, DirCloser> ScopedDir;
} // namespace
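ScopedDir above is the portable idiom for owning a C handle with std::unique_ptr and a custom deleter; the scoped_ptr removal leaves it unchanged in spirit. A self-contained example of the same idiom around stdio's FILE* (the path is illustrative):

#include <cstdio>
#include <memory>

struct FileCloser {
  void operator()(FILE* f) const { std::fclose(f); }
};
typedef std::unique_ptr<FILE, FileCloser> ScopedFile;

int main() {
  ScopedFile file(std::fopen("/tmp/scoped_file_demo", "w"));
  if (!file)
    return 1;
  std::fputs("closed automatically on scope exit\n", file.get());
  return 0;  // ~ScopedFile invokes FileCloser; no explicit fclose needed.
}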
diff --git a/chromium/net/disk_cache/simple/simple_index_file_unittest.cc b/chromium/net/disk_cache/simple/simple_index_file_unittest.cc
index 3f1bca572dd..187ef7796a3 100644
--- a/chromium/net/disk_cache/simple/simple_index_file_unittest.cc
+++ b/chromium/net/disk_cache/simple/simple_index_file_unittest.cc
@@ -2,19 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "net/disk_cache/simple/simple_index_file.h"
+
+#include <memory>
+
#include "base/files/file.h"
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
#include "base/hash.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
#include "base/pickle.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/stringprintf.h"
-#include "base/thread_task_runner_handle.h"
#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "net/base/cache_type.h"
#include "net/base/test_completion_callback.h"
@@ -23,7 +26,6 @@
#include "net/disk_cache/simple/simple_backend_version.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_index.h"
-#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/disk_cache/simple/simple_version_upgrade.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -44,14 +46,18 @@ TEST(IndexMetadataTest, Basics) {
EXPECT_EQ(disk_cache::kSimpleIndexMagicNumber, index_metadata.magic_number_);
EXPECT_EQ(disk_cache::kSimpleVersion, index_metadata.version_);
- EXPECT_EQ(0U, index_metadata.GetNumberOfEntries());
+ EXPECT_EQ(0U, index_metadata.entry_count());
EXPECT_EQ(0U, index_metadata.cache_size_);
+ // Without setting a |reason_|, the index metadata isn't valid.
+ index_metadata.reason_ = SimpleIndex::INDEX_WRITE_REASON_SHUTDOWN;
+
EXPECT_TRUE(index_metadata.CheckIndexMetadata());
}
TEST(IndexMetadataTest, Serialize) {
- SimpleIndexFile::IndexMetadata index_metadata(123, 456);
+ SimpleIndexFile::IndexMetadata index_metadata(
+ SimpleIndex::INDEX_WRITE_REASON_SHUTDOWN, 123, 456);
base::Pickle pickle;
index_metadata.Serialize(&pickle);
base::PickleIterator it(pickle);
@@ -60,13 +66,56 @@ TEST(IndexMetadataTest, Serialize) {
EXPECT_EQ(new_index_metadata.magic_number_, index_metadata.magic_number_);
EXPECT_EQ(new_index_metadata.version_, index_metadata.version_);
- EXPECT_EQ(new_index_metadata.GetNumberOfEntries(),
- index_metadata.GetNumberOfEntries());
+ EXPECT_EQ(new_index_metadata.reason_, index_metadata.reason_);
+ EXPECT_EQ(new_index_metadata.entry_count(), index_metadata.entry_count());
EXPECT_EQ(new_index_metadata.cache_size_, index_metadata.cache_size_);
EXPECT_TRUE(new_index_metadata.CheckIndexMetadata());
}
+// This derived index metadata class lets us serialize the older V6 format of
+// the index metadata, so we can test deserializing the old format.
+class V6IndexMetadataForTest : public SimpleIndexFile::IndexMetadata {
+ public:
+ // Do not default to |SimpleIndex::INDEX_WRITE_REASON_MAX|, because we want to
+ // ensure we don't serialize that value and then deserialize it and have a
+ // false positive result.
+ V6IndexMetadataForTest(uint64_t entry_count, uint64_t cache_size)
+ : SimpleIndexFile::IndexMetadata(SimpleIndex::INDEX_WRITE_REASON_SHUTDOWN,
+ entry_count,
+ cache_size) {
+ version_ = 6;
+ }
+
+ // Copied and pasted from the V6 implementation of
+ // |SimpleIndexFile::IndexMetadata()| (removing DCHECKs).
+ void Serialize(base::Pickle* pickle) const override {
+ pickle->WriteUInt64(magic_number_);
+ pickle->WriteUInt32(version_);
+ pickle->WriteUInt64(entry_count_);
+ pickle->WriteUInt64(cache_size_);
+ }
+};
+
+TEST(IndexMetadataTest, ReadV6Format) {
+ V6IndexMetadataForTest v6_index_metadata(123, 456);
+ EXPECT_EQ(6U, v6_index_metadata.version_);
+ base::Pickle pickle;
+ v6_index_metadata.Serialize(&pickle);
+ base::PickleIterator it(pickle);
+ SimpleIndexFile::IndexMetadata new_index_metadata;
+ new_index_metadata.Deserialize(&it);
+
+ EXPECT_EQ(new_index_metadata.magic_number_, v6_index_metadata.magic_number_);
+ EXPECT_EQ(new_index_metadata.version_, v6_index_metadata.version_);
+
+ EXPECT_EQ(new_index_metadata.reason_, SimpleIndex::INDEX_WRITE_REASON_MAX);
+ EXPECT_EQ(new_index_metadata.entry_count(), v6_index_metadata.entry_count());
+ EXPECT_EQ(new_index_metadata.cache_size_, v6_index_metadata.cache_size_);
+
+ EXPECT_TRUE(new_index_metadata.CheckIndexMetadata());
+}
+
// This friend derived class is able to reexport its ancestor's private methods
// as public, for use in tests.
class WrappedSimpleIndexFile : public SimpleIndexFile {
@@ -113,6 +162,7 @@ TEST_F(SimpleIndexFileTest, Serialize) {
EntryMetadata metadata_entries[kNumHashes];
SimpleIndexFile::IndexMetadata index_metadata(
+ SimpleIndex::INDEX_WRITE_REASON_SHUTDOWN,
static_cast<uint64_t>(kNumHashes), 456);
for (size_t i = 0; i < kNumHashes; ++i) {
uint64_t hash = kHashes[i];
@@ -120,7 +170,7 @@ TEST_F(SimpleIndexFileTest, Serialize) {
SimpleIndex::InsertInEntrySet(hash, metadata_entries[i], &entries);
}
- scoped_ptr<base::Pickle> pickle =
+ std::unique_ptr<base::Pickle> pickle =
WrappedSimpleIndexFile::Serialize(index_metadata, entries);
EXPECT_TRUE(pickle.get() != NULL);
base::Time now = base::Time::Now();
@@ -194,8 +244,9 @@ TEST_F(SimpleIndexFileTest, WriteThenLoadIndex) {
net::TestClosure closure;
{
WrappedSimpleIndexFile simple_index_file(cache_dir.path());
- simple_index_file.WriteToDisk(entries, kCacheSize, base::TimeTicks(),
- false, closure.closure());
+ simple_index_file.WriteToDisk(SimpleIndex::INDEX_WRITE_REASON_SHUTDOWN,
+ entries, kCacheSize, base::TimeTicks(), false,
+ closure.closure());
closure.WaitForResult();
EXPECT_TRUE(base::PathExists(simple_index_file.GetIndexFilePath()));
}
@@ -334,7 +385,8 @@ TEST_F(SimpleIndexFileTest, OverwritesStaleTempFile) {
SimpleIndex::EntrySet entries;
SimpleIndex::InsertInEntrySet(11, EntryMetadata(Time(), 11), &entries);
net::TestClosure closure;
- simple_index_file.WriteToDisk(entries, 120U, base::TimeTicks(), false,
+ simple_index_file.WriteToDisk(SimpleIndex::INDEX_WRITE_REASON_SHUTDOWN,
+ entries, 120U, base::TimeTicks(), false,
closure.closure());
closure.WaitForResult();
diff --git a/chromium/net/disk_cache/simple/simple_index_unittest.cc b/chromium/net/disk_cache/simple/simple_index_unittest.cc
index 649d48f538c..d763d7f55e1 100644
--- a/chromium/net/disk_cache/simple/simple_index_unittest.cc
+++ b/chromium/net/disk_cache/simple/simple_index_unittest.cc
@@ -6,12 +6,12 @@
#include <algorithm>
#include <functional>
+#include <memory>
#include <utility>
#include "base/files/scoped_temp_dir.h"
#include "base/hash.h"
#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
#include "base/pickle.h"
#include "base/sha1.h"
#include "base/strings/stringprintf.h"
@@ -67,7 +67,8 @@ class MockSimpleIndexFile : public SimpleIndexFile,
++load_index_entries_calls_;
}
- void WriteToDisk(const SimpleIndex::EntrySet& entry_set,
+ void WriteToDisk(SimpleIndex::IndexWriteToDiskReason reason,
+ const SimpleIndex::EntrySet& entry_set,
uint64_t cache_size,
const base::TimeTicks& start,
bool app_on_background,
@@ -105,7 +106,7 @@ class SimpleIndexTest : public testing::Test, public SimpleIndexDelegate {
}
void SetUp() override {
- scoped_ptr<MockSimpleIndexFile> index_file(new MockSimpleIndexFile());
+ std::unique_ptr<MockSimpleIndexFile> index_file(new MockSimpleIndexFile());
index_file_ = index_file->AsWeakPtr();
index_.reset(
new SimpleIndex(NULL, this, net::DISK_CACHE, std::move(index_file)));
@@ -161,7 +162,7 @@ class SimpleIndexTest : public testing::Test, public SimpleIndexDelegate {
int doom_entries_calls() const { return doom_entries_calls_; }
const simple_util::ImmutableArray<uint64_t, 16> hashes_;
- scoped_ptr<SimpleIndex> index_;
+ std::unique_ptr<SimpleIndex> index_;
base::WeakPtr<MockSimpleIndexFile> index_file_;
std::vector<uint64_t> last_doom_entry_hashes_;
@@ -207,13 +208,13 @@ TEST_F(SimpleIndexTest, IndexSizeCorrectOnMerge) {
index()->UpdateEntrySize(hashes_.at<4>(), 4);
EXPECT_EQ(9U, index()->cache_size_);
{
- scoped_ptr<SimpleIndexLoadResult> result(new SimpleIndexLoadResult());
+ std::unique_ptr<SimpleIndexLoadResult> result(new SimpleIndexLoadResult());
result->did_load = true;
index()->MergeInitializingSet(std::move(result));
}
EXPECT_EQ(9U, index()->cache_size_);
{
- scoped_ptr<SimpleIndexLoadResult> result(new SimpleIndexLoadResult());
+ std::unique_ptr<SimpleIndexLoadResult> result(new SimpleIndexLoadResult());
result->did_load = true;
const uint64_t new_hash_key = hashes_.at<11>();
result->entries.insert(
diff --git a/chromium/net/disk_cache/simple/simple_net_log_parameters.cc b/chromium/net/disk_cache/simple/simple_net_log_parameters.cc
index 46c678c2001..fe565ef91da 100644
--- a/chromium/net/disk_cache/simple/simple_net_log_parameters.cc
+++ b/chromium/net/disk_cache/simple/simple_net_log_parameters.cc
@@ -17,20 +17,20 @@
namespace {
-scoped_ptr<base::Value> NetLogSimpleEntryConstructionCallback(
+std::unique_ptr<base::Value> NetLogSimpleEntryConstructionCallback(
const disk_cache::SimpleEntryImpl* entry,
net::NetLogCaptureMode capture_mode) {
- scoped_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
+ std::unique_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
dict->SetString("entry_hash",
base::StringPrintf("%#016" PRIx64, entry->entry_hash()));
return std::move(dict);
}
-scoped_ptr<base::Value> NetLogSimpleEntryCreationCallback(
+std::unique_ptr<base::Value> NetLogSimpleEntryCreationCallback(
const disk_cache::SimpleEntryImpl* entry,
int net_error,
net::NetLogCaptureMode /* capture_mode */) {
- scoped_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
+ std::unique_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
dict->SetInteger("net_error", net_error);
if (net_error == net::OK)
dict->SetString("key", entry->key());
diff --git a/chromium/net/disk_cache/simple/simple_synchronous_entry.cc b/chromium/net/disk_cache/simple/simple_synchronous_entry.cc
index 220f6a8fea8..71f1cb9fffb 100644
--- a/chromium/net/disk_cache/simple/simple_synchronous_entry.cc
+++ b/chromium/net/disk_cache/simple/simple_synchronous_entry.cc
@@ -18,6 +18,8 @@
#include "base/sha1.h"
#include "base/strings/stringprintf.h"
#include "base/timer/elapsed_timer.h"
+#include "crypto/secure_hash.h"
+#include "net/base/hash_value.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/simple/simple_backend_version.h"
@@ -39,7 +41,7 @@ enum OpenEntryResult {
OPEN_ENTRY_BAD_MAGIC_NUMBER = 3,
OPEN_ENTRY_BAD_VERSION = 4,
OPEN_ENTRY_CANT_READ_KEY = 5,
- // OPEN_ENTRY_KEY_MISMATCH = 6, Deprecated.
+ OPEN_ENTRY_KEY_MISMATCH = 6,
OPEN_ENTRY_KEY_HASH_MISMATCH = 7,
OPEN_ENTRY_SPARSE_OPEN_FAILED = 8,
OPEN_ENTRY_MAX = 9,
@@ -63,6 +65,7 @@ enum CheckEOFResult {
CHECK_EOF_RESULT_READ_FAILURE,
CHECK_EOF_RESULT_MAGIC_NUMBER_MISMATCH,
CHECK_EOF_RESULT_CRC_MISMATCH,
+ CHECK_EOF_RESULT_KEY_SHA256_MISMATCH,
CHECK_EOF_RESULT_MAX,
};
@@ -72,6 +75,9 @@ enum CloseResult {
CLOSE_RESULT_WRITE_FAILURE,
};
+// Used in histograms; please only add entries at the end.
+enum class KeySHA256Result { NOT_PRESENT, MATCHED, NO_MATCH, MAX };
+
void RecordSyncOpenResult(net::CacheType cache_type,
OpenEntryResult result,
bool had_index) {
@@ -105,6 +111,12 @@ void RecordCloseResult(net::CacheType cache_type, CloseResult result) {
"SyncCloseResult", cache_type, result, WRITE_RESULT_MAX);
}
+void RecordKeySHA256Result(net::CacheType cache_type, KeySHA256Result result) {
+ SIMPLE_CACHE_UMA(ENUMERATION, "SyncKeySHA256Result", cache_type,
+ static_cast<int>(result),
+ static_cast<int>(KeySHA256Result::MAX));
+}
+
bool CanOmitEmptyFile(int file_index) {
DCHECK_GE(file_index, 0);
DCHECK_LT(file_index, disk_cache::kSimpleEntryFileCount);
@@ -123,6 +135,14 @@ bool TruncatePath(const FilePath& filename_to_truncate) {
return true;
}
+void CalculateSHA256OfKey(const std::string& key,
+ net::SHA256HashValue* out_hash_value) {
+ std::unique_ptr<crypto::SecureHash> hash(
+ crypto::SecureHash::Create(crypto::SecureHash::SHA256));
+ hash->Update(key.data(), key.size());
+ hash->Finish(out_hash_value, sizeof(*out_hash_value));
+}
+
} // namespace
namespace disk_cache {
@@ -130,8 +150,9 @@ namespace disk_cache {
using simple_util::GetEntryHashKey;
using simple_util::GetFilenameFromEntryHashAndFileIndex;
using simple_util::GetSparseFilenameFromEntryHash;
-using simple_util::GetDataSizeFromKeyAndFileSize;
-using simple_util::GetFileSizeFromKeyAndDataSize;
+using simple_util::GetHeaderSize;
+using simple_util::GetDataSizeFromFileSize;
+using simple_util::GetFileSizeFromDataSize;
using simple_util::GetFileIndexFromStreamIndex;
SimpleEntryStat::SimpleEntryStat(base::Time last_used,
@@ -144,35 +165,46 @@ SimpleEntryStat::SimpleEntryStat(base::Time last_used,
memcpy(data_size_, data_size, sizeof(data_size_));
}
-int SimpleEntryStat::GetOffsetInFile(const std::string& key,
+// These size methods all assume the presence of the SHA256 on stream zero,
+// since this version of the cache always writes it. In the read case, it may
+// not be present, in which case these methods can't be relied upon.
+
+int SimpleEntryStat::GetOffsetInFile(size_t key_length,
int offset,
int stream_index) const {
- const size_t headers_size = sizeof(SimpleFileHeader) + key.size();
+ const size_t headers_size = sizeof(SimpleFileHeader) + key_length;
const size_t additional_offset =
stream_index == 0 ? data_size_[1] + sizeof(SimpleFileEOF) : 0;
return headers_size + offset + additional_offset;
}
-int SimpleEntryStat::GetEOFOffsetInFile(const std::string& key,
+int SimpleEntryStat::GetEOFOffsetInFile(size_t key_length,
int stream_index) const {
- return GetOffsetInFile(key, data_size_[stream_index], stream_index);
+ size_t additional_offset;
+ if (stream_index != 0)
+ additional_offset = 0;
+ else
+ additional_offset = sizeof(net::SHA256HashValue);
+ return additional_offset +
+ GetOffsetInFile(key_length, data_size_[stream_index], stream_index);
}
-int SimpleEntryStat::GetLastEOFOffsetInFile(const std::string& key,
+int SimpleEntryStat::GetLastEOFOffsetInFile(size_t key_length,
int stream_index) const {
- const int file_index = GetFileIndexFromStreamIndex(stream_index);
- const int eof_data_offset =
- file_index == 0 ? data_size_[0] + data_size_[1] + sizeof(SimpleFileEOF)
- : data_size_[2];
- return GetOffsetInFile(key, eof_data_offset, stream_index);
+ if (stream_index == 1)
+ return GetEOFOffsetInFile(key_length, 0);
+ return GetEOFOffsetInFile(key_length, stream_index);
}
-int64_t SimpleEntryStat::GetFileSize(const std::string& key,
- int file_index) const {
- const int32_t total_data_size =
- file_index == 0 ? data_size_[0] + data_size_[1] + sizeof(SimpleFileEOF)
- : data_size_[2];
- return GetFileSizeFromKeyAndDataSize(key, total_data_size);
+int64_t SimpleEntryStat::GetFileSize(size_t key_length, int file_index) const {
+ int32_t total_data_size;
+ if (file_index == 0) {
+ total_data_size = data_size_[0] + data_size_[1] +
+ sizeof(net::SHA256HashValue) + sizeof(SimpleFileEOF);
+ } else {
+ total_data_size = data_size_[2];
+ }
+ return GetFileSizeFromDataSize(key_length, total_data_size);
}
SimpleEntryCreationResults::SimpleEntryCreationResults(
@@ -223,17 +255,16 @@ SimpleSynchronousEntry::EntryOperationData::EntryOperationData(
void SimpleSynchronousEntry::OpenEntry(
net::CacheType cache_type,
const FilePath& path,
+ const std::string& key,
const uint64_t entry_hash,
- bool had_index,
+ const bool had_index,
SimpleEntryCreationResults* out_results) {
base::ElapsedTimer open_time;
SimpleSynchronousEntry* sync_entry =
- new SimpleSynchronousEntry(cache_type, path, "", entry_hash);
- out_results->result =
- sync_entry->InitializeForOpen(had_index,
- &out_results->entry_stat,
- &out_results->stream_0_data,
- &out_results->stream_0_crc32);
+ new SimpleSynchronousEntry(cache_type, path, key, entry_hash, had_index);
+ out_results->result = sync_entry->InitializeForOpen(
+ &out_results->entry_stat, &out_results->stream_0_data,
+ &out_results->stream_0_crc32);
if (out_results->result != net::OK) {
sync_entry->Doom();
delete sync_entry;
@@ -251,13 +282,13 @@ void SimpleSynchronousEntry::CreateEntry(
const FilePath& path,
const std::string& key,
const uint64_t entry_hash,
- bool had_index,
+ const bool had_index,
SimpleEntryCreationResults* out_results) {
DCHECK_EQ(entry_hash, GetEntryHashKey(key));
SimpleSynchronousEntry* sync_entry =
- new SimpleSynchronousEntry(cache_type, path, key, entry_hash);
- out_results->result = sync_entry->InitializeForCreate(
- had_index, &out_results->entry_stat);
+ new SimpleSynchronousEntry(cache_type, path, key, entry_hash, had_index);
+ out_results->result =
+ sync_entry->InitializeForCreate(&out_results->entry_stat);
if (out_results->result != net::OK) {
if (out_results->result != net::ERR_FILE_EXISTS)
sync_entry->Doom();
@@ -298,19 +329,24 @@ void SimpleSynchronousEntry::ReadData(const EntryOperationData& in_entry_op,
net::IOBuffer* out_buf,
uint32_t* out_crc32,
SimpleEntryStat* entry_stat,
- int* out_result) const {
+ int* out_result) {
DCHECK(initialized_);
DCHECK_NE(0, in_entry_op.index);
- const int64_t file_offset =
- entry_stat->GetOffsetInFile(key_, in_entry_op.offset, in_entry_op.index);
int file_index = GetFileIndexFromStreamIndex(in_entry_op.index);
+ if (header_and_key_check_needed_[file_index] &&
+ !CheckHeaderAndKey(file_index)) {
+ *out_result = net::ERR_FAILED;
+ Doom();
+ return;
+ }
+ const int64_t file_offset = entry_stat->GetOffsetInFile(
+ key_.size(), in_entry_op.offset, in_entry_op.index);
// Zero-length reads and reads to the empty streams of omitted files should
// be handled in the SimpleEntryImpl.
DCHECK_GT(in_entry_op.buf_len, 0);
DCHECK(!empty_file_omitted_[file_index]);
- File* file = const_cast<File*>(&files_[file_index]);
- int bytes_read =
- file->Read(file_offset, out_buf->data(), in_entry_op.buf_len);
+ int bytes_read = files_[file_index].Read(file_offset, out_buf->data(),
+ in_entry_op.buf_len);
if (bytes_read > 0) {
entry_stat->set_last_used(Time::Now());
*out_crc32 = crc32(crc32(0L, Z_NULL, 0),
@@ -333,12 +369,18 @@ void SimpleSynchronousEntry::WriteData(const EntryOperationData& in_entry_op,
DCHECK_NE(0, in_entry_op.index);
int index = in_entry_op.index;
int file_index = GetFileIndexFromStreamIndex(index);
+ if (header_and_key_check_needed_[file_index] &&
+ !empty_file_omitted_[file_index] && !CheckHeaderAndKey(file_index)) {
+ *out_result = net::ERR_FAILED;
+ Doom();
+ return;
+ }
int offset = in_entry_op.offset;
int buf_len = in_entry_op.buf_len;
bool truncate = in_entry_op.truncate;
bool doomed = in_entry_op.doomed;
const int64_t file_offset = out_entry_stat->GetOffsetInFile(
- key_, in_entry_op.offset, in_entry_op.index);
+ key_.size(), in_entry_op.offset, in_entry_op.index);
bool extending_by_write = offset + buf_len > out_entry_stat->data_size(index);
if (empty_file_omitted_[file_index]) {
@@ -371,7 +413,7 @@ void SimpleSynchronousEntry::WriteData(const EntryOperationData& in_entry_op,
if (extending_by_write) {
// The EOF record and the eventual stream afterward need to be zeroed out.
const int64_t file_eof_offset =
- out_entry_stat->GetEOFOffsetInFile(key_, index);
+ out_entry_stat->GetEOFOffsetInFile(key_.size(), index);
if (!files_[file_index].SetLength(file_eof_offset)) {
RecordWriteResult(cache_type_, WRITE_RESULT_PRETRUNCATE_FAILURE);
Doom();
@@ -393,7 +435,8 @@ void SimpleSynchronousEntry::WriteData(const EntryOperationData& in_entry_op,
index, std::max(out_entry_stat->data_size(index), offset + buf_len));
} else {
out_entry_stat->set_data_size(index, offset + buf_len);
- int file_eof_offset = out_entry_stat->GetLastEOFOffsetInFile(key_, index);
+ int file_eof_offset =
+ out_entry_stat->GetLastEOFOffsetInFile(key_.size(), index);
if (!files_[file_index].SetLength(file_eof_offset)) {
RecordWriteResult(cache_type_, WRITE_RESULT_TRUNCATE_FAILURE);
Doom();
@@ -621,9 +664,10 @@ void SimpleSynchronousEntry::CheckEOFRecord(int index,
DCHECK(initialized_);
uint32_t crc32;
bool has_crc32;
+ bool has_key_sha256;
int stream_size;
- *out_result =
- GetEOFRecordData(index, entry_stat, &has_crc32, &crc32, &stream_size);
+ *out_result = GetEOFRecordData(index, entry_stat, &has_crc32, &has_key_sha256,
+ &crc32, &stream_size);
if (*out_result != net::OK) {
Doom();
return;
@@ -640,18 +684,9 @@ void SimpleSynchronousEntry::CheckEOFRecord(int index,
void SimpleSynchronousEntry::Close(
const SimpleEntryStat& entry_stat,
- scoped_ptr<std::vector<CRCRecord> > crc32s_to_write,
+ std::unique_ptr<std::vector<CRCRecord>> crc32s_to_write,
net::GrowableIOBuffer* stream_0_data) {
DCHECK(stream_0_data);
- // Write stream 0 data.
- int stream_0_offset = entry_stat.GetOffsetInFile(key_, 0, 0);
- if (files_[0].Write(stream_0_offset, stream_0_data->data(),
- entry_stat.data_size(0)) !=
- entry_stat.data_size(0)) {
- RecordCloseResult(cache_type_, CLOSE_RESULT_WRITE_FAILURE);
- DVLOG(1) << "Could not write stream 0 data.";
- Doom();
- }
for (std::vector<CRCRecord>::const_iterator it = crc32s_to_write->begin();
it != crc32s_to_write->end(); ++it) {
@@ -660,14 +695,36 @@ void SimpleSynchronousEntry::Close(
if (empty_file_omitted_[file_index])
continue;
+ if (stream_index == 0) {
+ // Write stream 0 data.
+ int stream_0_offset = entry_stat.GetOffsetInFile(key_.size(), 0, 0);
+ if (files_[0].Write(stream_0_offset, stream_0_data->data(),
+ entry_stat.data_size(0)) != entry_stat.data_size(0)) {
+ RecordCloseResult(cache_type_, CLOSE_RESULT_WRITE_FAILURE);
+ DVLOG(1) << "Could not write stream 0 data.";
+ Doom();
+ }
+ net::SHA256HashValue hash_value;
+ CalculateSHA256OfKey(key_, &hash_value);
+ if (files_[0].Write(stream_0_offset + entry_stat.data_size(0),
+ reinterpret_cast<char*>(hash_value.data),
+ sizeof(hash_value)) != sizeof(hash_value)) {
+ RecordCloseResult(cache_type_, CLOSE_RESULT_WRITE_FAILURE);
+ DVLOG(1) << "Could not write stream 0 data.";
+ Doom();
+ }
+ }
+
SimpleFileEOF eof_record;
eof_record.stream_size = entry_stat.data_size(stream_index);
eof_record.final_magic_number = kSimpleFinalMagicNumber;
eof_record.flags = 0;
if (it->has_crc32)
eof_record.flags |= SimpleFileEOF::FLAG_HAS_CRC32;
+ if (stream_index == 0)
+ eof_record.flags |= SimpleFileEOF::FLAG_HAS_KEY_SHA256;
eof_record.data_crc32 = it->data_crc32;
- int eof_offset = entry_stat.GetEOFOffsetInFile(key_, stream_index);
+ int eof_offset = entry_stat.GetEOFOffsetInFile(key_.size(), stream_index);
// If stream 0 changed size, the file needs to be resized, otherwise the
// next open will yield wrong stream sizes. On stream 1 and stream 2 proper
// resizing of the file is handled in SimpleSynchronousEntry::WriteData().
@@ -692,8 +749,11 @@ void SimpleSynchronousEntry::Close(
if (empty_file_omitted_[i])
continue;
+ if (header_and_key_check_needed_[i] && !CheckHeaderAndKey(i)) {
+ Doom();
+ }
files_[i].Close();
- const int64_t file_size = entry_stat.GetFileSize(key_, i);
+ const int64_t file_size = entry_stat.GetFileSize(key_.size(), i);
SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
"LastClusterSize", cache_type_,
file_size % 4096, 0, 4097, 50);
@@ -720,10 +780,12 @@ void SimpleSynchronousEntry::Close(
SimpleSynchronousEntry::SimpleSynchronousEntry(net::CacheType cache_type,
const FilePath& path,
const std::string& key,
- const uint64_t entry_hash)
+ const uint64_t entry_hash,
+ const bool had_index)
: cache_type_(cache_type),
path_(path),
entry_hash_(entry_hash),
+ had_index_(had_index),
key_(key),
have_open_files_(false),
initialized_(false) {
@@ -779,20 +841,19 @@ bool SimpleSynchronousEntry::MaybeCreateFile(
return files_[file_index].IsValid();
}
-bool SimpleSynchronousEntry::OpenFiles(
- bool had_index,
- SimpleEntryStat* out_entry_stat) {
+bool SimpleSynchronousEntry::OpenFiles(SimpleEntryStat* out_entry_stat) {
for (int i = 0; i < kSimpleEntryFileCount; ++i) {
File::Error error;
if (!MaybeOpenFile(i, &error)) {
- // TODO(ttuttle,gavinp): Remove one each of these triplets of histograms.
- // We can calculate the third as the sum or difference of the other two.
- RecordSyncOpenResult(
- cache_type_, OPEN_ENTRY_PLATFORM_FILE_ERROR, had_index);
+ // TODO(juliatuttle,gavinp): Remove one each of these triplets of
+ // histograms. We can calculate the third as the sum or difference of the
+ // other two.
+ RecordSyncOpenResult(cache_type_, OPEN_ENTRY_PLATFORM_FILE_ERROR,
+ had_index_);
SIMPLE_CACHE_UMA(ENUMERATION,
"SyncOpenPlatformFileError", cache_type_,
-error, -base::File::FILE_ERROR_MAX);
- if (had_index) {
+ if (had_index_) {
SIMPLE_CACHE_UMA(ENUMERATION,
"SyncOpenPlatformFileError_WithIndex", cache_type_,
-error, -base::File::FILE_ERROR_MAX);
@@ -859,19 +920,18 @@ bool SimpleSynchronousEntry::OpenFiles(
return true;
}
-bool SimpleSynchronousEntry::CreateFiles(
- bool had_index,
- SimpleEntryStat* out_entry_stat) {
+bool SimpleSynchronousEntry::CreateFiles(SimpleEntryStat* out_entry_stat) {
for (int i = 0; i < kSimpleEntryFileCount; ++i) {
File::Error error;
if (!MaybeCreateFile(i, FILE_NOT_REQUIRED, &error)) {
- // TODO(ttuttle,gavinp): Remove one each of these triplets of histograms.
- // We can calculate the third as the sum or difference of the other two.
- RecordSyncCreateResult(CREATE_ENTRY_PLATFORM_FILE_ERROR, had_index);
+ // TODO(juliatuttle,gavinp): Remove one each of these triplets of
+ // histograms. We can calculate the third as the sum or difference of the
+ // other two.
+ RecordSyncCreateResult(CREATE_ENTRY_PLATFORM_FILE_ERROR, had_index_);
SIMPLE_CACHE_UMA(ENUMERATION,
"SyncCreatePlatformFileError", cache_type_,
-error, -base::File::FILE_ERROR_MAX);
- if (had_index) {
+ if (had_index_) {
SIMPLE_CACHE_UMA(ENUMERATION,
"SyncCreatePlatformFileError_WithIndex", cache_type_,
-error, -base::File::FILE_ERROR_MAX);
@@ -917,13 +977,80 @@ void SimpleSynchronousEntry::CloseFiles() {
CloseFile(i);
}
+bool SimpleSynchronousEntry::CheckHeaderAndKey(int file_index) {
+ // TODO(gavinp): Frequently we are doing this at the same time as we read from
+ // the beginning of an entry. It might improve performance to make a single
+ // read(2) call rather than two separate reads. On the other hand, it would
+ // mean an extra memory to memory copy. In the case where we are opening an
+ // entry without a key, the kInitialHeaderRead setting means that we are
+ // actually already reading stream 1 data here, and tossing it out.
+ std::vector<char> header_data(key_.empty() ? kInitialHeaderRead
+ : GetHeaderSize(key_.size()));
+ int bytes_read =
+ files_[file_index].Read(0, header_data.data(), header_data.size());
+ const SimpleFileHeader* header =
+ reinterpret_cast<const SimpleFileHeader*>(header_data.data());
+
+ if (bytes_read == -1 || static_cast<size_t>(bytes_read) < sizeof(*header)) {
+ RecordSyncOpenResult(cache_type_, OPEN_ENTRY_CANT_READ_HEADER, had_index_);
+ return false;
+ }
+ // This resize will not invalidate iterators since it does not enlarge the
+ // header_data.
+ DCHECK_LE(static_cast<size_t>(bytes_read), header_data.size());
+ header_data.resize(bytes_read);
+
+ if (header->initial_magic_number != kSimpleInitialMagicNumber) {
+ RecordSyncOpenResult(cache_type_, OPEN_ENTRY_BAD_MAGIC_NUMBER, had_index_);
+ return false;
+ }
+
+ if (header->version != kSimpleEntryVersionOnDisk) {
+ RecordSyncOpenResult(cache_type_, OPEN_ENTRY_BAD_VERSION, had_index_);
+ return false;
+ }
+
+ size_t expected_header_size = GetHeaderSize(header->key_length);
+ if (header_data.size() < expected_header_size) {
+ size_t old_size = header_data.size();
+ int bytes_to_read = expected_header_size - old_size;
+ // This resize will invalidate iterators, since it is enlarging header_data.
+ header_data.resize(expected_header_size);
+ int bytes_read = files_[file_index].Read(
+ old_size, header_data.data() + old_size, bytes_to_read);
+ if (bytes_read != bytes_to_read) {
+ RecordSyncOpenResult(cache_type_, OPEN_ENTRY_CANT_READ_KEY, had_index_);
+ return false;
+ }
+ header = reinterpret_cast<const SimpleFileHeader*>(header_data.data());
+ }
+
+ char* key_data = header_data.data() + sizeof(*header);
+ if (base::Hash(key_data, header->key_length) != header->key_hash) {
+ RecordSyncOpenResult(cache_type_, OPEN_ENTRY_KEY_HASH_MISMATCH, had_index_);
+ return false;
+ }
+
+ std::string key_from_header(key_data, header->key_length);
+ if (key_.empty()) {
+ key_.swap(key_from_header);
+ } else {
+ if (key_ != key_from_header) {
+ RecordSyncOpenResult(cache_type_, OPEN_ENTRY_KEY_MISMATCH, had_index_);
+ return false;
+ }
+ }
+
+ header_and_key_check_needed_[file_index] = false;
+ return true;
+}
+
int SimpleSynchronousEntry::InitializeForOpen(
- bool had_index,
SimpleEntryStat* out_entry_stat,
scoped_refptr<net::GrowableIOBuffer>* stream_0_data,
uint32_t* out_stream_0_crc32) {
DCHECK(!initialized_);
- if (!OpenFiles(had_index, out_entry_stat)) {
+ if (!OpenFiles(out_entry_stat)) {
DLOG(WARNING) << "Could not open platform files for entry.";
return net::ERR_FAILED;
}
@@ -931,69 +1058,35 @@ int SimpleSynchronousEntry::InitializeForOpen(
if (empty_file_omitted_[i])
continue;
- SimpleFileHeader header;
- int header_read_result =
- files_[i].Read(0, reinterpret_cast<char*>(&header), sizeof(header));
- if (header_read_result != sizeof(header)) {
- DLOG(WARNING) << "Cannot read header from entry.";
- RecordSyncOpenResult(cache_type_, OPEN_ENTRY_CANT_READ_HEADER, had_index);
- return net::ERR_FAILED;
- }
-
- if (header.initial_magic_number != kSimpleInitialMagicNumber) {
- // TODO(gavinp): This seems very bad; for now we log at WARNING, but we
- // should give consideration to not saturating the log with these if that
- // becomes a problem.
- DLOG(WARNING) << "Magic number did not match.";
- RecordSyncOpenResult(cache_type_, OPEN_ENTRY_BAD_MAGIC_NUMBER, had_index);
- return net::ERR_FAILED;
- }
-
- if (header.version != kSimpleEntryVersionOnDisk) {
- DLOG(WARNING) << "Unreadable version.";
- RecordSyncOpenResult(cache_type_, OPEN_ENTRY_BAD_VERSION, had_index);
- return net::ERR_FAILED;
- }
-
- scoped_ptr<char[]> key(new char[header.key_length]);
- int key_read_result = files_[i].Read(sizeof(header), key.get(),
- header.key_length);
- if (key_read_result != base::checked_cast<int>(header.key_length)) {
- DLOG(WARNING) << "Cannot read key from entry.";
- RecordSyncOpenResult(cache_type_, OPEN_ENTRY_CANT_READ_KEY, had_index);
- return net::ERR_FAILED;
+ if (!key_.empty()) {
+ header_and_key_check_needed_[i] = true;
+ } else {
+ if (!CheckHeaderAndKey(i))
+ return net::ERR_FAILED;
}
- key_ = std::string(key.get(), header.key_length);
if (i == 0) {
// File size for stream 0 has been stored temporarily in data_size[1].
- int total_data_size =
- GetDataSizeFromKeyAndFileSize(key_, out_entry_stat->data_size(1));
- int ret_value_stream_0 = ReadAndValidateStream0(
- total_data_size, out_entry_stat, stream_0_data, out_stream_0_crc32);
+ int ret_value_stream_0 =
+ ReadAndValidateStream0(out_entry_stat->data_size(1), out_entry_stat,
+ stream_0_data, out_stream_0_crc32);
if (ret_value_stream_0 != net::OK)
return ret_value_stream_0;
} else {
out_entry_stat->set_data_size(
- 2, GetDataSizeFromKeyAndFileSize(key_, out_entry_stat->data_size(2)));
+ 2,
+ GetDataSizeFromFileSize(key_.size(), out_entry_stat->data_size(2)));
if (out_entry_stat->data_size(2) < 0) {
DLOG(WARNING) << "Stream 2 file is too small.";
return net::ERR_FAILED;
}
}
-
- if (base::Hash(key.get(), header.key_length) != header.key_hash) {
- DLOG(WARNING) << "Hash mismatch on key.";
- RecordSyncOpenResult(
- cache_type_, OPEN_ENTRY_KEY_HASH_MISMATCH, had_index);
- return net::ERR_FAILED;
- }
}
int32_t sparse_data_size = 0;
if (!OpenSparseFileIfExists(&sparse_data_size)) {
- RecordSyncOpenResult(
- cache_type_, OPEN_ENTRY_SPARSE_OPEN_FAILED, had_index);
+ RecordSyncOpenResult(cache_type_, OPEN_ENTRY_SPARSE_OPEN_FAILED,
+ had_index_);
return net::ERR_FAILED;
}
out_entry_stat->set_sparse_data_size(sparse_data_size);
@@ -1013,7 +1106,7 @@ int SimpleSynchronousEntry::InitializeForOpen(
SIMPLE_CACHE_UMA(BOOLEAN, "EntryOpenedAndStream2Removed", cache_type_,
removed_stream2);
- RecordSyncOpenResult(cache_type_, OPEN_ENTRY_SUCCESS, had_index);
+ RecordSyncOpenResult(cache_type_, OPEN_ENTRY_SUCCESS, had_index_);
initialized_ = true;
return net::OK;
}
@@ -1046,10 +1139,9 @@ bool SimpleSynchronousEntry::InitializeCreatedFile(
}
int SimpleSynchronousEntry::InitializeForCreate(
- bool had_index,
SimpleEntryStat* out_entry_stat) {
DCHECK(!initialized_);
- if (!CreateFiles(had_index, out_entry_stat)) {
+ if (!CreateFiles(out_entry_stat)) {
DLOG(WARNING) << "Could not create platform files.";
return net::ERR_FILE_EXISTS;
}
@@ -1059,49 +1151,56 @@ int SimpleSynchronousEntry::InitializeForCreate(
CreateEntryResult result;
if (!InitializeCreatedFile(i, &result)) {
- RecordSyncCreateResult(result, had_index);
+ RecordSyncCreateResult(result, had_index_);
return net::ERR_FAILED;
}
}
- RecordSyncCreateResult(CREATE_ENTRY_SUCCESS, had_index);
+ RecordSyncCreateResult(CREATE_ENTRY_SUCCESS, had_index_);
initialized_ = true;
return net::OK;
}
int SimpleSynchronousEntry::ReadAndValidateStream0(
- int total_data_size,
+ int file_size,
SimpleEntryStat* out_entry_stat,
scoped_refptr<net::GrowableIOBuffer>* stream_0_data,
- uint32_t* out_stream_0_crc32) const {
- // Temporarily assign all the data size to stream 1 in order to read the
- // EOF record for stream 0, which contains the size of stream 0.
+ uint32_t* out_stream_0_crc32) {
+ // Pretend this file has a null stream zero, and contains the optional key
+ // SHA256. This is good enough to read the EOF record on the file, which gives
+ // the actual size of stream 0.
+ int total_data_size = GetDataSizeFromFileSize(key_.size(), file_size);
out_entry_stat->set_data_size(0, 0);
- out_entry_stat->set_data_size(1, total_data_size - sizeof(SimpleFileEOF));
+ out_entry_stat->set_data_size(
+ 1,
+ total_data_size - sizeof(net::SHA256HashValue) - sizeof(SimpleFileEOF));
bool has_crc32;
+ bool has_key_sha256;
uint32_t read_crc32;
int stream_0_size;
- int ret_value_crc32 = GetEOFRecordData(
- 0, *out_entry_stat, &has_crc32, &read_crc32, &stream_0_size);
+ int ret_value_crc32 =
+ GetEOFRecordData(0, *out_entry_stat, &has_crc32, &has_key_sha256,
+ &read_crc32, &stream_0_size);
if (ret_value_crc32 != net::OK)
return ret_value_crc32;
-
- if (stream_0_size > out_entry_stat->data_size(1))
+ // Calculate and set the real values for data size.
+ int stream_1_size = out_entry_stat->data_size(1) - stream_0_size;
+ if (!has_key_sha256)
+ stream_1_size += sizeof(net::SHA256HashValue);
+ if (stream_1_size < 0)
return net::ERR_FAILED;
-
- // These are the real values of data size.
out_entry_stat->set_data_size(0, stream_0_size);
- out_entry_stat->set_data_size(
- 1, out_entry_stat->data_size(1) - stream_0_size);
+ out_entry_stat->set_data_size(1, stream_1_size);
// Put stream 0 data in memory.
*stream_0_data = new net::GrowableIOBuffer();
- (*stream_0_data)->SetCapacity(stream_0_size);
- int file_offset = out_entry_stat->GetOffsetInFile(key_, 0, 0);
- File* file = const_cast<File*>(&files_[0]);
- int bytes_read =
- file->Read(file_offset, (*stream_0_data)->data(), stream_0_size);
- if (bytes_read != stream_0_size)
+ (*stream_0_data)->SetCapacity(stream_0_size + sizeof(net::SHA256HashValue));
+ int file_offset = out_entry_stat->GetOffsetInFile(key_.size(), 0, 0);
+ int read_size = stream_0_size;
+ if (has_key_sha256)
+ read_size += sizeof(net::SHA256HashValue);
+ if (files_[0].Read(file_offset, (*stream_0_data)->data(), read_size) !=
+ read_size)
return net::ERR_FAILED;
// Check the CRC32.
@@ -1117,6 +1216,27 @@ int SimpleSynchronousEntry::ReadAndValidateStream0(
return net::ERR_FAILED;
}
*out_stream_0_crc32 = expected_crc32;
+
+ // If present, check the key SHA256.
+ if (has_key_sha256) {
+ net::SHA256HashValue hash_value;
+ CalculateSHA256OfKey(key_, &hash_value);
+ bool matched =
+ std::memcmp(&hash_value, (*stream_0_data)->data() + stream_0_size,
+ sizeof(hash_value)) == 0;
+ if (!matched) {
+ RecordKeySHA256Result(cache_type_, KeySHA256Result::NO_MATCH);
+ return net::ERR_FAILED;
+ }
+ RecordKeySHA256Result(cache_type_, KeySHA256Result::MATCHED);
+ } else {
+ RecordKeySHA256Result(cache_type_, KeySHA256Result::NOT_PRESENT);
+ }
+
+ // Ensure the key is validated before completion.
+ if (!has_key_sha256 && header_and_key_check_needed_[0])
+ CheckHeaderAndKey(0);
+
RecordCheckEOFResult(cache_type_, CHECK_EOF_RESULT_SUCCESS);
return net::OK;
}
@@ -1124,10 +1244,11 @@ int SimpleSynchronousEntry::ReadAndValidateStream0(
int SimpleSynchronousEntry::GetEOFRecordData(int index,
const SimpleEntryStat& entry_stat,
bool* out_has_crc32,
+ bool* out_has_key_sha256,
uint32_t* out_crc32,
int* out_data_size) const {
SimpleFileEOF eof_record;
- int file_offset = entry_stat.GetEOFOffsetInFile(key_, index);
+ int file_offset = entry_stat.GetEOFOffsetInFile(key_.size(), index);
int file_index = GetFileIndexFromStreamIndex(index);
File* file = const_cast<File*>(&files_[file_index]);
if (file->Read(file_offset, reinterpret_cast<char*>(&eof_record),
@@ -1145,6 +1266,9 @@ int SimpleSynchronousEntry::GetEOFRecordData(int index,
*out_has_crc32 = (eof_record.flags & SimpleFileEOF::FLAG_HAS_CRC32) ==
SimpleFileEOF::FLAG_HAS_CRC32;
+ *out_has_key_sha256 =
+ (eof_record.flags & SimpleFileEOF::FLAG_HAS_KEY_SHA256) ==
+ SimpleFileEOF::FLAG_HAS_KEY_SHA256;
*out_crc32 = eof_record.data_crc32;
*out_data_size = eof_record.stream_size;
SIMPLE_CACHE_UMA(BOOLEAN, "SyncCheckEOFHasCrc", cache_type_, *out_has_crc32);
@@ -1381,7 +1505,7 @@ bool SimpleSynchronousEntry::ReadSparseRange(const SparseRange* range,
return false;
}
}
- // TODO(ttuttle): Incremental crc32 calculation?
+ // TODO(juliatuttle): Incremental crc32 calculation?
return true;
}
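Taken together, the rewritten offset helpers above pin down a single layout for file 0: header, key, stream 1 data, stream 1 EOF record, stream 0 data, key SHA-256, stream 0 EOF record. A standalone sketch of that arithmetic with illustrative constants (the real sizes come from SimpleFileHeader, SimpleFileEOF, and net::SHA256HashValue, so these numbers are assumptions):

#include <cstdint>
#include <cstdio>

const int64_t kHeaderSize = 24;     // illustrative, not sizeof(SimpleFileHeader)
const int64_t kEofRecordSize = 20;  // illustrative, not sizeof(SimpleFileEOF)
const int64_t kSha256Size = 32;     // size of a SHA-256 digest

// [header][key][stream 1][EOF 1][stream 0][SHA-256][EOF 0]
int64_t Stream1Offset(int64_t key_length) {
  return kHeaderSize + key_length;
}
int64_t Stream0Offset(int64_t key_length, int64_t stream1_size) {
  return Stream1Offset(key_length) + stream1_size + kEofRecordSize;
}
int64_t File0Size(int64_t key_length, int64_t stream0_size,
                  int64_t stream1_size) {
  return Stream0Offset(key_length, stream1_size) + stream0_size +
         kSha256Size + kEofRecordSize;
}

int main() {
  // A 9-byte key, 100 bytes in stream 0, 50 bytes in stream 1.
  std::printf("stream 0 starts at %lld\n",
              static_cast<long long>(Stream0Offset(9, 50)));
  std::printf("file 0 size is %lld\n",
              static_cast<long long>(File0Size(9, 100, 50)));
  return 0;
}

This also shows why ReadAndValidateStream0() has to guess at first: before the stream 0 EOF record is read, only the total file size is known, so it temporarily assigns everything after the header to stream 1.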
diff --git a/chromium/net/disk_cache/simple/simple_synchronous_entry.h b/chromium/net/disk_cache/simple/simple_synchronous_entry.h
index 963f665b931..3c939a835a5 100644
--- a/chromium/net/disk_cache/simple/simple_synchronous_entry.h
+++ b/chromium/net/disk_cache/simple/simple_synchronous_entry.h
@@ -9,14 +9,15 @@
#include <algorithm>
#include <map>
+#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "base/files/file.h"
#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "net/base/cache_type.h"
#include "net/base/net_export.h"
@@ -27,6 +28,8 @@ class GrowableIOBuffer;
class IOBuffer;
}
+FORWARD_DECLARE_TEST(DiskCacheBackendTest, SimpleCacheEnumerationLongKeys);
+
namespace disk_cache {
class SimpleSynchronousEntry;
@@ -41,12 +44,10 @@ class NET_EXPORT_PRIVATE SimpleEntryStat {
const int32_t data_size[],
const int32_t sparse_data_size);
- int GetOffsetInFile(const std::string& key,
- int offset,
- int stream_index) const;
- int GetEOFOffsetInFile(const std::string& key, int stream_index) const;
- int GetLastEOFOffsetInFile(const std::string& key, int file_index) const;
- int64_t GetFileSize(const std::string& key, int file_index) const;
+ int GetOffsetInFile(size_t key_length, int offset, int stream_index) const;
+ int GetEOFOffsetInFile(size_t key_length, int stream_index) const;
+ int GetLastEOFOffsetInFile(size_t key_length, int file_index) const;
+ int64_t GetFileSize(size_t key_length, int file_index) const;
base::Time last_used() const { return last_used_; }
base::Time last_modified() const { return last_modified_; }
@@ -114,8 +115,12 @@ class SimpleSynchronousEntry {
bool doomed;
};
+ // Opens a disk cache entry on disk. The |key| parameter is optional; if it is
+ // empty, the operation may be slower. The |entry_hash| parameter is required.
+ // |had_index| is provided only for histograms.
static void OpenEntry(net::CacheType cache_type,
const base::FilePath& path,
+ const std::string& key,
uint64_t entry_hash,
bool had_index,
SimpleEntryCreationResults* out_results);
@@ -150,7 +155,7 @@ class SimpleSynchronousEntry {
net::IOBuffer* out_buf,
uint32_t* out_crc32,
SimpleEntryStat* entry_stat,
- int* out_result) const;
+ int* out_result);
void WriteData(const EntryOperationData& in_entry_op,
net::IOBuffer* in_buf,
SimpleEntryStat* out_entry_stat,
@@ -176,13 +181,16 @@ class SimpleSynchronousEntry {
// Close all streams, and add write EOF records to streams indicated by the
// CRCRecord entries in |crc32s_to_write|.
void Close(const SimpleEntryStat& entry_stat,
- scoped_ptr<std::vector<CRCRecord> > crc32s_to_write,
+ std::unique_ptr<std::vector<CRCRecord>> crc32s_to_write,
net::GrowableIOBuffer* stream_0_data);
const base::FilePath& path() const { return path_; }
std::string key() const { return key_; }
private:
+ FRIEND_TEST_ALL_PREFIXES(::DiskCacheBackendTest,
+ SimpleCacheEnumerationLongKeys);
+
enum CreateEntryResult {
CREATE_ENTRY_SUCCESS = 0,
CREATE_ENTRY_PLATFORM_FILE_ERROR = 1,
@@ -207,10 +215,16 @@ class SimpleSynchronousEntry {
}
};
+ // When opening an entry without knowing the key, the header must be read
+ // before the size of the key is known. This is how much to read initially,
+ // to make it likely the entire key is read.
+ static const size_t kInitialHeaderRead = 64 * 1024;
+
SimpleSynchronousEntry(net::CacheType cache_type,
const base::FilePath& path,
const std::string& key,
- uint64_t entry_hash);
+ uint64_t entry_hash,
+ bool had_index);
// Like Entry, the SimpleSynchronousEntry self releases when Close() is
// called.
@@ -227,18 +241,19 @@ class SimpleSynchronousEntry {
bool MaybeCreateFile(int file_index,
FileRequired file_required,
base::File::Error* out_error);
- bool OpenFiles(bool had_index,
- SimpleEntryStat* out_entry_stat);
- bool CreateFiles(bool had_index,
- SimpleEntryStat* out_entry_stat);
+ bool OpenFiles(SimpleEntryStat* out_entry_stat);
+ bool CreateFiles(SimpleEntryStat* out_entry_stat);
void CloseFile(int index);
void CloseFiles();
- // Returns a net error, i.e. net::OK on success. |had_index| is passed
- // from the main entry for metrics purposes, and is true if the index was
- // initialized when the open operation began.
- int InitializeForOpen(bool had_index,
- SimpleEntryStat* out_entry_stat,
+ // Read the header and key at the beginning of the file, and validate that
+ // they are correct. If this entry was opened with a key, the key is checked
+ // for a match. If not, then the |key_| member is set based on the value in
+ // this header. Records histograms if any check fails.
+ bool CheckHeaderAndKey(int file_index);
+
+ // Returns a net error, i.e. net::OK on success.
+ int InitializeForOpen(SimpleEntryStat* out_entry_stat,
scoped_refptr<net::GrowableIOBuffer>* stream_0_data,
uint32_t* out_stream_0_crc32);
@@ -248,22 +263,21 @@ class SimpleSynchronousEntry {
bool InitializeCreatedFile(int index, CreateEntryResult* out_result);
// Returns a net error, including net::OK on success and net::FILE_EXISTS
- // when the entry already exists. |had_index| is passed from the main entry
- // for metrics purposes, and is true if the index was initialized when the
- // create operation began.
- int InitializeForCreate(bool had_index, SimpleEntryStat* out_entry_stat);
+ // when the entry already exists.
+ int InitializeForCreate(SimpleEntryStat* out_entry_stat);
// Allocates and fills a buffer with stream 0 data in |stream_0_data|, then
// checks its crc32.
int ReadAndValidateStream0(
- int total_data_size,
+ int file_size,
SimpleEntryStat* out_entry_stat,
scoped_refptr<net::GrowableIOBuffer>* stream_0_data,
- uint32_t* out_stream_0_crc32) const;
+ uint32_t* out_stream_0_crc32);
int GetEOFRecordData(int index,
const SimpleEntryStat& entry_stat,
bool* out_has_crc32,
+ bool* out_has_key_sha256,
uint32_t* out_crc32,
int* out_data_size) const;
void Doom() const;
@@ -320,11 +334,19 @@ class SimpleSynchronousEntry {
const net::CacheType cache_type_;
const base::FilePath path_;
const uint64_t entry_hash_;
+ const bool had_index_;
std::string key_;
bool have_open_files_;
bool initialized_;
+ // Normally false. This is set to true when an entry is opened without
+ // checking the file headers. Any subsequent read will perform the check
+ // before completing.
+ bool header_and_key_check_needed_[kSimpleEntryFileCount] = {
+ false,
+ };
+
base::File files_[kSimpleEntryFileCount];
// True if the corresponding stream is empty and therefore no on-disk file
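Taken together, kInitialHeaderRead, CheckHeaderAndKey() and the had_index_ flag support opening an entry by hash alone: the front of the file must be read blind, since the key length is unknown until the header has been parsed. Below is a minimal sketch of that parse, assuming a simplified header layout and hypothetical helper names; the real logic lives in SimpleSynchronousEntry::CheckHeaderAndKey.

// Sketch: recover the key from the front of a simple-cache entry file when
// only the entry hash is known. A 64 KiB initial read makes it likely the
// whole key arrives in one I/O; longer keys need a follow-up read.
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

struct FileHeader {  // simplified stand-in for SimpleFileHeader
  uint64_t initial_magic_number;
  uint32_t version;
  uint32_t key_length;
  uint32_t key_hash;
};

constexpr uint64_t kMagic = 0xfcfb6d1ba7725c30ULL;  // illustrative value

// |prefix| holds the first bytes of stream file 0. Returns false when the
// prefix is malformed or too short to contain the header plus the key.
bool ExtractKeyFromPrefix(const std::vector<char>& prefix, std::string* key) {
  FileHeader header;
  if (prefix.size() < sizeof(header))
    return false;
  std::memcpy(&header, prefix.data(), sizeof(header));
  if (header.initial_magic_number != kMagic)
    return false;  // corrupt or foreign file
  if (prefix.size() < sizeof(header) + header.key_length)
    return false;  // caller must issue a larger read for oversized keys
  key->assign(prefix.data() + sizeof(header), header.key_length);
  return true;
}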
diff --git a/chromium/net/disk_cache/simple/simple_test_util.cc b/chromium/net/disk_cache/simple/simple_test_util.cc
index 2d3edcb22a1..97982ff10b8 100644
--- a/chromium/net/disk_cache/simple/simple_test_util.cc
+++ b/chromium/net/disk_cache/simple/simple_test_util.cc
@@ -6,17 +6,22 @@
#include "base/files/file.h"
#include "base/files/file_path.h"
+#include "net/base/hash_value.h"
+#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_util.h"
namespace disk_cache {
namespace simple_util {
+using base::File;
+using base::FilePath;
+
bool CreateCorruptFileForTests(const std::string& key,
- const base::FilePath& cache_path) {
- base::FilePath entry_file_path = cache_path.AppendASCII(
+ const FilePath& cache_path) {
+ FilePath entry_file_path = cache_path.AppendASCII(
disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
- int flags = base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE;
- base::File entry_file(entry_file_path, flags);
+ int flags = File::FLAG_CREATE_ALWAYS | File::FLAG_WRITE;
+ File entry_file(entry_file_path, flags);
if (!entry_file.IsValid())
return false;
@@ -24,5 +29,73 @@ bool CreateCorruptFileForTests(const std::string& key,
return entry_file.Write(0, "dummy", 1) == 1;
}
+bool RemoveKeySHA256FromEntry(const std::string& key,
+ const FilePath& cache_path) {
+ FilePath entry_file_path = cache_path.AppendASCII(
+ disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
+ int flags = File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE;
+ File entry_file(entry_file_path, flags);
+ if (!entry_file.IsValid())
+ return false;
+ int file_length = entry_file.GetLength();
+ SimpleFileEOF eof_record;
+ if (entry_file.Read(file_length - sizeof(eof_record),
+ reinterpret_cast<char*>(&eof_record),
+ sizeof(eof_record)) != sizeof(eof_record)) {
+ return false;
+ }
+ if (eof_record.final_magic_number != disk_cache::kSimpleFinalMagicNumber ||
+ (eof_record.flags & SimpleFileEOF::FLAG_HAS_KEY_SHA256) !=
+ SimpleFileEOF::FLAG_HAS_KEY_SHA256) {
+ return false;
+ }
+ // Remove the key SHA256 flag, and rewrite the EOF record on top of the
+ // SHA256. Truncate the file afterwards, and we have an identical entry
+ // lacking a key SHA256.
+ eof_record.flags &= ~SimpleFileEOF::FLAG_HAS_KEY_SHA256;
+ if (entry_file.Write(
+ file_length - sizeof(eof_record) - sizeof(net::SHA256HashValue),
+ reinterpret_cast<char*>(&eof_record),
+ sizeof(eof_record)) != sizeof(eof_record)) {
+ return false;
+ }
+ if (!entry_file.SetLength(file_length - sizeof(net::SHA256HashValue))) {
+ return false;
+ }
+ return true;
+}
+
+bool CorruptKeySHA256FromEntry(const std::string& key,
+ const base::FilePath& cache_path) {
+ FilePath entry_file_path = cache_path.AppendASCII(
+ disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
+ int flags = File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE;
+ File entry_file(entry_file_path, flags);
+ if (!entry_file.IsValid())
+ return false;
+ int file_length = entry_file.GetLength();
+ SimpleFileEOF eof_record;
+ if (entry_file.Read(file_length - sizeof(eof_record),
+ reinterpret_cast<char*>(&eof_record),
+ sizeof(eof_record)) != sizeof(eof_record)) {
+ return false;
+ }
+ if (eof_record.final_magic_number != disk_cache::kSimpleFinalMagicNumber ||
+ (eof_record.flags & SimpleFileEOF::FLAG_HAS_KEY_SHA256) !=
+ SimpleFileEOF::FLAG_HAS_KEY_SHA256) {
+ return false;
+ }
+
+ const char corrupt_data[] = "corrupt data";
+ static_assert(sizeof(corrupt_data) <= sizeof(net::SHA256HashValue),
+ "corrupt data should not be larger than a SHA-256");
+ if (entry_file.Write(
+ file_length - sizeof(eof_record) - sizeof(net::SHA256HashValue),
+ corrupt_data, sizeof(corrupt_data)) != sizeof(corrupt_data)) {
+ return false;
+ }
+ return true;
+}
+
} // namespace simple_util
} // namespace disk_cache
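Both helpers patch the tail of stream file 0, where the optional key SHA-256 sits immediately before the SimpleFileEOF record. A hedged sketch of how a backend test might drive them follows; the fixture and the WriteEntryToCache/OpenEntryFromCache helpers are placeholders, and only the simple_util calls come from this file.

// Sketch of a test exercising the new helpers. SimpleCacheTest,
// WriteEntryToCache() and OpenEntryFromCache() are hypothetical; the two
// simple_util functions are the ones defined above.
TEST_F(SimpleCacheTest, KeySHA256Handling) {
  const std::string key("some-key");

  // An entry stripped of its SHA-256 footer must still open: the footer
  // is optional, for backwards compatibility with older entries.
  ASSERT_TRUE(WriteEntryToCache(key));
  ASSERT_TRUE(disk_cache::simple_util::RemoveKeySHA256FromEntry(
      key, cache_path()));
  EXPECT_TRUE(OpenEntryFromCache(key));

  // A present-but-corrupt SHA-256 must fail the open-time check instead.
  ASSERT_TRUE(WriteEntryToCache(key));
  ASSERT_TRUE(disk_cache::simple_util::CorruptKeySHA256FromEntry(
      key, cache_path()));
  EXPECT_FALSE(OpenEntryFromCache(key));
}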
diff --git a/chromium/net/disk_cache/simple/simple_test_util.h b/chromium/net/disk_cache/simple/simple_test_util.h
index 95ab0550e35..cef6b47c868 100644
--- a/chromium/net/disk_cache/simple/simple_test_util.h
+++ b/chromium/net/disk_cache/simple/simple_test_util.h
@@ -43,6 +43,14 @@ class ImmutableArray {
bool CreateCorruptFileForTests(const std::string& key,
const base::FilePath& cache_path);
+// Removes the key SHA256 from an entry.
+bool RemoveKeySHA256FromEntry(const std::string& key,
+ const base::FilePath& cache_path);
+
+// Modifies the key SHA256 of an entry so that it is corrupt.
+bool CorruptKeySHA256FromEntry(const std::string& key,
+ const base::FilePath& cache_path);
+
} // namespace simple_backend
} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_util.cc b/chromium/net/disk_cache/simple/simple_util.cc
index a81c28ff4f6..eac565bc7b6 100644
--- a/chromium/net/disk_cache/simple/simple_util.cc
+++ b/chromium/net/disk_cache/simple/simple_util.cc
@@ -97,17 +97,19 @@ std::string GetFilenameFromKeyAndFileIndex(const std::string& key,
base::StringPrintf("_%1d", file_index);
}
-int32_t GetDataSizeFromKeyAndFileSize(const std::string& key,
- int64_t file_size) {
+size_t GetHeaderSize(size_t key_length) {
+ return sizeof(SimpleFileHeader) + key_length;
+}
+
+int32_t GetDataSizeFromFileSize(size_t key_length, int64_t file_size) {
int64_t data_size =
- file_size - key.size() - sizeof(SimpleFileHeader) - sizeof(SimpleFileEOF);
+ file_size - key_length - sizeof(SimpleFileHeader) - sizeof(SimpleFileEOF);
return base::checked_cast<int32_t>(data_size);
}
-int64_t GetFileSizeFromKeyAndDataSize(const std::string& key,
- int32_t data_size) {
- return data_size + key.size() + sizeof(SimpleFileHeader) +
- sizeof(SimpleFileEOF);
+int64_t GetFileSizeFromDataSize(size_t key_length, int32_t data_size) {
+ return data_size + key_length + sizeof(SimpleFileHeader) +
+ sizeof(SimpleFileEOF);
}
int GetFileIndexFromStreamIndex(int stream_index) {
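The renamed functions make the layout arithmetic explicit: a stream file is header + key + data + EOF record, so file size and data size convert both ways given only the key length. A small round-trip check of that math under assumed struct sizes (the real values come from simple_entry_format.h):

// Round-trip check of the size math. kAssumedHeaderSize/kAssumedEOFSize are
// placeholders for sizeof(SimpleFileHeader) and sizeof(SimpleFileEOF).
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t kAssumedHeaderSize = 20;
constexpr size_t kAssumedEOFSize = 20;

int64_t FileSizeFromDataSize(size_t key_length, int32_t data_size) {
  return static_cast<int64_t>(data_size) + key_length + kAssumedHeaderSize +
         kAssumedEOFSize;
}

int32_t DataSizeFromFileSize(size_t key_length, int64_t file_size) {
  return static_cast<int32_t>(file_size - key_length - kAssumedHeaderSize -
                              kAssumedEOFSize);
}

int main() {
  const size_t key_length = 22;  // strlen("This is an example key")
  const int32_t data_size = 1000;
  const int64_t file_size = FileSizeFromDataSize(key_length, data_size);
  assert(file_size == 1000 + 22 + 20 + 20);  // 1062 bytes on disk
  assert(DataSizeFromFileSize(key_length, file_size) == data_size);
  return 0;
}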
diff --git a/chromium/net/disk_cache/simple/simple_util.h b/chromium/net/disk_cache/simple/simple_util.h
index faf36b6a90c..aa3eb036ff0 100644
--- a/chromium/net/disk_cache/simple/simple_util.h
+++ b/chromium/net/disk_cache/simple/simple_util.h
@@ -53,15 +53,19 @@ std::string GetFilenameFromEntryHashAndFileIndex(uint64_t entry_hash,
// Given a |key| for an entry, returns the name of the sparse data file.
std::string GetSparseFilenameFromEntryHash(uint64_t entry_hash);
+// Given the size of a key, returns the size in bytes of the header at the
+// beginning of a simple cache file.
+size_t GetHeaderSize(size_t key_length);
+
// Given the size of a file holding a stream in the simple backend and the key
// to an entry, returns the number of bytes in the stream.
-NET_EXPORT_PRIVATE int32_t GetDataSizeFromKeyAndFileSize(const std::string& key,
- int64_t file_size);
+NET_EXPORT_PRIVATE int32_t GetDataSizeFromFileSize(size_t key_length,
+ int64_t file_size);
// Given the size of a stream in the simple backend and the key to an entry,
// returns the number of bytes in the file.
-NET_EXPORT_PRIVATE int64_t GetFileSizeFromKeyAndDataSize(const std::string& key,
- int32_t data_size);
+NET_EXPORT_PRIVATE int64_t GetFileSizeFromDataSize(size_t key_length,
+ int32_t data_size);
// Given the stream index, returns the number of the file the stream is stored
// in.
diff --git a/chromium/net/disk_cache/simple/simple_util_unittest.cc b/chromium/net/disk_cache/simple/simple_util_unittest.cc
index 2956f27f17d..442dc425f8c 100644
--- a/chromium/net/disk_cache/simple/simple_util_unittest.cc
+++ b/chromium/net/disk_cache/simple/simple_util_unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include <stdint.h>
+#include <string>
#include "base/logging.h"
#include "net/disk_cache/simple/simple_util.h"
@@ -12,8 +13,8 @@ using disk_cache::simple_util::ConvertEntryHashKeyToHexString;
using disk_cache::simple_util::GetEntryHashKeyAsHexString;
using disk_cache::simple_util::GetEntryHashKeyFromHexString;
using disk_cache::simple_util::GetEntryHashKey;
-using disk_cache::simple_util::GetFileSizeFromKeyAndDataSize;
-using disk_cache::simple_util::GetDataSizeFromKeyAndFileSize;
+using disk_cache::simple_util::GetFileSizeFromDataSize;
+using disk_cache::simple_util::GetDataSizeFromFileSize;
class SimpleUtilTest : public testing::Test {};
@@ -70,8 +71,8 @@ TEST_F(SimpleUtilTest, GetEntryHashKeyFromHexString) {
}
TEST_F(SimpleUtilTest, SizesAndOffsets) {
- const char key[] = "This is an example key";
+ const std::string key("This is an example key");
const int data_size = 1000;
- const int file_size = GetFileSizeFromKeyAndDataSize(key, data_size);
- EXPECT_EQ(data_size, GetDataSizeFromKeyAndFileSize(key, file_size));
+ const int file_size = GetFileSizeFromDataSize(key.size(), data_size);
+ EXPECT_EQ(data_size, GetDataSizeFromFileSize(key.size(), file_size));
}
diff --git a/chromium/net/disk_cache/simple/simple_version_upgrade.cc b/chromium/net/disk_cache/simple/simple_version_upgrade.cc
index 91d1b9c5f35..460d6b18bdd 100644
--- a/chromium/net/disk_cache/simple/simple_version_upgrade.cc
+++ b/chromium/net/disk_cache/simple/simple_version_upgrade.cc
@@ -161,39 +161,44 @@ bool UpgradeSimpleCacheOnDisk(const base::FilePath& path) {
LOG(ERROR) << "Inconsistent cache version.";
return false;
}
- bool upgrade_needed = (version_from != kSimpleVersion);
- if (version_from == kMinVersionAbleToUpgrade) {
- // Upgrade only the index for V4 -> V5 move.
+ bool new_fake_index_needed = (version_from != kSimpleVersion);
+
+ // There should be one upgrade routine here for each incremental upgrade
+ // starting at kMinVersionAbleToUpgrade.
+ static_assert(kMinVersionAbleToUpgrade == 5, "upgrade routines don't match");
+ DCHECK_LE(5U, version_from);
+ if (version_from == 5) {
+ // Upgrade only the index for V5 -> V6 move.
if (!UpgradeIndexV5V6(path)) {
LogMessageFailedUpgradeFromVersion(file_header.version);
return false;
}
version_from++;
}
- if (version_from == kSimpleVersion) {
- if (!upgrade_needed) {
- return true;
- } else {
- const base::FilePath temp_fake_index = path.AppendASCII("upgrade-index");
- if (!WriteFakeIndexFile(temp_fake_index)) {
- base::DeleteFile(temp_fake_index, /* recursive = */ false);
- LOG(ERROR) << "Failed to write a new fake index.";
- LogMessageFailedUpgradeFromVersion(file_header.version);
- return false;
- }
- if (!base::ReplaceFile(temp_fake_index, fake_index, NULL)) {
- LOG(ERROR) << "Failed to replace the fake index.";
- LogMessageFailedUpgradeFromVersion(file_header.version);
- return false;
- }
- return true;
- }
+ DCHECK_LE(6U, version_from);
+ if (version_from == 6) {
+ // No upgrade from V6 -> V7, because the entry format has not changed and
+ // the V7 index reader is backwards compatible.
+ version_from++;
}
- // Verify during the test stage that the upgraders are implemented for all
- // versions. The release build would cause backend initialization failure
- // which would then later lead to removing all files known to the backend.
DCHECK_EQ(kSimpleVersion, version_from);
- return false;
+
+ if (!new_fake_index_needed)
+ return true;
+
+ const base::FilePath temp_fake_index = path.AppendASCII("upgrade-index");
+ if (!WriteFakeIndexFile(temp_fake_index)) {
+ base::DeleteFile(temp_fake_index, /* recursive = */ false);
+ LOG(ERROR) << "Failed to write a new fake index.";
+ LogMessageFailedUpgradeFromVersion(file_header.version);
+ return false;
+ }
+ if (!base::ReplaceFile(temp_fake_index, fake_index, NULL)) {
+ LOG(ERROR) << "Failed to replace the fake index.";
+ LogMessageFailedUpgradeFromVersion(file_header.version);
+ return false;
+ }
+ return true;
}
} // namespace disk_cache
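The rewritten function turns version migration into a chain of single-step blocks: each block lifts the on-disk format exactly one version and falls through, and DCHECK_EQ(kSimpleVersion, version_from) catches a missing step. A minimal sketch of the same pattern with stubbed-out steps (the names and trivial bodies are illustrative):

// Sketch of the incremental upgrade chain used above. Each block upgrades
// exactly one on-disk version; reaching the end means the format is current.
#include <cstdint>

constexpr uint32_t kMinVersionAbleToUpgrade = 5;
constexpr uint32_t kSimpleVersion = 7;

bool UpgradeIndexV5V6Stub() { return true; }  // stand-in for UpgradeIndexV5V6

bool UpgradeOnDisk(uint32_t version_from) {
  if (version_from < kMinVersionAbleToUpgrade ||
      version_from > kSimpleVersion) {
    return false;  // too old to migrate, or written by a newer build
  }
  if (version_from == 5) {
    if (!UpgradeIndexV5V6Stub())
      return false;
    ++version_from;
  }
  if (version_from == 6) {
    // V6 -> V7 is a no-op: the entry format is unchanged and the V7 index
    // reader is backwards compatible, so only the fake index needs rewriting.
    ++version_from;
  }
  return version_from == kSimpleVersion;  // mirrors the DCHECK_EQ above
}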