author     Andras Becsi <andras.becsi@digia.com>            2014-03-18 13:16:26 +0100
committer  Frederik Gladhorn <frederik.gladhorn@digia.com>  2014-03-20 15:55:39 +0100
commit     3f0f86b0caed75241fa71c95a5d73bc0164348c5 (patch)
tree       92b9fb00f2e9e90b0be2262093876d4f43b6cd13 /chromium/net/disk_cache
parent     e90d7c4b152c56919d963987e2503f9909a666d2 (diff)
download   qtwebengine-chromium-3f0f86b0caed75241fa71c95a5d73bc0164348c5.tar.gz
Update to new stable branch 1750
This also includes an updated ninja and updated chromium dependencies needed on Windows.

Change-Id: Icd597d80ed3fa4425933c9f1334c3c2e31291c42
Reviewed-by: Zoltan Arvai <zarvai@inf.u-szeged.hu>
Reviewed-by: Zeno Albisser <zeno.albisser@digia.com>
Diffstat (limited to 'chromium/net/disk_cache')
-rw-r--r--  chromium/net/disk_cache/backend_impl.cc | 48
-rw-r--r--  chromium/net/disk_cache/backend_impl.h | 3
-rw-r--r--  chromium/net/disk_cache/backend_unittest.cc | 83
-rw-r--r--  chromium/net/disk_cache/block_files.cc | 2
-rw-r--r--  chromium/net/disk_cache/block_files_unittest.cc | 16
-rw-r--r--  chromium/net/disk_cache/cache_creator.cc | 4
-rw-r--r--  chromium/net/disk_cache/cache_util.cc | 44
-rw-r--r--  chromium/net/disk_cache/cache_util.h | 7
-rw-r--r--  chromium/net/disk_cache/cache_util_posix.cc | 2
-rw-r--r--  chromium/net/disk_cache/cache_util_unittest.cc | 16
-rw-r--r--  chromium/net/disk_cache/entry_unittest.cc | 204
-rw-r--r--  chromium/net/disk_cache/file.h | 3
-rw-r--r--  chromium/net/disk_cache/file_ios.cc | 312
-rw-r--r--  chromium/net/disk_cache/file_posix.cc | 7
-rw-r--r--  chromium/net/disk_cache/file_win.cc | 4
-rw-r--r--  chromium/net/disk_cache/mem_backend_impl.cc | 8
-rw-r--r--  chromium/net/disk_cache/simple/simple_backend_impl.cc | 57
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_format.cc | 5
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_format.h | 10
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_impl.cc | 248
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_impl.h | 33
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_operation.cc | 139
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_operation.h | 44
-rw-r--r--  chromium/net/disk_cache/simple/simple_histogram_macros.h | 39
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_file.cc | 7
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_file.h | 8
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_file_unittest.cc | 91
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_unittest.cc | 2
-rw-r--r--  chromium/net/disk_cache/simple/simple_net_log_parameters.h | 6
-rw-r--r--  chromium/net/disk_cache/simple/simple_synchronous_entry.cc | 551
-rw-r--r--  chromium/net/disk_cache/simple/simple_synchronous_entry.h | 97
-rw-r--r--  chromium/net/disk_cache/simple/simple_util.cc | 6
-rw-r--r--  chromium/net/disk_cache/simple/simple_util.h | 3
-rw-r--r--  chromium/net/disk_cache/simple/simple_version_upgrade_unittest.cc | 6
-rw-r--r--  chromium/net/disk_cache/v3/backend_worker.cc | 2
35 files changed, 1812 insertions, 305 deletions
diff --git a/chromium/net/disk_cache/backend_impl.cc b/chromium/net/disk_cache/backend_impl.cc
index 0f8c3fdd195..9e2131715db 100644
--- a/chromium/net/disk_cache/backend_impl.cc
+++ b/chromium/net/disk_cache/backend_impl.cc
@@ -45,7 +45,6 @@ const char* kIndexName = "index";
// for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
-const int kDefaultCacheSize = 80 * 1024 * 1024;
// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;
@@ -108,37 +107,6 @@ void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
namespace disk_cache {
-// Returns the preferred maximum number of bytes for the cache given the
-// number of available bytes.
-int PreferedCacheSize(int64 available) {
- // Return 80% of the available space if there is not enough space to use
- // kDefaultCacheSize.
- if (available < kDefaultCacheSize * 10 / 8)
- return static_cast<int32>(available * 8 / 10);
-
- // Return kDefaultCacheSize if it uses 80% to 10% of the available space.
- if (available < kDefaultCacheSize * 10)
- return kDefaultCacheSize;
-
- // Return 10% of the available space if the target size
- // (2.5 * kDefaultCacheSize) is more than 10%.
- if (available < static_cast<int64>(kDefaultCacheSize) * 25)
- return static_cast<int32>(available / 10);
-
- // Return the target size (2.5 * kDefaultCacheSize) if it uses 10% to 1%
- // of the available space.
- if (available < static_cast<int64>(kDefaultCacheSize) * 250)
- return kDefaultCacheSize * 5 / 2;
-
- // Return 1% of the available space if it does not exceed kint32max.
- if (available < static_cast<int64>(kint32max) * 100)
- return static_cast<int32>(available / 100);
-
- return kint32max;
-}
-
-// ------------------------------------------------------------------------
-
BackendImpl::BackendImpl(const base::FilePath& path,
base::MessageLoopProxy* cache_thread,
net::NetLog* net_log)
@@ -337,6 +305,8 @@ void BackendImpl::CleanupCache() {
// This is a net_unittest, verify that we are not 'leaking' entries.
File::WaitForPendingIO(&num_pending_io_);
DCHECK(!num_refs_);
+ } else {
+ File::DropPendingIO();
}
}
block_files_.CloseFiles();
@@ -1279,7 +1249,7 @@ bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
}
bool BackendImpl::InitBackingStore(bool* file_created) {
- if (!file_util::CreateDirectory(path_))
+ if (!base::CreateDirectory(path_))
return false;
base::FilePath index_name = path_.AppendASCII(kIndexName);
@@ -1338,12 +1308,7 @@ void BackendImpl::AdjustMaxCacheSize(int table_len) {
if (table_len)
available += data_->header.num_bytes;
- max_size_ = PreferedCacheSize(available);
-
- // Let's not use more than the default size while we tune-up the performance
- // of bigger caches. TODO(rvargas): remove this limit.
- if (max_size_ > kDefaultCacheSize * 4)
- max_size_ = kDefaultCacheSize * 4;
+ max_size_ = PreferredCacheSize(available);
if (!table_len)
return;
@@ -1896,6 +1861,11 @@ void BackendImpl::ReportStats() {
stats_.SetCounter(Stats::DOOM_CACHE, 0);
stats_.SetCounter(Stats::DOOM_RECENT, 0);
+ int age = (Time::Now() -
+ Time::FromInternalValue(data_->header.create_time)).InHours();
+ if (age)
+ CACHE_UMA(HOURS, "FilesAge", 0, age);
+
int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
if (!data_->header.create_time || !data_->header.lru.filled) {
int cause = data_->header.create_time ? 0 : 1;
diff --git a/chromium/net/disk_cache/backend_impl.h b/chromium/net/disk_cache/backend_impl.h
index 982bee64818..61b95b3ab0b 100644
--- a/chromium/net/disk_cache/backend_impl.h
+++ b/chromium/net/disk_cache/backend_impl.h
@@ -392,9 +392,6 @@ class NET_EXPORT_PRIVATE BackendImpl : public Backend {
DISALLOW_COPY_AND_ASSIGN(BackendImpl);
};
-// Returns the preferred max cache size given the available disk space.
-NET_EXPORT_PRIVATE int PreferedCacheSize(int64 available);
-
} // namespace disk_cache
#endif // NET_DISK_CACHE_BACKEND_IMPL_H_
diff --git a/chromium/net/disk_cache/backend_unittest.cc b/chromium/net/disk_cache/backend_unittest.cc
index 6ccd1e0224a..f3c02fdf09f 100644
--- a/chromium/net/disk_cache/backend_unittest.cc
+++ b/chromium/net/disk_cache/backend_unittest.cc
@@ -489,7 +489,7 @@ TEST_F(DiskCacheBackendTest, ExternalFiles) {
// And verify that the first file is still there.
scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
- ASSERT_EQ(kSize, file_util::ReadFile(filename, buffer2->data(), kSize));
+ ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
}
@@ -518,9 +518,13 @@ void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
base::MessageLoop::current()->RunUntilIdle();
+#if !defined(OS_IOS)
// Wait for the actual operation to complete, or we'll keep a file handle that
- // may cause issues later.
+ // may cause issues later. Note that on iOS systems even though this test
+ // uses a single thread, the actual IO is posted to a worker thread and the
+ // cache destructor breaks the link to reach cb when the operation completes.
rv = cb.GetResult(rv);
+#endif
}
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
@@ -543,6 +547,8 @@ TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
}
#endif
+// See crbug.com/330074
+#if !defined(OS_IOS)
// Tests that one cache instance is not affected by another one going away.
TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
base::ScopedTempDir store;
@@ -576,6 +582,7 @@ TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
// may cause issues later.
rv = cb.GetResult(rv);
}
+#endif
// Tests that we deal with background-thread pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
@@ -2898,51 +2905,51 @@ TEST_F(DiskCacheTest, MultipleInstances) {
// Test the six regions of the curve that determines the max cache size.
TEST_F(DiskCacheTest, AutomaticMaxSize) {
- const int kDefaultSize = 80 * 1024 * 1024;
- int64 large_size = kDefaultSize;
- int64 largest_size = kint32max;
+ using disk_cache::kDefaultCacheSize;
+ int64 large_size = kDefaultCacheSize;
// Region 1: expected = available * 0.8
- EXPECT_EQ((kDefaultSize - 1) * 8 / 10,
- disk_cache::PreferedCacheSize(large_size - 1));
- EXPECT_EQ(kDefaultSize * 8 / 10,
- disk_cache::PreferedCacheSize(large_size));
- EXPECT_EQ(kDefaultSize - 1,
- disk_cache::PreferedCacheSize(large_size * 10 / 8 - 1));
+ EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
+ disk_cache::PreferredCacheSize(large_size - 1));
+ EXPECT_EQ(kDefaultCacheSize * 8 / 10,
+ disk_cache::PreferredCacheSize(large_size));
+ EXPECT_EQ(kDefaultCacheSize - 1,
+ disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));
// Region 2: expected = default_size
- EXPECT_EQ(kDefaultSize,
- disk_cache::PreferedCacheSize(large_size * 10 / 8));
- EXPECT_EQ(kDefaultSize,
- disk_cache::PreferedCacheSize(large_size * 10 - 1));
+ EXPECT_EQ(kDefaultCacheSize,
+ disk_cache::PreferredCacheSize(large_size * 10 / 8));
+ EXPECT_EQ(kDefaultCacheSize,
+ disk_cache::PreferredCacheSize(large_size * 10 - 1));
// Region 3: expected = available * 0.1
- EXPECT_EQ(kDefaultSize,
- disk_cache::PreferedCacheSize(large_size * 10));
- EXPECT_EQ((kDefaultSize * 25 - 1) / 10,
- disk_cache::PreferedCacheSize(large_size * 25 - 1));
+ EXPECT_EQ(kDefaultCacheSize,
+ disk_cache::PreferredCacheSize(large_size * 10));
+ EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
+ disk_cache::PreferredCacheSize(large_size * 25 - 1));
// Region 4: expected = default_size * 2.5
- EXPECT_EQ(kDefaultSize * 25 / 10,
- disk_cache::PreferedCacheSize(large_size * 25));
- EXPECT_EQ(kDefaultSize * 25 / 10,
- disk_cache::PreferedCacheSize(large_size * 100 - 1));
- EXPECT_EQ(kDefaultSize * 25 / 10,
- disk_cache::PreferedCacheSize(large_size * 100));
- EXPECT_EQ(kDefaultSize * 25 / 10,
- disk_cache::PreferedCacheSize(large_size * 250 - 1));
+ EXPECT_EQ(kDefaultCacheSize * 25 / 10,
+ disk_cache::PreferredCacheSize(large_size * 25));
+ EXPECT_EQ(kDefaultCacheSize * 25 / 10,
+ disk_cache::PreferredCacheSize(large_size * 100 - 1));
+ EXPECT_EQ(kDefaultCacheSize * 25 / 10,
+ disk_cache::PreferredCacheSize(large_size * 100));
+ EXPECT_EQ(kDefaultCacheSize * 25 / 10,
+ disk_cache::PreferredCacheSize(large_size * 250 - 1));
// Region 5: expected = available * 0.1
- EXPECT_EQ(kDefaultSize * 25 / 10,
- disk_cache::PreferedCacheSize(large_size * 250));
- EXPECT_EQ(kint32max - 1,
- disk_cache::PreferedCacheSize(largest_size * 100 - 1));
+ int64 largest_size = kDefaultCacheSize * 4;
+ EXPECT_EQ(kDefaultCacheSize * 25 / 10,
+ disk_cache::PreferredCacheSize(large_size * 250));
+ EXPECT_EQ(largest_size - 1,
+ disk_cache::PreferredCacheSize(largest_size * 100 - 1));
- // Region 6: expected = kint32max
- EXPECT_EQ(kint32max,
- disk_cache::PreferedCacheSize(largest_size * 100));
- EXPECT_EQ(kint32max,
- disk_cache::PreferedCacheSize(largest_size * 10000));
+ // Region 6: expected = largest possible size
+ EXPECT_EQ(largest_size,
+ disk_cache::PreferredCacheSize(largest_size * 100));
+ EXPECT_EQ(largest_size,
+ disk_cache::PreferredCacheSize(largest_size * 10000));
}
// Tests that we can "migrate" a running instance from one experiment group to
@@ -3470,8 +3477,4 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
EXPECT_TRUE(keys_to_match.empty());
}
-// TODO(pasko): Add a Simple Cache test that would simulate upgrade from the
-// version with the index file in the cache directory to the version with the
-// index file in subdirectory.
-
#endif // defined(OS_POSIX)
diff --git a/chromium/net/disk_cache/block_files.cc b/chromium/net/disk_cache/block_files.cc
index 896cdb16328..d8d13e8f2bf 100644
--- a/chromium/net/disk_cache/block_files.cc
+++ b/chromium/net/disk_cache/block_files.cc
@@ -83,7 +83,7 @@ bool BlockHeader::CreateMapBlock(int size, int* index) {
disk_cache::FileLock lock(header_);
int index_offset = j * 4 + 4 - target;
*index = current * 32 + index_offset;
- DCHECK_EQ(*index / 4, (*index + size - 1) / 4);
+ DLOG_IF(ERROR, *index / 4 != (*index + size - 1) / 4) << "Bit mismatch";
uint32 to_add = ((1 << size) - 1) << index_offset;
header_->num_entries++;
diff --git a/chromium/net/disk_cache/block_files_unittest.cc b/chromium/net/disk_cache/block_files_unittest.cc
index fa7c5dbb742..4a095c992ec 100644
--- a/chromium/net/disk_cache/block_files_unittest.cc
+++ b/chromium/net/disk_cache/block_files_unittest.cc
@@ -31,7 +31,7 @@ namespace disk_cache {
TEST_F(DiskCacheTest, BlockFiles_Grow) {
ASSERT_TRUE(CleanupCacheDir());
- ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+ ASSERT_TRUE(base::CreateDirectory(cache_path_));
BlockFiles files(cache_path_);
ASSERT_TRUE(files.Init(true));
@@ -57,7 +57,7 @@ TEST_F(DiskCacheTest, BlockFiles_Grow) {
// We should be able to delete empty block files.
TEST_F(DiskCacheTest, BlockFiles_Shrink) {
ASSERT_TRUE(CleanupCacheDir());
- ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+ ASSERT_TRUE(base::CreateDirectory(cache_path_));
BlockFiles files(cache_path_);
ASSERT_TRUE(files.Init(true));
@@ -80,7 +80,7 @@ TEST_F(DiskCacheTest, BlockFiles_Shrink) {
// Handling of block files not properly closed.
TEST_F(DiskCacheTest, BlockFiles_Recover) {
ASSERT_TRUE(CleanupCacheDir());
- ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+ ASSERT_TRUE(base::CreateDirectory(cache_path_));
BlockFiles files(cache_path_);
ASSERT_TRUE(files.Init(true));
@@ -157,7 +157,7 @@ TEST_F(DiskCacheTest, BlockFiles_Recover) {
// Handling of truncated files.
TEST_F(DiskCacheTest, BlockFiles_ZeroSizeFile) {
ASSERT_TRUE(CleanupCacheDir());
- ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+ ASSERT_TRUE(base::CreateDirectory(cache_path_));
BlockFiles files(cache_path_);
ASSERT_TRUE(files.Init(true));
@@ -178,7 +178,7 @@ TEST_F(DiskCacheTest, BlockFiles_ZeroSizeFile) {
// Handling of truncated files (non empty).
TEST_F(DiskCacheTest, BlockFiles_TruncatedFile) {
ASSERT_TRUE(CleanupCacheDir());
- ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+ ASSERT_TRUE(base::CreateDirectory(cache_path_));
BlockFiles files(cache_path_);
ASSERT_TRUE(files.Init(true));
@@ -201,7 +201,7 @@ TEST_F(DiskCacheTest, BlockFiles_TruncatedFile) {
// Tests detection of out of sync counters.
TEST_F(DiskCacheTest, BlockFiles_Counters) {
ASSERT_TRUE(CleanupCacheDir());
- ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+ ASSERT_TRUE(base::CreateDirectory(cache_path_));
BlockFiles files(cache_path_);
ASSERT_TRUE(files.Init(true));
@@ -256,7 +256,7 @@ TEST_F(DiskCacheTest, BlockFiles_Counters) {
// An invalid file can be detected after init.
TEST_F(DiskCacheTest, BlockFiles_InvalidFile) {
ASSERT_TRUE(CleanupCacheDir());
- ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+ ASSERT_TRUE(base::CreateDirectory(cache_path_));
BlockFiles files(cache_path_);
ASSERT_TRUE(files.Init(true));
@@ -302,7 +302,7 @@ TEST_F(DiskCacheTest, BlockFiles_Stats) {
// Tests that we add and remove blocks correctly.
TEST_F(DiskCacheTest, AllocationMap) {
ASSERT_TRUE(CleanupCacheDir());
- ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+ ASSERT_TRUE(base::CreateDirectory(cache_path_));
BlockFiles files(cache_path_);
ASSERT_TRUE(files.Init(true));
diff --git a/chromium/net/disk_cache/cache_creator.cc b/chromium/net/disk_cache/cache_creator.cc
index 857d1714f7c..1fee41f2738 100644
--- a/chromium/net/disk_cache/cache_creator.cc
+++ b/chromium/net/disk_cache/cache_creator.cc
@@ -82,7 +82,8 @@ int CacheCreator::Run() {
// TODO(gavinp,pasko): Turn Simple Cache on for more cache types as
// appropriate.
if (backend_type_ == net::CACHE_BACKEND_SIMPLE &&
- (type_ == net::DISK_CACHE || type_ == net::APP_CACHE)) {
+ (type_ == net::DISK_CACHE || type_ == net::APP_CACHE ||
+ type_ == net::MEDIA_CACHE)) {
disk_cache::SimpleBackendImpl* simple_cache =
new disk_cache::SimpleBackendImpl(path_, max_bytes_, type_,
thread_.get(), net_log_);
@@ -113,6 +114,7 @@ void CacheCreator::DoCallback(int result) {
#endif
} else {
LOG(ERROR) << "Unable to create cache";
+ created_cache_.reset();
}
callback_.Run(result);
delete this;
diff --git a/chromium/net/disk_cache/cache_util.cc b/chromium/net/disk_cache/cache_util.cc
index 7389960a16a..4452e7b0aa9 100644
--- a/chromium/net/disk_cache/cache_util.cc
+++ b/chromium/net/disk_cache/cache_util.cc
@@ -48,10 +48,37 @@ base::FilePath GetTempCacheName(const base::FilePath& path,
return base::FilePath();
}
+int64 PreferredCacheSizeInternal(int64 available) {
+ using disk_cache::kDefaultCacheSize;
+ // Return 80% of the available space if there is not enough space to use
+ // kDefaultCacheSize.
+ if (available < kDefaultCacheSize * 10 / 8)
+ return available * 8 / 10;
+
+ // Return kDefaultCacheSize if it uses 10% to 80% of the available space.
+ if (available < kDefaultCacheSize * 10)
+ return kDefaultCacheSize;
+
+ // Return 10% of the available space if the target size
+ // (2.5 * kDefaultCacheSize) is more than 10%.
+ if (available < static_cast<int64>(kDefaultCacheSize) * 25)
+ return available / 10;
+
+ // Return the target size (2.5 * kDefaultCacheSize) if it uses 10% to 1%
+ // of the available space.
+ if (available < static_cast<int64>(kDefaultCacheSize) * 250)
+ return kDefaultCacheSize * 5 / 2;
+
+ // Return 1% of the available space.
+ return available / 100;
+}
+
} // namespace
namespace disk_cache {
+const int kDefaultCacheSize = 80 * 1024 * 1024;
+
void DeleteCache(const base::FilePath& path, bool remove_folder) {
if (remove_folder) {
if (!base::DeleteFile(path, /* recursive */ true))
@@ -111,4 +138,21 @@ bool DelayedCacheCleanup(const base::FilePath& full_path) {
return true;
}
+// Returns the preferred maximum number of bytes for the cache given the
+// number of available bytes.
+int PreferredCacheSize(int64 available) {
+ if (available < 0)
+ return kDefaultCacheSize;
+
+ int64 max_size = PreferredCacheSizeInternal(available);
+
+ // Limit cache size to somewhat less than kint32max to avoid potential
+ // integer overflows in cache backend implementations.
+ DCHECK(kDefaultCacheSize * 4 < kint32max);
+ if (max_size > kDefaultCacheSize * 4)
+ max_size = kDefaultCacheSize * 4;
+
+ return implicit_cast<int32>(max_size);
+}
+
} // namespace disk_cache
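
For orientation, here is a minimal standalone sketch (not part of the patch) of the curve that PreferredCacheSize() now implements, assuming the 80 MiB kDefaultCacheSize defined above and the new 4x cap; the sample values in main() show which region each amount of free space falls into.

// A minimal sketch of the PreferredCacheSize() curve, for illustration only.
#include <cstdint>
#include <iostream>

namespace {

const int64_t kDefault = 80LL * 1024 * 1024;  // mirrors kDefaultCacheSize

int64_t PreferredCacheSizeSketch(int64_t available) {
  if (available < 0)
    return kDefault;                        // free space unknown
  int64_t size;
  if (available < kDefault * 10 / 8)
    size = available * 8 / 10;              // Region 1: 80% of free space
  else if (available < kDefault * 10)
    size = kDefault;                        // Region 2: the default size
  else if (available < kDefault * 25)
    size = available / 10;                  // Region 3: 10% of free space
  else if (available < kDefault * 250)
    size = kDefault * 5 / 2;                // Region 4: 2.5x the default
  else
    size = available / 100;                 // Region 5: 1% of free space
  return size > kDefault * 4 ? kDefault * 4 : size;  // Region 6: cap at 4x
}

}  // namespace

int main() {
  const int64_t kMiB = 1024 * 1024;
  std::cout << PreferredCacheSizeSketch(64 * kMiB) / kMiB << "\n";      // 51 (Region 1)
  std::cout << PreferredCacheSizeSketch(400 * kMiB) / kMiB << "\n";     // 80 (Region 2)
  std::cout << PreferredCacheSizeSketch(1024 * kMiB) / kMiB << "\n";    // 102 (Region 3)
  std::cout << PreferredCacheSizeSketch(4096 * kMiB) / kMiB << "\n";    // 200 (Region 4)
  std::cout << PreferredCacheSizeSketch(102400 * kMiB) / kMiB << "\n";  // 320 (capped)
  return 0;
}
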
diff --git a/chromium/net/disk_cache/cache_util.h b/chromium/net/disk_cache/cache_util.h
index 2005ba5e240..c4baa2d6110 100644
--- a/chromium/net/disk_cache/cache_util.h
+++ b/chromium/net/disk_cache/cache_util.h
@@ -36,6 +36,13 @@ NET_EXPORT_PRIVATE bool DeleteCacheFile(const base::FilePath& name);
// task. Used by cache creator itself or by backends for self-restart on error.
bool DelayedCacheCleanup(const base::FilePath& full_path);
+// Returns the preferred max cache size given the available disk space.
+NET_EXPORT_PRIVATE int PreferredCacheSize(int64 available);
+
+// The default cache size should not ideally be exposed, but the blockfile
+// backend uses it for reasons that include testing.
+NET_EXPORT_PRIVATE extern const int kDefaultCacheSize;
+
} // namespace disk_cache
#endif // NET_DISK_CACHE_CACHE_UTIL_H_
diff --git a/chromium/net/disk_cache/cache_util_posix.cc b/chromium/net/disk_cache/cache_util_posix.cc
index b33c560a000..59e1c505d49 100644
--- a/chromium/net/disk_cache/cache_util_posix.cc
+++ b/chromium/net/disk_cache/cache_util_posix.cc
@@ -19,7 +19,7 @@ bool MoveCache(const base::FilePath& from_path, const base::FilePath& to_path) {
// to see these directories anymore in an unmounted encrypted
// filesystem, so we just move each item in the cache to a new
// directory.
- if (!file_util::CreateDirectory(to_path)) {
+ if (!base::CreateDirectory(to_path)) {
LOG(ERROR) << "Unable to create destination cache directory.";
return false;
}
diff --git a/chromium/net/disk_cache/cache_util_unittest.cc b/chromium/net/disk_cache/cache_util_unittest.cc
index d2e76054f7f..3a05196828e 100644
--- a/chromium/net/disk_cache/cache_util_unittest.cc
+++ b/chromium/net/disk_cache/cache_util_unittest.cc
@@ -20,17 +20,17 @@ class CacheUtilTest : public PlatformTest {
file2_ = base::FilePath(cache_dir_.Append(FILE_PATH_LITERAL(".file02")));
dir1_ = base::FilePath(cache_dir_.Append(FILE_PATH_LITERAL("dir01")));
file3_ = base::FilePath(dir1_.Append(FILE_PATH_LITERAL("file03")));
- ASSERT_TRUE(file_util::CreateDirectory(cache_dir_));
- FILE *fp = file_util::OpenFile(file1_, "w");
+ ASSERT_TRUE(base::CreateDirectory(cache_dir_));
+ FILE *fp = base::OpenFile(file1_, "w");
ASSERT_TRUE(fp != NULL);
- file_util::CloseFile(fp);
- fp = file_util::OpenFile(file2_, "w");
+ base::CloseFile(fp);
+ fp = base::OpenFile(file2_, "w");
ASSERT_TRUE(fp != NULL);
- file_util::CloseFile(fp);
- ASSERT_TRUE(file_util::CreateDirectory(dir1_));
- fp = file_util::OpenFile(file3_, "w");
+ base::CloseFile(fp);
+ ASSERT_TRUE(base::CreateDirectory(dir1_));
+ fp = base::OpenFile(file3_, "w");
ASSERT_TRUE(fp != NULL);
- file_util::CloseFile(fp);
+ base::CloseFile(fp);
dest_dir_ = tmp_dir_.path().Append(FILE_PATH_LITERAL("old_Cache_001"));
dest_file1_ = base::FilePath(dest_dir_.Append(FILE_PATH_LITERAL("file01")));
dest_file2_ =
diff --git a/chromium/net/disk_cache/entry_unittest.cc b/chromium/net/disk_cache/entry_unittest.cc
index b8c9e668198..291dedfb433 100644
--- a/chromium/net/disk_cache/entry_unittest.cc
+++ b/chromium/net/disk_cache/entry_unittest.cc
@@ -1931,7 +1931,7 @@ void DiskCacheEntryTest::UpdateSparseEntry() {
entry1->Close();
entry2->Close();
FlushQueueForTest();
- if (memory_only_)
+ if (memory_only_ || simple_cache_mode_)
EXPECT_EQ(2, cache_->GetEntryCount());
else
EXPECT_EQ(3, cache_->GetEntryCount());
@@ -1964,20 +1964,15 @@ void DiskCacheEntryTest::DoomSparseEntry() {
int64 offset = 1024;
// Write to a bunch of ranges.
for (int i = 0; i < 12; i++) {
- EXPECT_EQ(kSize,
- entry1->WriteSparseData(
- offset, buf.get(), kSize, net::CompletionCallback()));
+ EXPECT_EQ(kSize, WriteSparseData(entry1, offset, buf.get(), kSize));
// Keep the second map under the default size.
- if (i < 9) {
- EXPECT_EQ(kSize,
- entry2->WriteSparseData(
- offset, buf.get(), kSize, net::CompletionCallback()));
- }
+ if (i < 9)
+ EXPECT_EQ(kSize, WriteSparseData(entry2, offset, buf.get(), kSize));
offset *= 4;
}
- if (memory_only_)
+ if (memory_only_ || simple_cache_mode_)
EXPECT_EQ(2, cache_->GetEntryCount());
else
EXPECT_EQ(15, cache_->GetEntryCount());
@@ -2110,7 +2105,7 @@ void DiskCacheEntryTest::PartialSparseEntry() {
int rv;
int64 start;
net::TestCompletionCallback cb;
- if (memory_only_) {
+ if (memory_only_ || simple_cache_mode_) {
rv = entry->GetAvailableRange(0, 600, &start, cb.callback());
EXPECT_EQ(100, cb.GetResult(rv));
EXPECT_EQ(500, start);
@@ -2129,7 +2124,7 @@ void DiskCacheEntryTest::PartialSparseEntry() {
// 1. Query before a filled 1KB block.
// 2. Query within a filled 1KB block.
// 3. Query beyond a filled 1KB block.
- if (memory_only_) {
+ if (memory_only_ || simple_cache_mode_) {
rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
EXPECT_EQ(3496, cb.GetResult(rv));
EXPECT_EQ(20000, start);
@@ -3019,7 +3014,7 @@ TEST_F(DiskCacheEntryTest, SimpleCacheCreateDoomRace) {
base::FilePath entry_file_path = cache_path_.AppendASCII(
disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i));
base::PlatformFileInfo info;
- EXPECT_FALSE(file_util::GetFileInfo(entry_file_path, &info));
+ EXPECT_FALSE(base::GetFileInfo(entry_file_path, &info));
}
}
@@ -3527,8 +3522,9 @@ TEST_F(DiskCacheEntryTest, SimpleCacheStream1SizeChanges) {
ASSERT_TRUE(entry_file0 != base::kInvalidPlatformFileValue);
int data_size[disk_cache::kSimpleEntryStreamCount] = {kSize, stream1_size, 0};
+ int sparse_data_size = 0;
disk_cache::SimpleEntryStat entry_stat(
- base::Time::Now(), base::Time::Now(), data_size);
+ base::Time::Now(), base::Time::Now(), data_size, sparse_data_size);
int eof_offset = entry_stat.GetEOFOffsetInFile(key, 0);
disk_cache::SimpleFileEOF eof_record;
ASSERT_EQ(static_cast<int>(sizeof(eof_record)), base::ReadPlatformFile(
@@ -3629,12 +3625,7 @@ TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream1) {
SetSimpleCacheMode();
InitCache();
- const int kHalfSize = 8;
- const int kSize = kHalfSize * 2;
const char key[] = "key";
- scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
- scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
- CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
disk_cache::Entry* entry;
@@ -3657,19 +3648,16 @@ TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream2) {
const int kHalfSize = 8;
const int kSize = kHalfSize * 2;
const char key[] = "key";
- scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
- scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
- CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kHalfSize, false);
disk_cache::Entry* entry;
- int buf_len;
// Create entry, write empty buffer to third stream, and close: third stream
// should still be omitted, since the entry ignores writes that don't modify
// data or change the length.
ASSERT_EQ(net::OK, CreateEntry(key, &entry));
- buf_len = WriteData(entry, 2, 0, buffer1, 0, true);
- ASSERT_EQ(0, buf_len);
+ EXPECT_EQ(0, WriteData(entry, 2, 0, buffer, 0, true));
entry->Close();
EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
@@ -3690,21 +3678,18 @@ TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream3) {
CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
disk_cache::Entry* entry;
- int buf_len;
// Create entry, write data to third stream, and close: third stream should
// not be omitted, since it contains data. Re-open entry and ensure there
// are that many bytes in the third stream.
ASSERT_EQ(net::OK, CreateEntry(key, &entry));
- buf_len = WriteData(entry, 2, 0, buffer1, kHalfSize, true);
- ASSERT_EQ(kHalfSize, buf_len);
+ EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1, kHalfSize, true));
entry->Close();
EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
ASSERT_EQ(net::OK, OpenEntry(key, &entry));
- buf_len = ReadData(entry, 2, 0, buffer2, kSize);
- ASSERT_EQ(buf_len, kHalfSize);
- // TODO: Compare data?
+ EXPECT_EQ(kHalfSize, ReadData(entry, 2, 0, buffer2, kSize));
+ EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kHalfSize));
entry->Close();
EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
@@ -3727,7 +3712,6 @@ TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream4) {
CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
disk_cache::Entry* entry;
- int buf_len;
// Create entry, write data to third stream, truncate third stream back to
// empty, and close: third stream will not initially be omitted, since entry
@@ -3735,17 +3719,14 @@ TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream4) {
// removes it on open if it is empty. Reopen, ensure that the file is
// deleted, and that there's no data in the third stream.
ASSERT_EQ(net::OK, CreateEntry(key, &entry));
- buf_len = WriteData(entry, 2, 0, buffer1, kHalfSize, true);
- ASSERT_EQ(kHalfSize, buf_len);
- buf_len = WriteData(entry, 2, 0, buffer1, 0, true);
- ASSERT_EQ(0, buf_len);
+ EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1, kHalfSize, true));
+ EXPECT_EQ(0, WriteData(entry, 2, 0, buffer1, 0, true));
entry->Close();
EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
ASSERT_EQ(net::OK, OpenEntry(key, &entry));
EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
- buf_len = ReadData(entry, 2, 0, buffer2, kSize);
- ASSERT_EQ(0, buf_len);
+ EXPECT_EQ(0, ReadData(entry, 2, 0, buffer2, kSize));
entry->Close();
EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
@@ -3762,9 +3743,8 @@ TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream5) {
const int kHalfSize = 8;
const int kSize = kHalfSize * 2;
const char key[] = "key";
- scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
- scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
- CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kHalfSize, false);
disk_cache::Entry* entry;
@@ -3773,9 +3753,149 @@ TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream5) {
// that it doesn't cause the file to be created on disk.)
ASSERT_EQ(net::OK, CreateEntry(key, &entry));
entry->Doom();
- WriteData(entry, 2, 0, buffer1, kHalfSize, true);
+ WriteData(entry, 2, 0, buffer, kHalfSize, true);
entry->Close();
EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}
+// There could be a race between Doom and an optimistic write.
+TEST_F(DiskCacheEntryTest, SimpleCacheDoomOptimisticWritesRace) {
+ // Test sequence:
+ // Create, first Write, second Write, Close.
+ // Open, Close.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* null = NULL;
+ const char key[] = "the first key";
+
+ const int kSize = 200;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer1->data(), kSize, false);
+ CacheTestFillBuffer(buffer2->data(), kSize, false);
+
+ // The race only happens on stream 1 and stream 2.
+ for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
+ ASSERT_EQ(net::OK, DoomAllEntries());
+ disk_cache::Entry* entry = NULL;
+
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_NE(null, entry);
+ entry->Close();
+ entry = NULL;
+
+ ASSERT_EQ(net::OK, DoomAllEntries());
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_NE(null, entry);
+
+ int offset = 0;
+ int buf_len = kSize;
+ // This write should not be optimistic (since create is).
+ EXPECT_EQ(buf_len,
+ WriteData(entry, i, offset, buffer1.get(), buf_len, false));
+
+ offset = kSize;
+ // This write should be optimistic.
+ EXPECT_EQ(buf_len,
+ WriteData(entry, i, offset, buffer2.get(), buf_len, false));
+ entry->Close();
+
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ EXPECT_NE(null, entry);
+
+ entry->Close();
+ entry = NULL;
+ }
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheBasicSparseIO) {
+ SetSimpleCacheMode();
+ InitCache();
+ BasicSparseIO();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheHugeSparseIO) {
+ SetSimpleCacheMode();
+ InitCache();
+ HugeSparseIO();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheGetAvailableRange) {
+ SetSimpleCacheMode();
+ InitCache();
+ GetAvailableRange();
+}
+
+TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheCouldBeSparse) {
+ SetSimpleCacheMode();
+ InitCache();
+ CouldBeSparse();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheUpdateSparseEntry) {
+ SetSimpleCacheMode();
+ InitCache();
+ UpdateSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheDoomSparseEntry) {
+ SetSimpleCacheMode();
+ InitCache();
+ DoomSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCachePartialSparseEntry) {
+ SetSimpleCacheMode();
+ InitCache();
+ PartialSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheTruncateLargeSparseFile) {
+ const int kSize = 1024;
+
+ SetSimpleCacheMode();
+ // An entry is allowed sparse data 1/10 the size of the cache, so this size
+ // allows for one |kSize|-sized range plus overhead, but not two ranges.
+ SetMaxSize(kSize * 15);
+ InitCache();
+
+ const char key[] = "key";
+ disk_cache::Entry* null = NULL;
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_NE(null, entry);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+ net::TestCompletionCallback callback;
+ int ret;
+
+ // Verify initial conditions.
+ ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
+ EXPECT_EQ(0, callback.GetResult(ret));
+
+ ret = entry->ReadSparseData(kSize, buffer, kSize, callback.callback());
+ EXPECT_EQ(0, callback.GetResult(ret));
+
+ // Write a range and make sure it reads back.
+ ret = entry->WriteSparseData(0, buffer, kSize, callback.callback());
+ EXPECT_EQ(kSize, callback.GetResult(ret));
+
+ ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
+ EXPECT_EQ(kSize, callback.GetResult(ret));
+
+ // Write another range and make sure it reads back.
+ ret = entry->WriteSparseData(kSize, buffer, kSize, callback.callback());
+ EXPECT_EQ(kSize, callback.GetResult(ret));
+
+ ret = entry->ReadSparseData(kSize, buffer, kSize, callback.callback());
+ EXPECT_EQ(kSize, callback.GetResult(ret));
+
+ // Make sure the first range was removed when the second was written.
+ ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
+ EXPECT_EQ(0, callback.GetResult(ret));
+
+ entry->Close();
+}
+
#endif // defined(OS_POSIX)
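
A quick arithmetic check (a sketch, not part of the patch) of the sizing used by SimpleCacheTruncateLargeSparseFile above: with SetMaxSize(kSize * 15) and the 1/10 per-entry sparse budget that the divisor added later in simple_entry_impl.cc enforces, one 1 KiB range fits but writing a second forces the first to be evicted.

// Sketch of the sparse-budget arithmetic behind the test above; the divisor
// mirrors kMaxSparseDataSizeDivisor introduced in simple_entry_impl.cc below.
#include <cassert>
#include <cstdint>

int main() {
  const int64_t kSize = 1024;
  const int64_t max_cache_size = kSize * 15;   // SetMaxSize(kSize * 15)
  const int64_t kMaxSparseDataSizeDivisor = 10;
  const int64_t sparse_budget = max_cache_size / kMaxSparseDataSizeDivisor;

  assert(kSize <= sparse_budget);      // one 1 KiB range fits (budget: 1536)
  assert(2 * kSize > sparse_budget);   // a second range exceeds the budget
  return 0;
}
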
diff --git a/chromium/net/disk_cache/file.h b/chromium/net/disk_cache/file.h
index eb9a9ecc1e1..190f7cb9f82 100644
--- a/chromium/net/disk_cache/file.h
+++ b/chromium/net/disk_cache/file.h
@@ -70,6 +70,9 @@ class NET_EXPORT_PRIVATE File : public base::RefCounted<File> {
// Blocks until |num_pending_io| IO operations complete.
static void WaitForPendingIO(int* num_pending_io);
+ // Drops current pending operations without waiting for them to complete.
+ static void DropPendingIO();
+
protected:
virtual ~File();
diff --git a/chromium/net/disk_cache/file_ios.cc b/chromium/net/disk_cache/file_ios.cc
new file mode 100644
index 00000000000..9cad1551f15
--- /dev/null
+++ b/chromium/net/disk_cache/file_ios.cc
@@ -0,0 +1,312 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/file.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/threading/worker_pool.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/in_flight_io.h"
+
+namespace {
+
+// This class represents a single asynchronous IO operation while it is being
+// bounced between threads.
+class FileBackgroundIO : public disk_cache::BackgroundIO {
+ public:
+ // Other than the actual parameters for the IO operation (including the
+ // |callback| that must be notified at the end), we need the controller that
+ // is keeping track of all operations. When done, we notify the controller
+ // (we do NOT invoke the callback), in the worker thead that completed the
+ // operation.
+ FileBackgroundIO(disk_cache::File* file, const void* buf, size_t buf_len,
+ size_t offset, disk_cache::FileIOCallback* callback,
+ disk_cache::InFlightIO* controller)
+ : disk_cache::BackgroundIO(controller), callback_(callback), file_(file),
+ buf_(buf), buf_len_(buf_len), offset_(offset) {
+ }
+
+ disk_cache::FileIOCallback* callback() {
+ return callback_;
+ }
+
+ disk_cache::File* file() {
+ return file_;
+ }
+
+ // Read and Write are the operations that can be performed asynchronously.
+ // The actual parameters for the operation are setup in the constructor of
+ // the object. Both methods should be called from a worker thread, by posting
+ // a task to the WorkerPool (they are RunnableMethods). When finished,
+ // controller->OnIOComplete() is called.
+ void Read();
+ void Write();
+
+ private:
+ virtual ~FileBackgroundIO() {}
+
+ disk_cache::FileIOCallback* callback_;
+
+ disk_cache::File* file_;
+ const void* buf_;
+ size_t buf_len_;
+ size_t offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileBackgroundIO);
+};
+
+
+// The specialized controller that keeps track of current operations.
+class FileInFlightIO : public disk_cache::InFlightIO {
+ public:
+ FileInFlightIO() {}
+ virtual ~FileInFlightIO() {}
+
+ // These methods start an asynchronous operation. The arguments have the same
+ // semantics as the File asynchronous operations, with the exception that the
+ // operation never finishes synchronously.
+ void PostRead(disk_cache::File* file, void* buf, size_t buf_len,
+ size_t offset, disk_cache::FileIOCallback* callback);
+ void PostWrite(disk_cache::File* file, const void* buf, size_t buf_len,
+ size_t offset, disk_cache::FileIOCallback* callback);
+
+ protected:
+ // Invokes the users' completion callback at the end of the IO operation.
+ // |cancel| is true if the actual task posted to the thread is still
+ // queued (because we are inside WaitForPendingIO), and false if said task is
+ // the one performing the call.
+ virtual void OnOperationComplete(disk_cache::BackgroundIO* operation,
+ bool cancel) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FileInFlightIO);
+};
+
+// ---------------------------------------------------------------------------
+
+// Runs on a worker thread.
+void FileBackgroundIO::Read() {
+ if (file_->Read(const_cast<void*>(buf_), buf_len_, offset_)) {
+ result_ = static_cast<int>(buf_len_);
+ } else {
+ result_ = net::ERR_CACHE_READ_FAILURE;
+ }
+ NotifyController();
+}
+
+// Runs on a worker thread.
+void FileBackgroundIO::Write() {
+ bool rv = file_->Write(buf_, buf_len_, offset_);
+
+ result_ = rv ? static_cast<int>(buf_len_) : net::ERR_CACHE_WRITE_FAILURE;
+ NotifyController();
+}
+
+// ---------------------------------------------------------------------------
+
+void FileInFlightIO::PostRead(disk_cache::File *file, void* buf, size_t buf_len,
+ size_t offset, disk_cache::FileIOCallback *callback) {
+ scoped_refptr<FileBackgroundIO> operation(
+ new FileBackgroundIO(file, buf, buf_len, offset, callback, this));
+ file->AddRef(); // Balanced on OnOperationComplete()
+
+ base::WorkerPool::PostTask(FROM_HERE,
+ base::Bind(&FileBackgroundIO::Read, operation.get()), true);
+ OnOperationPosted(operation.get());
+}
+
+void FileInFlightIO::PostWrite(disk_cache::File* file, const void* buf,
+ size_t buf_len, size_t offset,
+ disk_cache::FileIOCallback* callback) {
+ scoped_refptr<FileBackgroundIO> operation(
+ new FileBackgroundIO(file, buf, buf_len, offset, callback, this));
+ file->AddRef(); // Balanced on OnOperationComplete()
+
+ base::WorkerPool::PostTask(FROM_HERE,
+ base::Bind(&FileBackgroundIO::Write, operation.get()), true);
+ OnOperationPosted(operation.get());
+}
+
+// Runs on the IO thread.
+void FileInFlightIO::OnOperationComplete(disk_cache::BackgroundIO* operation,
+ bool cancel) {
+ FileBackgroundIO* op = static_cast<FileBackgroundIO*>(operation);
+
+ disk_cache::FileIOCallback* callback = op->callback();
+ int bytes = operation->result();
+
+ // Release the references acquired in PostRead / PostWrite.
+ op->file()->Release();
+ callback->OnFileIOComplete(bytes);
+}
+
+// A static object that will broker all async operations.
+FileInFlightIO* s_file_operations = NULL;
+
+// Returns the current FileInFlightIO.
+FileInFlightIO* GetFileInFlightIO() {
+ if (!s_file_operations) {
+ s_file_operations = new FileInFlightIO;
+ }
+ return s_file_operations;
+}
+
+// Deletes the current FileInFlightIO.
+void DeleteFileInFlightIO() {
+ DCHECK(s_file_operations);
+ delete s_file_operations;
+ s_file_operations = NULL;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+File::File(base::PlatformFile file)
+ : init_(true),
+ mixed_(true),
+ platform_file_(file),
+ sync_platform_file_(base::kInvalidPlatformFileValue) {
+}
+
+bool File::Init(const base::FilePath& name) {
+ if (init_)
+ return false;
+
+ int flags = base::PLATFORM_FILE_OPEN |
+ base::PLATFORM_FILE_READ |
+ base::PLATFORM_FILE_WRITE;
+ platform_file_ = base::CreatePlatformFile(name, flags, NULL, NULL);
+ if (platform_file_ < 0) {
+ platform_file_ = 0;
+ return false;
+ }
+
+ init_ = true;
+ return true;
+}
+
+base::PlatformFile File::platform_file() const {
+ return platform_file_;
+}
+
+bool File::IsValid() const {
+ if (!init_)
+ return false;
+ return (base::kInvalidPlatformFileValue != platform_file_);
+}
+
+bool File::Read(void* buffer, size_t buffer_len, size_t offset) {
+ DCHECK(init_);
+ if (buffer_len > static_cast<size_t>(kint32max) ||
+ offset > static_cast<size_t>(kint32max)) {
+ return false;
+ }
+
+ int ret = base::ReadPlatformFile(platform_file_, offset,
+ static_cast<char*>(buffer), buffer_len);
+ return (static_cast<size_t>(ret) == buffer_len);
+}
+
+bool File::Write(const void* buffer, size_t buffer_len, size_t offset) {
+ DCHECK(init_);
+ if (buffer_len > static_cast<size_t>(kint32max) ||
+ offset > static_cast<size_t>(kint32max)) {
+ return false;
+ }
+
+ int ret = base::WritePlatformFile(platform_file_, offset,
+ static_cast<const char*>(buffer),
+ buffer_len);
+ return (static_cast<size_t>(ret) == buffer_len);
+}
+
+// We have to increase the ref counter of the file before performing the IO to
+// prevent the completion from happening with an invalid handle (if the file is
+// closed while the IO is in flight).
+bool File::Read(void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (!callback) {
+ if (completed)
+ *completed = true;
+ return Read(buffer, buffer_len, offset);
+ }
+
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ GetFileInFlightIO()->PostRead(this, buffer, buffer_len, offset, callback);
+
+ *completed = false;
+ return true;
+}
+
+bool File::Write(const void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (!callback) {
+ if (completed)
+ *completed = true;
+ return Write(buffer, buffer_len, offset);
+ }
+
+ return AsyncWrite(buffer, buffer_len, offset, callback, completed);
+}
+
+bool File::SetLength(size_t length) {
+ DCHECK(init_);
+ if (length > kuint32max)
+ return false;
+
+ return base::TruncatePlatformFile(platform_file_, length);
+}
+
+size_t File::GetLength() {
+ DCHECK(init_);
+ int64 len = base::SeekPlatformFile(platform_file_,
+ base::PLATFORM_FILE_FROM_END, 0);
+
+ if (len > static_cast<int64>(kuint32max))
+ return kuint32max;
+
+ return static_cast<size_t>(len);
+}
+
+// Static.
+void File::WaitForPendingIO(int* num_pending_io) {
+ // We may be running unit tests, so we should be able to reset the
+ // message loop.
+ GetFileInFlightIO()->WaitForPendingIO();
+ DeleteFileInFlightIO();
+}
+
+// Static.
+void File::DropPendingIO() {
+ GetFileInFlightIO()->DropPendingIO();
+ DeleteFileInFlightIO();
+}
+
+File::~File() {
+ if (IsValid())
+ base::ClosePlatformFile(platform_file_);
+}
+
+bool File::AsyncWrite(const void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ GetFileInFlightIO()->PostWrite(this, buffer, buffer_len, offset, callback);
+
+ if (completed)
+ *completed = false;
+ return true;
+}
+
+} // namespace disk_cache
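
A usage sketch for the asynchronous path added in this file. The shape of disk_cache::FileIOCallback (a single OnFileIOComplete(int) method) is inferred from how FileInFlightIO invokes it above, and the already-initialized File* is assumed; this is illustrative, not code from the patch.

// Illustrative caller of the async File API above (assumptions noted in the
// surrounding text).
#include "base/logging.h"
#include "net/disk_cache/file.h"

namespace {

class LoggingIOCallback : public disk_cache::FileIOCallback {
 public:
  // Invoked on the IO thread once the worker-pool operation has finished.
  virtual void OnFileIOComplete(int bytes_copied) OVERRIDE {
    VLOG(1) << "async file IO finished, bytes: " << bytes_copied;
  }
};

void StartAsyncRead(disk_cache::File* file, void* buffer, size_t buf_len,
                    size_t offset, LoggingIOCallback* callback) {
  bool completed = false;
  // With a non-NULL callback the read is handed to FileInFlightIO::PostRead,
  // |completed| comes back false, and the result arrives in OnFileIOComplete.
  if (!file->Read(buffer, buf_len, offset, callback, &completed))
    LOG(ERROR) << "could not post asynchronous read";
}

}  // namespace
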
diff --git a/chromium/net/disk_cache/file_posix.cc b/chromium/net/disk_cache/file_posix.cc
index 30c4a660b13..e1ff6fc4198 100644
--- a/chromium/net/disk_cache/file_posix.cc
+++ b/chromium/net/disk_cache/file_posix.cc
@@ -17,7 +17,7 @@
namespace {
// The maximum number of threads for this pool.
-const int kMaxThreads = 5;
+const int kMaxThreads = 20;
class FileWorkerPool : public base::SequencedWorkerPool {
public:
@@ -167,6 +167,11 @@ void File::WaitForPendingIO(int* num_pending_io) {
base::RunLoop().RunUntilIdle();
}
+// Static.
+void File::DropPendingIO() {
+}
+
+
File::~File() {
if (IsValid())
base::ClosePlatformFile(platform_file_);
diff --git a/chromium/net/disk_cache/file_win.cc b/chromium/net/disk_cache/file_win.cc
index dbb34f38341..1492c42c6e4 100644
--- a/chromium/net/disk_cache/file_win.cc
+++ b/chromium/net/disk_cache/file_win.cc
@@ -267,4 +267,8 @@ void File::WaitForPendingIO(int* num_pending_io) {
}
}
+// Static.
+void File::DropPendingIO() {
+}
+
} // namespace disk_cache
diff --git a/chromium/net/disk_cache/mem_backend_impl.cc b/chromium/net/disk_cache/mem_backend_impl.cc
index a6f1bf13bed..ccd868b6530 100644
--- a/chromium/net/disk_cache/mem_backend_impl.cc
+++ b/chromium/net/disk_cache/mem_backend_impl.cc
@@ -14,7 +14,7 @@ using base::Time;
namespace {
-const int kDefaultCacheSize = 10 * 1024 * 1024;
+const int kDefaultInMemoryCacheSize = 10 * 1024 * 1024;
const int kCleanUpMargin = 1024 * 1024;
int LowWaterAdjust(int high_water) {
@@ -59,15 +59,15 @@ bool MemBackendImpl::Init() {
int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
if (total_memory <= 0) {
- max_size_ = kDefaultCacheSize;
+ max_size_ = kDefaultInMemoryCacheSize;
return true;
}
// We want to use up to 2% of the computer's memory, with a limit of 50 MB,
// reached on systems with more than 2.5 GB of RAM.
total_memory = total_memory * 2 / 100;
- if (total_memory > kDefaultCacheSize * 5)
- max_size_ = kDefaultCacheSize * 5;
+ if (total_memory > kDefaultInMemoryCacheSize * 5)
+ max_size_ = kDefaultInMemoryCacheSize * 5;
else
max_size_ = static_cast<int32>(total_memory);
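
As a numeric illustration (not part of the patch) of the sizing rule in the hunk above: 2% of physical memory, capped at 5x the renamed kDefaultInMemoryCacheSize, which is where the 50 MB limit mentioned in the comment comes from.

// Sketch of the in-memory cache sizing rule: 2% of RAM, capped at 50 MiB.
#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
  const int64_t kMiB = 1024 * 1024;
  const int64_t kDefaultInMemoryCacheSize = 10 * kMiB;
  const int64_t physical_memory = 4LL * 1024 * kMiB;   // e.g. a 4 GiB machine

  int64_t max_size = physical_memory * 2 / 100;        // 2% of RAM: ~82 MiB
  max_size = std::min(max_size, kDefaultInMemoryCacheSize * 5);

  std::cout << max_size / kMiB << " MiB\n";  // prints "50 MiB": the cap wins
  return 0;
}
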
diff --git a/chromium/net/disk_cache/simple/simple_backend_impl.cc b/chromium/net/disk_cache/simple/simple_backend_impl.cc
index 8856a2d7194..7260857e0f1 100644
--- a/chromium/net/disk_cache/simple/simple_backend_impl.cc
+++ b/chromium/net/disk_cache/simple/simple_backend_impl.cc
@@ -26,7 +26,7 @@
#include "base/threading/sequenced_worker_pool.h"
#include "base/time/time.h"
#include "net/base/net_errors.h"
-#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_entry_impl.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
@@ -44,7 +44,7 @@ using base::SequencedWorkerPool;
using base::SingleThreadTaskRunner;
using base::Time;
using base::DirectoryExists;
-using file_util::CreateDirectory;
+using base::CreateDirectory;
namespace disk_cache {
@@ -56,9 +56,6 @@ const int kDefaultMaxWorkerThreads = 50;
const char kThreadNamePrefix[] = "SimpleCache";
-// Cache size when all other size heuristics failed.
-const uint64 kDefaultCacheSize = 80 * 1024 * 1024;
-
// Maximum fraction of the cache that one entry can consume.
const int kMaxFileRatio = 8;
@@ -123,21 +120,11 @@ void MaybeHistogramFdLimit(net::CacheType cache_type) {
g_fd_limit_histogram_has_been_populated = true;
}
-// Must run on IO Thread.
-void DeleteBackendImpl(disk_cache::Backend** backend,
- const net::CompletionCallback& callback,
- int result) {
- DCHECK(*backend);
- delete *backend;
- *backend = NULL;
- callback.Run(result);
-}
-
// Detects if the files in the cache directory match the current disk cache
// backend type and version. If the directory contains no cache, occupies it
// with the fresh structure.
bool FileStructureConsistent(const base::FilePath& path) {
- if (!base::PathExists(path) && !file_util::CreateDirectory(path)) {
+ if (!base::PathExists(path) && !base::CreateDirectory(path)) {
LOG(ERROR) << "Failed to create directory: " << path.LossyDisplayName();
return false;
}
@@ -195,23 +182,6 @@ void RunOperationAndCallback(
operation_callback.Run(operation_result);
}
-// A short bindable thunk that Dooms an entry if it successfully opens.
-void DoomOpenedEntry(scoped_ptr<Entry*> in_entry,
- const net::CompletionCallback& doom_callback,
- int open_result) {
- DCHECK_NE(open_result, net::ERR_IO_PENDING);
- if (open_result == net::OK) {
- DCHECK(in_entry);
- SimpleEntryImpl* simple_entry = static_cast<SimpleEntryImpl*>(*in_entry);
- const int doom_result = simple_entry->DoomEntry(doom_callback);
- simple_entry->Close();
- if (doom_result != net::ERR_IO_PENDING)
- doom_callback.Run(doom_result);
- } else {
- doom_callback.Run(open_result);
- }
-}
-
void RecordIndexLoad(net::CacheType cache_type,
base::TimeTicks constructed_since,
int result) {
@@ -285,13 +255,15 @@ void SimpleBackendImpl::OnDeactivated(const SimpleEntryImpl* entry) {
}
void SimpleBackendImpl::OnDoomStart(uint64 entry_hash) {
- DCHECK_EQ(0u, entries_pending_doom_.count(entry_hash));
+ // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
+ CHECK_EQ(0u, entries_pending_doom_.count(entry_hash));
entries_pending_doom_.insert(
std::make_pair(entry_hash, std::vector<Closure>()));
}
void SimpleBackendImpl::OnDoomComplete(uint64 entry_hash) {
- DCHECK_EQ(1u, entries_pending_doom_.count(entry_hash));
+ // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
+ CHECK_EQ(1u, entries_pending_doom_.count(entry_hash));
base::hash_map<uint64, std::vector<Closure> >::iterator it =
entries_pending_doom_.find(entry_hash);
std::vector<Closure> to_run_closures;
@@ -317,8 +289,9 @@ void SimpleBackendImpl::DoomEntries(std::vector<uint64>* entry_hashes,
// SimpleSynchronousEntry::DoomEntrySet and delete the files en masse.
for (int i = mass_doom_entry_hashes->size() - 1; i >= 0; --i) {
const uint64 entry_hash = (*mass_doom_entry_hashes)[i];
- DCHECK(active_entries_.count(entry_hash) == 0 ||
- entries_pending_doom_.count(entry_hash) == 0)
+ // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
+ CHECK(active_entries_.count(entry_hash) == 0 ||
+ entries_pending_doom_.count(entry_hash) == 0)
<< "The entry 0x" << std::hex << entry_hash
<< " is both active and pending doom.";
if (!active_entries_.count(entry_hash) &&
@@ -339,7 +312,8 @@ void SimpleBackendImpl::DoomEntries(std::vector<uint64>* entry_hashes,
it = to_doom_individually_hashes.begin(),
end = to_doom_individually_hashes.end(); it != end; ++it) {
const int doom_result = DoomEntryFromHash(*it, barrier_callback);
- DCHECK_EQ(net::ERR_IO_PENDING, doom_result);
+ // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
+ CHECK_EQ(net::ERR_IO_PENDING, doom_result);
index_->Remove(*it);
}
@@ -525,12 +499,7 @@ SimpleBackendImpl::DiskStatResult SimpleBackendImpl::InitCacheStructureOnDisk(
DCHECK(mtime_result);
if (!result.max_size) {
int64 available = base::SysInfo::AmountOfFreeDiskSpace(path);
- if (available < 0)
- result.max_size = kDefaultCacheSize;
- else
- // TODO(pasko): Move PreferedCacheSize() to cache_util.h. Also fix the
- // spelling.
- result.max_size = disk_cache::PreferedCacheSize(available);
+ result.max_size = disk_cache::PreferredCacheSize(available);
}
DCHECK(result.max_size);
}
diff --git a/chromium/net/disk_cache/simple/simple_entry_format.cc b/chromium/net/disk_cache/simple/simple_entry_format.cc
index d35174a9a7f..41cab2d36f2 100644
--- a/chromium/net/disk_cache/simple/simple_entry_format.cc
+++ b/chromium/net/disk_cache/simple/simple_entry_format.cc
@@ -18,4 +18,9 @@ SimpleFileEOF::SimpleFileEOF() {
std::memset(this, 0, sizeof(*this));
}
+SimpleFileSparseRangeHeader::SimpleFileSparseRangeHeader() {
+ // Make hashing repeatable: leave no padding bytes untouched.
+ std::memset(this, 0, sizeof(*this));
+}
+
} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_entry_format.h b/chromium/net/disk_cache/simple/simple_entry_format.h
index 8224b858dcc..abd985c9fee 100644
--- a/chromium/net/disk_cache/simple/simple_entry_format.h
+++ b/chromium/net/disk_cache/simple/simple_entry_format.h
@@ -18,6 +18,7 @@ namespace disk_cache {
const uint64 kSimpleInitialMagicNumber = GG_UINT64_C(0xfcfb6d1ba7725c30);
const uint64 kSimpleFinalMagicNumber = GG_UINT64_C(0xf4fa6f45970d41d8);
+const uint64 kSimpleSparseRangeMagicNumber = GG_UINT64_C(0xeb97bf016553676b);
// A file containing stream 0 and stream 1 in the Simple cache consists of:
// - a SimpleFileHeader.
@@ -58,6 +59,15 @@ struct NET_EXPORT_PRIVATE SimpleFileEOF {
uint32 stream_size;
};
+struct SimpleFileSparseRangeHeader {
+ SimpleFileSparseRangeHeader();
+
+ uint64 sparse_range_magic_number;
+ int64 offset;
+ int64 length;
+ uint32 data_crc32;
+};
+
} // namespace disk_cache
#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_ENTRY_FORMAT_H_
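
The framing implied by SimpleFileSparseRangeHeader can be sketched as follows: each sparse range is assumed to be the header followed by |length| payload bytes whose CRC-32 is stored in data_crc32. The actual writer lives in simple_synchronous_entry.cc, whose diff is not shown in this excerpt, so treat the packing details below as assumptions.

// Illustrative packing of one sparse range (header + payload); the exact
// on-disk framing is an assumption, as noted in the text above.
#include <string.h>

#include <vector>

#include "base/basictypes.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "third_party/zlib/zlib.h"

std::vector<char> PackSparseRange(int64 offset, const char* data, int len) {
  disk_cache::SimpleFileSparseRangeHeader header;
  header.sparse_range_magic_number = disk_cache::kSimpleSparseRangeMagicNumber;
  header.offset = offset;   // position of this range within the sparse stream
  header.length = len;      // number of payload bytes following the header
  header.data_crc32 = crc32(crc32(0, Z_NULL, 0),
                            reinterpret_cast<const Bytef*>(data),
                            static_cast<uInt>(len));

  std::vector<char> out(sizeof(header) + len);
  memcpy(&out[0], &header, sizeof(header));   // header first...
  memcpy(&out[sizeof(header)], data, len);    // ...then the raw payload
  return out;
}
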
diff --git a/chromium/net/disk_cache/simple/simple_entry_impl.cc b/chromium/net/disk_cache/simple/simple_entry_impl.cc
index 3d2bc22cc36..45ec1f9fd1e 100644
--- a/chromium/net/disk_cache/simple/simple_entry_impl.cc
+++ b/chromium/net/disk_cache/simple/simple_entry_impl.cc
@@ -31,6 +31,10 @@
namespace disk_cache {
namespace {
+// An entry can store sparse data taking up to 1 / kMaxSparseDataSizeDivisor of
+// the cache.
+const int64 kMaxSparseDataSizeDivisor = 10;
+
// Used in histograms, please only add entries at the end.
enum ReadResult {
READ_RESULT_SUCCESS = 0,
@@ -170,6 +174,7 @@ SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type,
use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
last_used_(Time::Now()),
last_modified_(last_used_),
+ sparse_data_size_(0),
open_count_(0),
doomed_(false),
state_(STATE_UNINITIALIZED),
@@ -463,9 +468,11 @@ int SimpleEntryImpl::ReadSparseData(int64 offset,
int buf_len,
const CompletionCallback& callback) {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(gavinp): Determine if the simple backend should support sparse data.
- NOTIMPLEMENTED();
- return net::ERR_FAILED;
+
+ ScopedOperationRunner operation_runner(this);
+ pending_operations_.push(SimpleEntryOperation::ReadSparseOperation(
+ this, offset, buf_len, buf, callback));
+ return net::ERR_IO_PENDING;
}
int SimpleEntryImpl::WriteSparseData(int64 offset,
@@ -473,9 +480,11 @@ int SimpleEntryImpl::WriteSparseData(int64 offset,
int buf_len,
const CompletionCallback& callback) {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(gavinp): Determine if the simple backend should support sparse data.
- NOTIMPLEMENTED();
- return net::ERR_FAILED;
+
+ ScopedOperationRunner operation_runner(this);
+ pending_operations_.push(SimpleEntryOperation::WriteSparseOperation(
+ this, offset, buf_len, buf, callback));
+ return net::ERR_IO_PENDING;
}
int SimpleEntryImpl::GetAvailableRange(int64 offset,
@@ -483,28 +492,32 @@ int SimpleEntryImpl::GetAvailableRange(int64 offset,
int64* start,
const CompletionCallback& callback) {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(gavinp): Determine if the simple backend should support sparse data.
- NOTIMPLEMENTED();
- return net::ERR_FAILED;
+
+ ScopedOperationRunner operation_runner(this);
+ pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
+ this, offset, len, start, callback));
+ return net::ERR_IO_PENDING;
}
bool SimpleEntryImpl::CouldBeSparse() const {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(gavinp): Determine if the simple backend should support sparse data.
- return false;
+ // TODO(ttuttle): Actually check.
+ return true;
}
void SimpleEntryImpl::CancelSparseIO() {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(gavinp): Determine if the simple backend should support sparse data.
- NOTIMPLEMENTED();
+ // The Simple Cache does not return distinct objects for the same non-doomed
+ // entry, so there's no need to coordinate which object is performing sparse
+ // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
}
int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(gavinp): Determine if the simple backend should support sparse data.
- NOTIMPLEMENTED();
- return net::ERR_NOT_IMPLEMENTED;
+ // The Simple Cache does not return distinct objects for the same non-doomed
+ // entry, so there's no need to coordinate which object is performing sparse
+ // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
+ return net::OK;
}
SimpleEntryImpl::~SimpleEntryImpl() {
@@ -608,6 +621,24 @@ void SimpleEntryImpl::RunNextOperationIfNeeded() {
operation->callback(),
operation->truncate());
break;
+ case SimpleEntryOperation::TYPE_READ_SPARSE:
+ ReadSparseDataInternal(operation->sparse_offset(),
+ operation->buf(),
+ operation->length(),
+ operation->callback());
+ break;
+ case SimpleEntryOperation::TYPE_WRITE_SPARSE:
+ WriteSparseDataInternal(operation->sparse_offset(),
+ operation->buf(),
+ operation->length(),
+ operation->callback());
+ break;
+ case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
+ GetAvailableRangeInternal(operation->sparse_offset(),
+ operation->length(),
+ operation->out_start(),
+ operation->callback());
+ break;
case SimpleEntryOperation::TYPE_DOOM:
DoomEntryInternal(operation->callback());
break;
@@ -651,7 +682,8 @@ void SimpleEntryImpl::OpenEntryInternal(bool have_index,
const base::TimeTicks start_time = base::TimeTicks::Now();
scoped_ptr<SimpleEntryCreationResults> results(
new SimpleEntryCreationResults(
- SimpleEntryStat(last_used_, last_modified_, data_size_)));
+ SimpleEntryStat(last_used_, last_modified_, data_size_,
+ sparse_data_size_)));
Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
cache_type_,
path_,
@@ -699,7 +731,8 @@ void SimpleEntryImpl::CreateEntryInternal(bool have_index,
const base::TimeTicks start_time = base::TimeTicks::Now();
scoped_ptr<SimpleEntryCreationResults> results(
new SimpleEntryCreationResults(
- SimpleEntryStat(last_used_, last_modified_, data_size_)));
+ SimpleEntryStat(last_used_, last_modified_, data_size_,
+ sparse_data_size_)));
Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
cache_type_,
path_,
@@ -746,7 +779,8 @@ void SimpleEntryImpl::CloseInternal() {
Closure task =
base::Bind(&SimpleSynchronousEntry::Close,
base::Unretained(synchronous_entry_),
- SimpleEntryStat(last_used_, last_modified_, data_size_),
+ SimpleEntryStat(last_used_, last_modified_, data_size_,
+ sparse_data_size_),
base::Passed(&crc32s_to_write),
stream_0_data_);
Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
@@ -825,7 +859,8 @@ void SimpleEntryImpl::ReadDataInternal(int stream_index,
scoped_ptr<uint32> read_crc32(new uint32());
scoped_ptr<int> result(new int());
scoped_ptr<SimpleEntryStat> entry_stat(
- new SimpleEntryStat(last_used_, last_modified_, data_size_));
+ new SimpleEntryStat(last_used_, last_modified_, data_size_,
+ sparse_data_size_));
Closure task = base::Bind(
&SimpleSynchronousEntry::ReadData,
base::Unretained(synchronous_entry_),
@@ -908,7 +943,8 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index,
// |entry_stat| needs to be initialized before modifying |data_size_|.
scoped_ptr<SimpleEntryStat> entry_stat(
- new SimpleEntryStat(last_used_, last_modified_, data_size_));
+ new SimpleEntryStat(last_used_, last_modified_, data_size_,
+ sparse_data_size_));
if (truncate) {
data_size_[stream_index] = offset + buf_len;
} else {
@@ -944,6 +980,100 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index,
worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}
+void SimpleEntryImpl::ReadSparseDataInternal(
+ int64 sparse_offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ ScopedOperationRunner operation_runner(this);
+
+ DCHECK_EQ(STATE_READY, state_);
+ state_ = STATE_IO_PENDING;
+
+ scoped_ptr<int> result(new int());
+ scoped_ptr<base::Time> last_used(new base::Time());
+ Closure task = base::Bind(&SimpleSynchronousEntry::ReadSparseData,
+ base::Unretained(synchronous_entry_),
+ SimpleSynchronousEntry::EntryOperationData(
+ sparse_offset, buf_len),
+ make_scoped_refptr(buf),
+ last_used.get(),
+ result.get());
+ Closure reply = base::Bind(&SimpleEntryImpl::ReadSparseOperationComplete,
+ this,
+ callback,
+ base::Passed(&last_used),
+ base::Passed(&result));
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+}
+
+void SimpleEntryImpl::WriteSparseDataInternal(
+ int64 sparse_offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ ScopedOperationRunner operation_runner(this);
+
+ DCHECK_EQ(STATE_READY, state_);
+ state_ = STATE_IO_PENDING;
+
+ int64 max_sparse_data_size = kint64max;
+ if (backend_.get()) {
+ int64 max_cache_size = backend_->index()->max_size();
+ max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
+ }
+
+ scoped_ptr<SimpleEntryStat> entry_stat(
+ new SimpleEntryStat(last_used_, last_modified_, data_size_,
+ sparse_data_size_));
+
+ last_used_ = last_modified_ = base::Time::Now();
+
+ scoped_ptr<int> result(new int());
+ Closure task = base::Bind(&SimpleSynchronousEntry::WriteSparseData,
+ base::Unretained(synchronous_entry_),
+ SimpleSynchronousEntry::EntryOperationData(
+ sparse_offset, buf_len),
+ make_scoped_refptr(buf),
+ max_sparse_data_size,
+ entry_stat.get(),
+ result.get());
+ Closure reply = base::Bind(&SimpleEntryImpl::WriteSparseOperationComplete,
+ this,
+ callback,
+ base::Passed(&entry_stat),
+ base::Passed(&result));
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+}
+
+void SimpleEntryImpl::GetAvailableRangeInternal(
+ int64 sparse_offset,
+ int len,
+ int64* out_start,
+ const CompletionCallback& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ ScopedOperationRunner operation_runner(this);
+
+ DCHECK_EQ(STATE_READY, state_);
+ state_ = STATE_IO_PENDING;
+
+ scoped_ptr<int> result(new int());
+ Closure task = base::Bind(&SimpleSynchronousEntry::GetAvailableRange,
+ base::Unretained(synchronous_entry_),
+ SimpleSynchronousEntry::EntryOperationData(
+ sparse_offset, len),
+ out_start,
+ result.get());
+ Closure reply = base::Bind(
+ &SimpleEntryImpl::GetAvailableRangeOperationComplete,
+ this,
+ callback,
+ base::Passed(&result));
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+}
+
void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) {
PostTaskAndReplyWithResult(
worker_pool_, FROM_HERE,
@@ -1007,7 +1137,6 @@ void SimpleEntryImpl::CreationOperationComplete(
}
void SimpleEntryImpl::EntryOperationComplete(
- int stream_index,
const CompletionCallback& completion_callback,
const SimpleEntryStat& entry_stat,
scoped_ptr<int> result) {
@@ -1015,12 +1144,11 @@ void SimpleEntryImpl::EntryOperationComplete(
DCHECK(synchronous_entry_);
DCHECK_EQ(STATE_IO_PENDING, state_);
DCHECK(result);
- state_ = STATE_READY;
if (*result < 0) {
- MarkAsDoomed();
state_ = STATE_FAILURE;
- crc32s_end_offset_[stream_index] = 0;
+ MarkAsDoomed();
} else {
+ state_ = STATE_READY;
UpdateDataFromEntryStat(entry_stat);
}
@@ -1086,6 +1214,10 @@ void SimpleEntryImpl::ReadOperationComplete(
}
if (*result < 0) {
+ crc32s_end_offset_[stream_index] = 0;
+ }
+
+ if (*result < 0) {
RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
} else {
RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
@@ -1100,8 +1232,7 @@ void SimpleEntryImpl::ReadOperationComplete(
CreateNetLogReadWriteCompleteCallback(*result));
}
- EntryOperationComplete(
- stream_index, completion_callback, *entry_stat, result.Pass());
+ EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}
void SimpleEntryImpl::WriteOperationComplete(
@@ -1118,8 +1249,47 @@ void SimpleEntryImpl::WriteOperationComplete(
CreateNetLogReadWriteCompleteCallback(*result));
}
- EntryOperationComplete(
- stream_index, completion_callback, *entry_stat, result.Pass());
+ if (*result < 0) {
+ crc32s_end_offset_[stream_index] = 0;
+ }
+
+ EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
+}
+
+void SimpleEntryImpl::ReadSparseOperationComplete(
+ const CompletionCallback& completion_callback,
+ scoped_ptr<base::Time> last_used,
+ scoped_ptr<int> result) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK(synchronous_entry_);
+ DCHECK(result);
+
+ SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_,
+ sparse_data_size_);
+ EntryOperationComplete(completion_callback, entry_stat, result.Pass());
+}
+
+void SimpleEntryImpl::WriteSparseOperationComplete(
+ const CompletionCallback& completion_callback,
+ scoped_ptr<SimpleEntryStat> entry_stat,
+ scoped_ptr<int> result) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK(synchronous_entry_);
+ DCHECK(result);
+
+ EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
+}
+
+void SimpleEntryImpl::GetAvailableRangeOperationComplete(
+ const CompletionCallback& completion_callback,
+ scoped_ptr<int> result) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK(synchronous_entry_);
+ DCHECK(result);
+
+ SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
+ sparse_data_size_);
+ EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}
void SimpleEntryImpl::DoomOperationComplete(
@@ -1164,11 +1334,9 @@ void SimpleEntryImpl::ChecksumOperationComplete(
CreateNetLogReadWriteCompleteCallback(*result));
}
- EntryOperationComplete(
- stream_index,
- completion_callback,
- SimpleEntryStat(last_used_, last_modified_, data_size_),
- result.Pass());
+ SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
+ sparse_data_size_);
+ EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}
void SimpleEntryImpl::CloseOperationComplete() {
@@ -1193,6 +1361,7 @@ void SimpleEntryImpl::UpdateDataFromEntryStat(
for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
data_size_[i] = entry_stat.data_size(i);
}
+ sparse_data_size_ = entry_stat.sparse_data_size();
if (!doomed_ && backend_.get())
backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage());
}
@@ -1203,6 +1372,7 @@ int64 SimpleEntryImpl::GetDiskUsage() const {
file_size +=
simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
}
+ file_size += sparse_data_size_;
return file_size;
}
@@ -1286,7 +1456,8 @@ int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf,
}
memcpy(buf->data(), stream_0_data_->data() + offset, buf_len);
UpdateDataFromEntryStat(
- SimpleEntryStat(base::Time::Now(), last_modified_, data_size_));
+ SimpleEntryStat(base::Time::Now(), last_modified_, data_size_,
+ sparse_data_size_));
RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
return buf_len;
}
@@ -1296,8 +1467,8 @@ int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
int buf_len,
bool truncate) {
// Currently, stream 0 is only used for HTTP headers, and always writes them
- // with a single, truncating write. Detect these writes and record the size
- // changes of the headers. Also, support writes to stream 0 that have
+ // with a single, truncating write. Detect these writes and record the size
+ // changes of the headers. Also, support writes to stream 0 that have
// different access patterns, as required by the API contract.
// All other clients of the Simple Cache are encouraged to use stream 1.
have_written_[0] = true;
@@ -1324,7 +1495,8 @@ int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
base::Time modification_time = base::Time::Now();
AdvanceCrc(buf, offset, buf_len, 0);
UpdateDataFromEntryStat(
- SimpleEntryStat(modification_time, modification_time, data_size_));
+ SimpleEntryStat(modification_time, modification_time, data_size_,
+ sparse_data_size_));
RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
return buf_len;
}
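
The simple_entry_impl.cc hunks above replace the NOTIMPLEMENTED() sparse stubs with queued operations: each public call pushes a SimpleEntryOperation, returns net::ERR_IO_PENDING, and RunNextOperationIfNeeded() later dispatches the work to the worker pool and reports completion through EntryOperationComplete(). The standalone sketch below models only that queue-and-dispatch shape; it is not Chromium code, the names (ToyEntry, WriteSparse, kErrIoPending) are made up, and the work runs inline rather than on a worker pool.

#include <cstdint>
#include <functional>
#include <iostream>
#include <queue>

namespace sketch {

enum class State { kReady, kIoPending };

class ToyEntry {
 public:
  // Mirrors the patched WriteSparseData(): queue the operation, report
  // "pending", and let the runner pick it up.
  int WriteSparse(int64_t offset, int len, std::function<void(int)> callback) {
    pending_.push([=]() { RunWrite(offset, len, callback); });
    RunNextIfNeeded();  // ScopedOperationRunner does this when it leaves scope.
    return kErrIoPending;
  }

 private:
  static const int kErrIoPending = -1;  // Stands in for net::ERR_IO_PENDING.

  void RunNextIfNeeded() {
    if (state_ != State::kReady || pending_.empty())
      return;
    std::function<void()> op = pending_.front();
    pending_.pop();
    op();
  }

  void RunWrite(int64_t offset, int len, std::function<void(int)> callback) {
    (void)offset;                // The toy ignores the offset; the real code seeks to it.
    state_ = State::kIoPending;  // Matches the STATE_IO_PENDING transition.
    // The real code posts the work to a worker pool and replies on the IO
    // thread; this toy runs it inline.
    int result = len;            // Pretend every byte was written.
    state_ = State::kReady;      // EntryOperationComplete() restores the state.
    callback(result);
    RunNextIfNeeded();           // Drain anything queued while "busy".
  }

  State state_ = State::kReady;
  std::queue<std::function<void()>> pending_;
};

}  // namespace sketch

int main() {
  sketch::ToyEntry entry;
  int rv = entry.WriteSparse(
      0, 1024, [](int result) { std::cout << "wrote " << result << "\n"; });
  std::cout << "initial return value: " << rv << "\n";
  return 0;
}
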
diff --git a/chromium/net/disk_cache/simple/simple_entry_impl.h b/chromium/net/disk_cache/simple/simple_entry_impl.h
index e2f0c63b39e..2d78d8bfe0a 100644
--- a/chromium/net/disk_cache/simple/simple_entry_impl.h
+++ b/chromium/net/disk_cache/simple/simple_entry_impl.h
@@ -188,6 +188,21 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
const CompletionCallback& callback,
bool truncate);
+ void ReadSparseDataInternal(int64 sparse_offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback);
+
+ void WriteSparseDataInternal(int64 sparse_offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback);
+
+ void GetAvailableRangeInternal(int64 sparse_offset,
+ int len,
+ int64* out_start,
+ const CompletionCallback& callback);
+
void DoomEntryInternal(const CompletionCallback& callback);
// Called after a SimpleSynchronousEntry has completed CreateEntry() or
@@ -208,8 +223,7 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
// Internal utility method used by other completion methods. Calls
// |completion_callback| after updating state and dooming on errors.
- void EntryOperationComplete(int stream_index,
- const CompletionCallback& completion_callback,
+ void EntryOperationComplete(const CompletionCallback& completion_callback,
const SimpleEntryStat& entry_stat,
scoped_ptr<int> result);
@@ -227,6 +241,20 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
scoped_ptr<SimpleEntryStat> entry_stat,
scoped_ptr<int> result);
+ void ReadSparseOperationComplete(
+ const CompletionCallback& completion_callback,
+ scoped_ptr<base::Time> last_used,
+ scoped_ptr<int> result);
+
+ void WriteSparseOperationComplete(
+ const CompletionCallback& completion_callback,
+ scoped_ptr<SimpleEntryStat> entry_stat,
+ scoped_ptr<int> result);
+
+ void GetAvailableRangeOperationComplete(
+ const CompletionCallback& completion_callback,
+ scoped_ptr<int> result);
+
// Called after an asynchronous doom completes.
void DoomOperationComplete(const CompletionCallback& callback,
State state_to_restore,
@@ -287,6 +315,7 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
base::Time last_used_;
base::Time last_modified_;
int32 data_size_[kSimpleEntryStreamCount];
+ int32 sparse_data_size_;
// Number of times this object has been returned from Backend::OpenEntry() and
// Backend::CreateEntry() without subsequent Entry::Close() calls. Used to
diff --git a/chromium/net/disk_cache/simple/simple_entry_operation.cc b/chromium/net/disk_cache/simple/simple_entry_operation.cc
index d4e76082084..7dfe0d2111f 100644
--- a/chromium/net/disk_cache/simple/simple_entry_operation.cc
+++ b/chromium/net/disk_cache/simple/simple_entry_operation.cc
@@ -11,13 +11,36 @@
namespace disk_cache {
+namespace {
+
+bool IsReadWriteType(unsigned int type) {
+ return type == SimpleEntryOperation::TYPE_READ ||
+ type == SimpleEntryOperation::TYPE_WRITE ||
+ type == SimpleEntryOperation::TYPE_READ_SPARSE ||
+ type == SimpleEntryOperation::TYPE_WRITE_SPARSE;
+}
+
+bool IsReadType(unsigned type) {
+ return type == SimpleEntryOperation::TYPE_READ ||
+ type == SimpleEntryOperation::TYPE_READ_SPARSE;
+}
+
+bool IsSparseType(unsigned type) {
+ return type == SimpleEntryOperation::TYPE_READ_SPARSE ||
+ type == SimpleEntryOperation::TYPE_WRITE_SPARSE;
+}
+
+}
+
SimpleEntryOperation::SimpleEntryOperation(const SimpleEntryOperation& other)
: entry_(other.entry_.get()),
buf_(other.buf_),
callback_(other.callback_),
out_entry_(other.out_entry_),
offset_(other.offset_),
+ sparse_offset_(other.sparse_offset_),
length_(other.length_),
+ out_start_(other.out_start_),
type_(other.type_),
have_index_(other.have_index_),
index_(other.index_),
@@ -40,6 +63,8 @@ SimpleEntryOperation SimpleEntryOperation::OpenOperation(
out_entry,
0,
0,
+ 0,
+ NULL,
TYPE_OPEN,
have_index,
0,
@@ -60,6 +85,8 @@ SimpleEntryOperation SimpleEntryOperation::CreateOperation(
out_entry,
0,
0,
+ 0,
+ NULL,
TYPE_CREATE,
have_index,
0,
@@ -77,6 +104,8 @@ SimpleEntryOperation SimpleEntryOperation::CloseOperation(
NULL,
0,
0,
+ 0,
+ NULL,
TYPE_CLOSE,
false,
0,
@@ -99,7 +128,9 @@ SimpleEntryOperation SimpleEntryOperation::ReadOperation(
callback,
NULL,
offset,
+ 0,
length,
+ NULL,
TYPE_READ,
false,
index,
@@ -123,7 +154,9 @@ SimpleEntryOperation SimpleEntryOperation::WriteOperation(
callback,
NULL,
offset,
+ 0,
length,
+ NULL,
TYPE_WRITE,
false,
index,
@@ -133,13 +166,84 @@ SimpleEntryOperation SimpleEntryOperation::WriteOperation(
}
// static
+SimpleEntryOperation SimpleEntryOperation::ReadSparseOperation(
+ SimpleEntryImpl* entry,
+ int64 sparse_offset,
+ int length,
+ net::IOBuffer* buf,
+ const CompletionCallback& callback) {
+ return SimpleEntryOperation(entry,
+ buf,
+ callback,
+ NULL,
+ 0,
+ sparse_offset,
+ length,
+ NULL,
+ TYPE_READ_SPARSE,
+ false,
+ 0,
+ false,
+ false,
+ false);
+}
+
+// static
+SimpleEntryOperation SimpleEntryOperation::WriteSparseOperation(
+ SimpleEntryImpl* entry,
+ int64 sparse_offset,
+ int length,
+ net::IOBuffer* buf,
+ const CompletionCallback& callback) {
+ return SimpleEntryOperation(entry,
+ buf,
+ callback,
+ NULL,
+ 0,
+ sparse_offset,
+ length,
+ NULL,
+ TYPE_WRITE_SPARSE,
+ false,
+ 0,
+ false,
+ false,
+ false);
+}
+
+// static
+SimpleEntryOperation SimpleEntryOperation::GetAvailableRangeOperation(
+ SimpleEntryImpl* entry,
+ int64 sparse_offset,
+ int length,
+ int64* out_start,
+ const CompletionCallback& callback) {
+ return SimpleEntryOperation(entry,
+ NULL,
+ callback,
+ NULL,
+ 0,
+ sparse_offset,
+ length,
+ out_start,
+ TYPE_GET_AVAILABLE_RANGE,
+ false,
+ 0,
+ false,
+ false,
+ false);
+}
+
+// static
SimpleEntryOperation SimpleEntryOperation::DoomOperation(
SimpleEntryImpl* entry,
const CompletionCallback& callback) {
net::IOBuffer* const buf = NULL;
Entry** const out_entry = NULL;
const int offset = 0;
+ const int64 sparse_offset = 0;
const int length = 0;
+ int64* const out_start = NULL;
const bool have_index = false;
const int index = 0;
const bool truncate = false;
@@ -150,7 +254,9 @@ SimpleEntryOperation SimpleEntryOperation::DoomOperation(
callback,
out_entry,
offset,
+ sparse_offset,
length,
+ out_start,
TYPE_DOOM,
have_index,
index,
@@ -161,19 +267,38 @@ SimpleEntryOperation SimpleEntryOperation::DoomOperation(
bool SimpleEntryOperation::ConflictsWith(
const SimpleEntryOperation& other_op) const {
- if (type_ != TYPE_READ && type_ != TYPE_WRITE)
- return true;
- if (other_op.type() != TYPE_READ && other_op.type() != TYPE_WRITE)
+ EntryOperationType other_type = other_op.type();
+
+ // Non-read/write operations conflict with everything.
+ if (!IsReadWriteType(type_) || !IsReadWriteType(other_type))
return true;
- if (type() == TYPE_READ && other_op.type() == TYPE_READ)
+
+  // Two reads (sparse or otherwise) never conflict with each other.
+ if (IsReadType(type_) && IsReadType(other_type))
return false;
+
+ // Sparse and non-sparse operations do not conflict with each other.
+ if (IsSparseType(type_) != IsSparseType(other_type)) {
+ return false;
+ }
+
+ // There must be two read/write operations, at least one must be a write, and
+ // they must be either both non-sparse or both sparse. Compare the streams
+ // and offsets to see whether they overlap.
+
+ if (IsSparseType(type_)) {
+ int64 end = sparse_offset_ + length_;
+ int64 other_op_end = other_op.sparse_offset() + other_op.length();
+ return sparse_offset_ < other_op_end && other_op.sparse_offset() < end;
+ }
+
if (index_ != other_op.index_)
return false;
int end = (type_ == TYPE_WRITE && truncate_) ? INT_MAX : offset_ + length_;
int other_op_end = (other_op.type() == TYPE_WRITE && other_op.truncate())
? INT_MAX
: other_op.offset() + other_op.length();
- return (offset_ < other_op_end && other_op.offset() < end);
+ return offset_ < other_op_end && other_op.offset() < end;
}
void SimpleEntryOperation::ReleaseReferences() {
@@ -187,7 +312,9 @@ SimpleEntryOperation::SimpleEntryOperation(SimpleEntryImpl* entry,
const CompletionCallback& callback,
Entry** out_entry,
int offset,
+ int64 sparse_offset,
int length,
+ int64* out_start,
EntryOperationType type,
bool have_index,
int index,
@@ -199,7 +326,9 @@ SimpleEntryOperation::SimpleEntryOperation(SimpleEntryImpl* entry,
callback_(callback),
out_entry_(out_entry),
offset_(offset),
+ sparse_offset_(sparse_offset),
length_(length),
+ out_start_(out_start),
type_(type),
have_index_(have_index),
index_(index),
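
Once the type checks in ConflictsWith() above pass, the decision reduces to an interval-intersection test: sparse operations compare [sparse_offset, sparse_offset + length) ranges, and a non-sparse truncating write extends its range to INT_MAX. A minimal standalone sketch of that predicate (hypothetical helper name, no Chromium dependencies):

#include <cassert>
#include <climits>
#include <cstdint>

// Two byte ranges conflict iff they intersect; this mirrors the final
// comparison in ConflictsWith().
bool RangesOverlap(int64_t a_offset, int64_t a_end,
                   int64_t b_offset, int64_t b_end) {
  return a_offset < b_end && b_offset < a_end;
}

int main() {
  // Sparse write [100, 200) vs sparse read [150, 250): overlap -> conflict.
  assert(RangesOverlap(100, 200, 150, 250));
  // Sparse write [0, 100) vs sparse read [100, 200): no overlap -> no conflict.
  assert(!RangesOverlap(0, 100, 100, 200));
  // A truncating non-sparse write's end is INT_MAX, so it conflicts with any
  // later range on the same stream.
  assert(RangesOverlap(50, INT_MAX, 1000, 1010));
  return 0;
}
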
diff --git a/chromium/net/disk_cache/simple/simple_entry_operation.h b/chromium/net/disk_cache/simple/simple_entry_operation.h
index 1c787017229..08863124cfc 100644
--- a/chromium/net/disk_cache/simple/simple_entry_operation.h
+++ b/chromium/net/disk_cache/simple/simple_entry_operation.h
@@ -31,7 +31,10 @@ class SimpleEntryOperation {
TYPE_CLOSE = 2,
TYPE_READ = 3,
TYPE_WRITE = 4,
- TYPE_DOOM = 5,
+ TYPE_READ_SPARSE = 5,
+ TYPE_WRITE_SPARSE = 6,
+ TYPE_GET_AVAILABLE_RANGE = 7,
+ TYPE_DOOM = 8,
};
SimpleEntryOperation(const SimpleEntryOperation& other);
@@ -63,7 +66,24 @@ class SimpleEntryOperation {
bool truncate,
bool optimistic,
const CompletionCallback& callback);
-
+ static SimpleEntryOperation ReadSparseOperation(
+ SimpleEntryImpl* entry,
+ int64 sparse_offset,
+ int length,
+ net::IOBuffer* buf,
+ const CompletionCallback& callback);
+ static SimpleEntryOperation WriteSparseOperation(
+ SimpleEntryImpl* entry,
+ int64 sparse_offset,
+ int length,
+ net::IOBuffer* buf,
+ const CompletionCallback& callback);
+ static SimpleEntryOperation GetAvailableRangeOperation(
+ SimpleEntryImpl* entry,
+ int64 sparse_offset,
+ int length,
+ int64* out_start,
+ const CompletionCallback& callback);
static SimpleEntryOperation DoomOperation(
SimpleEntryImpl* entry,
const CompletionCallback& callback);
@@ -81,7 +101,9 @@ class SimpleEntryOperation {
bool have_index() const { return have_index_; }
int index() const { return index_; }
int offset() const { return offset_; }
+ int64 sparse_offset() const { return sparse_offset_; }
int length() const { return length_; }
+ int64* out_start() { return out_start_; }
net::IOBuffer* buf() { return buf_.get(); }
bool truncate() const { return truncate_; }
bool optimistic() const { return optimistic_; }
@@ -93,7 +115,9 @@ class SimpleEntryOperation {
const CompletionCallback& callback,
Entry** out_entry,
int offset,
+ int64 sparse_offset,
int length,
+ int64* out_start,
EntryOperationType type,
bool have_index,
int index,
@@ -111,18 +135,22 @@ class SimpleEntryOperation {
// Used in write and read operations.
const int offset_;
+ const int64 sparse_offset_;
const int length_;
- const unsigned int type_ : 3; /* 3 */
+ // Used in get available range operations.
+ int64* const out_start_;
+
+ const EntryOperationType type_;
// Used in open and create operations.
- const unsigned int have_index_ : 1; /* 4 */
+ const bool have_index_;
// Used in write and read operations.
- const unsigned int index_ : 2; /* 6 */
+ const unsigned int index_;
// Used only in write operations.
- const unsigned int truncate_ : 1; /* 7 */
- const unsigned int optimistic_ : 1; /* 8 */
+ const bool truncate_;
+ const bool optimistic_;
// Used only in SimpleCache.ReadIsParallelizable histogram.
- const unsigned int alone_in_queue_ : 1; /* 9 */
+ const bool alone_in_queue_;
};
} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_histogram_macros.h b/chromium/net/disk_cache/simple/simple_histogram_macros.h
index 2107ad466a7..f800a6f40a3 100644
--- a/chromium/net/disk_cache/simple/simple_histogram_macros.h
+++ b/chromium/net/disk_cache/simple/simple_histogram_macros.h
@@ -10,26 +10,31 @@
#include "net/base/cache_type.h"
// This file contains macros used to report histograms. The main issue is that
-// we want to have separate histograms for each type of cache (http vs app),
-// while making it easy to report histograms and have all names precomputed.
+// we want to have separate histograms for each type of cache (app, http, and
+// media), while making it easy to report histograms and have all names
+// precomputed.
#define SIMPLE_CACHE_THUNK(uma_type, args) UMA_HISTOGRAM_##uma_type args
-#define SIMPLE_CACHE_UMA(uma_type, uma_name, cache_type, ...) \
- do { \
- switch (cache_type) { \
- case net::DISK_CACHE: \
- SIMPLE_CACHE_THUNK( \
- uma_type, ("SimpleCache.Http." uma_name, ##__VA_ARGS__)); \
- break; \
- case net::APP_CACHE: \
- SIMPLE_CACHE_THUNK( \
- uma_type, ("SimpleCache.App." uma_name, ##__VA_ARGS__)); \
- break; \
- default: \
- NOTREACHED(); \
- break; \
- } \
+#define SIMPLE_CACHE_UMA(uma_type, uma_name, cache_type, ...) \
+ do { \
+ switch (cache_type) { \
+ case net::DISK_CACHE: \
+ SIMPLE_CACHE_THUNK( \
+ uma_type, ("SimpleCache.Http." uma_name, ##__VA_ARGS__)); \
+ break; \
+ case net::APP_CACHE: \
+ SIMPLE_CACHE_THUNK( \
+ uma_type, ("SimpleCache.App." uma_name, ##__VA_ARGS__)); \
+ break; \
+ case net::MEDIA_CACHE: \
+ SIMPLE_CACHE_THUNK( \
+ uma_type, ("SimpleCache.Media." uma_name, ##__VA_ARGS__)); \
+ break; \
+ default: \
+ NOTREACHED(); \
+ break; \
+ } \
} while (0)
#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_HISTOGRAM_MACROS_H_
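
The widened switch above adds a net::MEDIA_CACHE arm; in every arm the macro builds the histogram name by adjacent string-literal concatenation ("SimpleCache.Media." uma_name), so the full name exists at compile time. A tiny standalone illustration of that mechanism, with a hypothetical macro and printf standing in for the UMA thunk:

#include <cstdio>

// TOY_CACHE_UMA is illustrative only; the prefix and the name are separate
// string literals that the preprocessor places side by side, and the compiler
// then joins them into one literal.
#define TOY_CACHE_UMA(cache_prefix, uma_name) \
  std::printf("histogram: %s\n", cache_prefix uma_name)

int main() {
  TOY_CACHE_UMA("SimpleCache.Media.", "SyncOpenResult");
  return 0;
}
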
diff --git a/chromium/net/disk_cache/simple/simple_index_file.cc b/chromium/net/disk_cache/simple/simple_index_file.cc
index c350cddb39f..abd9f7a7ae0 100644
--- a/chromium/net/disk_cache/simple/simple_index_file.cc
+++ b/chromium/net/disk_cache/simple/simple_index_file.cc
@@ -30,9 +30,6 @@ const int kEntryFilesSuffixLength = 2;
const uint64 kMaxEntiresInIndex = 100000000;
-const char kIndexFileName[] = "the-real-index";
-const char kTempIndexFileName[] = "temp-index";
-
uint32 CalculatePickleCRC(const Pickle& pickle) {
return crc32(crc32(0, Z_NULL, 0),
reinterpret_cast<const Bytef*>(pickle.payload()),
@@ -99,7 +96,7 @@ void ProcessEntryFile(SimpleIndex::EntrySet* entries,
}
base::PlatformFileInfo file_info;
- if (!file_util::GetFileInfo(file_path, &file_info)) {
+ if (!base::GetFileInfo(file_path, &file_info)) {
LOG(ERROR) << "Could not get file info for " << file_path.value();
return;
}
@@ -206,7 +203,7 @@ void SimpleIndexFile::SyncWriteToDisk(net::CacheType cache_type,
}
SerializeFinalData(cache_dir_mtime, pickle.get());
if (!WritePickleFile(pickle.get(), temp_index_filename)) {
- if (!file_util::CreateDirectory(temp_index_filename.DirName())) {
+ if (!base::CreateDirectory(temp_index_filename.DirName())) {
LOG(ERROR) << "Could not create a directory to hold the index file";
return;
}
diff --git a/chromium/net/disk_cache/simple/simple_index_file.h b/chromium/net/disk_cache/simple/simple_index_file.h
index ce19e2bb56f..756294121c9 100644
--- a/chromium/net/disk_cache/simple/simple_index_file.h
+++ b/chromium/net/disk_cache/simple/simple_index_file.h
@@ -40,15 +40,15 @@ struct NET_EXPORT_PRIVATE SimpleIndexLoadResult {
};
// Simple Index File format is a pickle serialized data of IndexMetadata and
-// EntryMetadata objects. The file format is as follows: one instance of
+// EntryMetadata objects. The file format is as follows: one instance of
// serialized |IndexMetadata| followed by serialized |EntryMetadata| entries
-// repeated |number_of_entries| amount of times. To know more about the format,
+// repeated |number_of_entries| amount of times. To know more about the format,
// see SimpleIndexFile::Serialize() and SimpleIndexFile::LoadFromDisk()
// methods.
//
-// The non-static methods must run on the IO thread. All the real
+// The non-static methods must run on the IO thread. All the real
// work is done in the static methods, which are run on the cache thread
-// or in worker threads. Synchronization between methods is the
+// or in worker threads. Synchronization between methods is the
// responsibility of the caller.
class NET_EXPORT_PRIVATE SimpleIndexFile {
public:
diff --git a/chromium/net/disk_cache/simple/simple_index_file_unittest.cc b/chromium/net/disk_cache/simple/simple_index_file_unittest.cc
index 0e9c2e8155b..17aa595bba9 100644
--- a/chromium/net/disk_cache/simple/simple_index_file_unittest.cc
+++ b/chromium/net/disk_cache/simple/simple_index_file_unittest.cc
@@ -11,13 +11,18 @@
#include "base/pickle.h"
#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
+#include "base/threading/thread.h"
#include "base/time/time.h"
#include "net/base/cache_type.h"
+#include "net/base/test_completion_callback.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_backend_version.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_util.h"
+#include "net/disk_cache/simple/simple_version_upgrade.h"
#include "testing/gtest/include/gtest/gtest.h"
using base::Time;
@@ -81,7 +86,7 @@ class WrappedSimpleIndexFile : public SimpleIndexFile {
}
bool CreateIndexFileDirectory() const {
- return file_util::CreateDirectory(index_file_.DirName());
+ return base::CreateDirectory(index_file_.DirName());
}
};
@@ -173,13 +178,13 @@ TEST_F(SimpleIndexFileTest, LegacyIsIndexFileStale) {
const base::Time past_time = base::Time::Now() -
base::TimeDelta::FromSeconds(10);
- EXPECT_TRUE(file_util::TouchFile(index_path, past_time, past_time));
- EXPECT_TRUE(file_util::TouchFile(cache_path, past_time, past_time));
+ EXPECT_TRUE(base::TouchFile(index_path, past_time, past_time));
+ EXPECT_TRUE(base::TouchFile(cache_path, past_time, past_time));
ASSERT_TRUE(simple_util::GetMTime(cache_path, &cache_mtime));
EXPECT_FALSE(
WrappedSimpleIndexFile::LegacyIsIndexFileStale(cache_mtime, index_path));
const base::Time even_older = past_time - base::TimeDelta::FromSeconds(10);
- EXPECT_TRUE(file_util::TouchFile(index_path, even_older, even_older));
+ EXPECT_TRUE(base::TouchFile(index_path, even_older, even_older));
EXPECT_TRUE(
WrappedSimpleIndexFile::LegacyIsIndexFileStale(cache_mtime, index_path));
}
@@ -257,6 +262,84 @@ TEST_F(SimpleIndexFileTest, LoadCorruptIndex) {
EXPECT_TRUE(load_index_result.flush_required);
}
+// Tests that after an upgrade the backend has the index file put in place.
+TEST_F(SimpleIndexFileTest, SimpleCacheUpgrade) {
+ base::ScopedTempDir cache_dir;
+ ASSERT_TRUE(cache_dir.CreateUniqueTempDir());
+ const base::FilePath cache_path = cache_dir.path();
+
+ // Write an old fake index file.
+ base::PlatformFileError error;
+ base::PlatformFile file = base::CreatePlatformFile(
+ cache_path.AppendASCII("index"),
+ base::PLATFORM_FILE_CREATE | base::PLATFORM_FILE_WRITE,
+ NULL,
+ &error);
+ disk_cache::FakeIndexData file_contents;
+ file_contents.initial_magic_number = disk_cache::kSimpleInitialMagicNumber;
+ file_contents.version = 5;
+ int bytes_written = base::WritePlatformFile(
+ file, 0, reinterpret_cast<char*>(&file_contents), sizeof(file_contents));
+ ASSERT_TRUE(base::ClosePlatformFile(file));
+ ASSERT_EQ((int)sizeof(file_contents), bytes_written);
+
+ // Write the index file. The format is incorrect, but for transitioning from
+ // v5 it does not matter.
+ const std::string index_file_contents("incorrectly serialized data");
+ const base::FilePath old_index_file =
+ cache_path.AppendASCII("the-real-index");
+ ASSERT_EQ(implicit_cast<int>(index_file_contents.size()),
+ file_util::WriteFile(old_index_file,
+ index_file_contents.data(),
+ index_file_contents.size()));
+
+ // Upgrade the cache.
+ ASSERT_TRUE(disk_cache::UpgradeSimpleCacheOnDisk(cache_path));
+
+ // Create the backend and initiate index flush by destroying the backend.
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+ disk_cache::SimpleBackendImpl* simple_cache =
+ new disk_cache::SimpleBackendImpl(cache_path,
+ 0,
+ net::DISK_CACHE,
+ cache_thread.message_loop_proxy().get(),
+ NULL);
+ net::TestCompletionCallback cb;
+ int rv = simple_cache->Init(cb.callback());
+ EXPECT_EQ(net::OK, cb.GetResult(rv));
+ rv = simple_cache->index()->ExecuteWhenReady(cb.callback());
+ EXPECT_EQ(net::OK, cb.GetResult(rv));
+ delete simple_cache;
+
+ // The backend flushes the index on destruction and does so on the cache
+  // thread; wait for the flushing to finish by posting a callback to the cache
+ // thread after that.
+ MessageLoopHelper helper;
+ CallbackTest cb_shutdown(&helper, false);
+ cache_thread.message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&CallbackTest::Run, base::Unretained(&cb_shutdown), net::OK));
+ helper.WaitUntilCacheIoFinished(1);
+
+ // Verify that the index file exists.
+ const base::FilePath& index_file_path =
+ cache_path.AppendASCII("index-dir").AppendASCII("the-real-index");
+ EXPECT_TRUE(base::PathExists(index_file_path));
+
+ // Verify that the version of the index file is correct.
+ std::string contents;
+ EXPECT_TRUE(base::ReadFileToString(index_file_path, &contents));
+ base::Time when_index_last_saw_cache;
+ SimpleIndexLoadResult deserialize_result;
+ WrappedSimpleIndexFile::Deserialize(contents.data(),
+ contents.size(),
+ &when_index_last_saw_cache,
+ &deserialize_result);
+ EXPECT_TRUE(deserialize_result.did_load);
+}
+
#endif // defined(OS_POSIX)
} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_index_unittest.cc b/chromium/net/disk_cache/simple/simple_index_unittest.cc
index 47ae24f62fe..cee7bf33c4d 100644
--- a/chromium/net/disk_cache/simple/simple_index_unittest.cc
+++ b/chromium/net/disk_cache/simple/simple_index_unittest.cc
@@ -538,7 +538,7 @@ TEST_F(SimpleIndexTest, BasicEviction) {
// Trigger an eviction, and make sure the right things are tossed.
// TODO(rdsmith): This is dependent on the innards of the implementation
- // as to at exactly what point we trigger eviction. Not sure how to fix
+ // as to at exactly what point we trigger eviction. Not sure how to fix
// that.
index()->UpdateEntrySize(hashes_.at<3>(), 475);
EXPECT_EQ(1, doom_entries_calls());
diff --git a/chromium/net/disk_cache/simple/simple_net_log_parameters.h b/chromium/net/disk_cache/simple/simple_net_log_parameters.h
index b6f386f4a99..41ea9a2af91 100644
--- a/chromium/net/disk_cache/simple/simple_net_log_parameters.h
+++ b/chromium/net/disk_cache/simple/simple_net_log_parameters.h
@@ -14,14 +14,14 @@ namespace disk_cache {
class SimpleEntryImpl;
// Creates a NetLog callback that returns parameters for the construction of a
-// SimpleEntryImpl. Contains the entry's hash. |entry| can't be NULL and must
+// SimpleEntryImpl. Contains the entry's hash. |entry| can't be NULL and must
// outlive the returned callback.
net::NetLog::ParametersCallback CreateNetLogSimpleEntryConstructionCallback(
const SimpleEntryImpl* entry);
// Creates a NetLog callback that returns parameters for the result of calling
-// |CreateEntry| or |OpenEntry| on a SimpleEntryImpl. Contains the |net_error|
-// and, if successful, the entry's key. |entry| can't be NULL and must outlive
+// |CreateEntry| or |OpenEntry| on a SimpleEntryImpl. Contains the |net_error|
+// and, if successful, the entry's key. |entry| can't be NULL and must outlive
// the returned callback.
net::NetLog::ParametersCallback CreateNetLogSimpleEntryCreationCallback(
const SimpleEntryImpl* entry,
diff --git a/chromium/net/disk_cache/simple/simple_synchronous_entry.cc b/chromium/net/disk_cache/simple/simple_synchronous_entry.cc
index 38e8a3cae99..81f52e15b7d 100644
--- a/chromium/net/disk_cache/simple/simple_synchronous_entry.cc
+++ b/chromium/net/disk_cache/simple/simple_synchronous_entry.cc
@@ -34,6 +34,7 @@ using base::PLATFORM_FILE_ERROR_EXISTS;
using base::PLATFORM_FILE_ERROR_NOT_FOUND;
using base::PLATFORM_FILE_OK;
using base::PLATFORM_FILE_OPEN;
+using base::PLATFORM_FILE_OPEN_ALWAYS;
using base::PLATFORM_FILE_READ;
using base::PLATFORM_FILE_WRITE;
using base::ReadPlatformFile;
@@ -53,7 +54,8 @@ enum OpenEntryResult {
OPEN_ENTRY_CANT_READ_KEY = 5,
// OPEN_ENTRY_KEY_MISMATCH = 6, Deprecated.
OPEN_ENTRY_KEY_HASH_MISMATCH = 7,
- OPEN_ENTRY_MAX = 8,
+ OPEN_ENTRY_SPARSE_OPEN_FAILED = 8,
+ OPEN_ENTRY_MAX = 9,
};
// Used in histograms, please only add entries at the end.
@@ -128,15 +130,18 @@ namespace disk_cache {
using simple_util::GetEntryHashKey;
using simple_util::GetFilenameFromEntryHashAndFileIndex;
+using simple_util::GetSparseFilenameFromEntryHash;
using simple_util::GetDataSizeFromKeyAndFileSize;
using simple_util::GetFileSizeFromKeyAndDataSize;
using simple_util::GetFileIndexFromStreamIndex;
SimpleEntryStat::SimpleEntryStat(base::Time last_used,
base::Time last_modified,
- const int32 data_size[])
+ const int32 data_size[],
+ const int32 sparse_data_size)
: last_used_(last_used),
- last_modified_(last_modified) {
+ last_modified_(last_modified),
+ sparse_data_size_(sparse_data_size) {
memcpy(data_size_, data_size, sizeof(data_size_));
}
@@ -211,6 +216,12 @@ SimpleSynchronousEntry::EntryOperationData::EntryOperationData(int index_p,
truncate(truncate_p),
doomed(doomed_p) {}
+SimpleSynchronousEntry::EntryOperationData::EntryOperationData(
+ int64 sparse_offset_p,
+ int buf_len_p)
+ : sparse_offset(sparse_offset_p),
+ buf_len(buf_len_p) {}
+
// static
void SimpleSynchronousEntry::OpenEntry(
net::CacheType cache_type,
@@ -391,6 +402,213 @@ void SimpleSynchronousEntry::WriteData(const EntryOperationData& in_entry_op,
*out_result = buf_len;
}
+void SimpleSynchronousEntry::ReadSparseData(
+ const EntryOperationData& in_entry_op,
+ net::IOBuffer* out_buf,
+ base::Time* out_last_used,
+ int* out_result) {
+ DCHECK(initialized_);
+ int64 offset = in_entry_op.sparse_offset;
+ int buf_len = in_entry_op.buf_len;
+
+ char* buf = out_buf->data();
+ int read_so_far = 0;
+
+ // Find the first sparse range at or after the requested offset.
+ SparseRangeIterator it = sparse_ranges_.lower_bound(offset);
+
+ if (it != sparse_ranges_.begin()) {
+ // Hop back one range and read the one overlapping with the start.
+ --it;
+ SparseRange* found_range = &it->second;
+ DCHECK_EQ(it->first, found_range->offset);
+ if (found_range->offset + found_range->length > offset) {
+ DCHECK_LE(0, found_range->length);
+ DCHECK_GE(kint32max, found_range->length);
+ DCHECK_LE(0, offset - found_range->offset);
+ DCHECK_GE(kint32max, offset - found_range->offset);
+ int range_len_after_offset = found_range->length -
+ (offset - found_range->offset);
+ DCHECK_LE(0, range_len_after_offset);
+
+ int len_to_read = std::min(buf_len, range_len_after_offset);
+ if (!ReadSparseRange(found_range,
+ offset - found_range->offset,
+ len_to_read,
+ buf)) {
+ *out_result = net::ERR_CACHE_READ_FAILURE;
+ return;
+ }
+ read_so_far += len_to_read;
+ }
+ ++it;
+ }
+
+ // Keep reading until the buffer is full or there is not another contiguous
+ // range.
+ while (read_so_far < buf_len &&
+ it != sparse_ranges_.end() &&
+ it->second.offset == offset + read_so_far) {
+ SparseRange* found_range = &it->second;
+ DCHECK_EQ(it->first, found_range->offset);
+ int range_len = (found_range->length > kint32max) ?
+ kint32max : found_range->length;
+ int len_to_read = std::min(buf_len - read_so_far, range_len);
+ if (!ReadSparseRange(found_range, 0, len_to_read, buf + read_so_far)) {
+ *out_result = net::ERR_CACHE_READ_FAILURE;
+ return;
+ }
+ read_so_far += len_to_read;
+ ++it;
+ }
+
+ *out_result = read_so_far;
+}
+
+void SimpleSynchronousEntry::WriteSparseData(
+ const EntryOperationData& in_entry_op,
+ net::IOBuffer* in_buf,
+ int64 max_sparse_data_size,
+ SimpleEntryStat* out_entry_stat,
+ int* out_result) {
+ DCHECK(initialized_);
+ int64 offset = in_entry_op.sparse_offset;
+ int buf_len = in_entry_op.buf_len;
+
+ const char* buf = in_buf->data();
+ int written_so_far = 0;
+ int appended_so_far = 0;
+
+ if (!sparse_file_open() && !CreateSparseFile()) {
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+
+ int64 sparse_data_size = out_entry_stat->sparse_data_size();
+ // This is a pessimistic estimate; it assumes the entire buffer is going to
+ // be appended as a new range, not written over existing ranges.
+ if (sparse_data_size + buf_len > max_sparse_data_size) {
+ DLOG(INFO) << "Truncating sparse data file (" << sparse_data_size << " + "
+ << buf_len << " > " << max_sparse_data_size << ")";
+ TruncateSparseFile();
+ }
+
+ SparseRangeIterator it = sparse_ranges_.lower_bound(offset);
+
+ if (it != sparse_ranges_.begin()) {
+ --it;
+ SparseRange* found_range = &it->second;
+ if (found_range->offset + found_range->length > offset) {
+ DCHECK_LE(0, found_range->length);
+ DCHECK_GE(kint32max, found_range->length);
+ DCHECK_LE(0, offset - found_range->offset);
+ DCHECK_GE(kint32max, offset - found_range->offset);
+ int range_len_after_offset = found_range->length -
+ (offset - found_range->offset);
+ DCHECK_LE(0, range_len_after_offset);
+
+ int len_to_write = std::min(buf_len, range_len_after_offset);
+ if (!WriteSparseRange(found_range,
+ offset - found_range->offset,
+ len_to_write,
+ buf)) {
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+ written_so_far += len_to_write;
+ }
+ ++it;
+ }
+
+ while (written_so_far < buf_len &&
+ it != sparse_ranges_.end() &&
+ it->second.offset < offset + buf_len) {
+ SparseRange* found_range = &it->second;
+ if (offset + written_so_far < found_range->offset) {
+ int len_to_append = found_range->offset - (offset + written_so_far);
+ if (!AppendSparseRange(offset + written_so_far,
+ len_to_append,
+ buf + written_so_far)) {
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+ written_so_far += len_to_append;
+ appended_so_far += len_to_append;
+ }
+ int range_len = (found_range->length > kint32max) ?
+ kint32max : found_range->length;
+ int len_to_write = std::min(buf_len - written_so_far, range_len);
+ if (!WriteSparseRange(found_range,
+ 0,
+ len_to_write,
+ buf + written_so_far)) {
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+ written_so_far += len_to_write;
+ ++it;
+ }
+
+ if (written_so_far < buf_len) {
+ int len_to_append = buf_len - written_so_far;
+ if (!AppendSparseRange(offset + written_so_far,
+ len_to_append,
+ buf + written_so_far)) {
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+ written_so_far += len_to_append;
+ appended_so_far += len_to_append;
+ }
+
+ DCHECK_EQ(buf_len, written_so_far);
+
+ base::Time modification_time = Time::Now();
+ out_entry_stat->set_last_used(modification_time);
+ out_entry_stat->set_last_modified(modification_time);
+ int32 old_sparse_data_size = out_entry_stat->sparse_data_size();
+ out_entry_stat->set_sparse_data_size(old_sparse_data_size + appended_so_far);
+ *out_result = written_so_far;
+}
+
+void SimpleSynchronousEntry::GetAvailableRange(
+ const EntryOperationData& in_entry_op,
+ int64* out_start,
+ int* out_result) {
+ DCHECK(initialized_);
+ int64 offset = in_entry_op.sparse_offset;
+ int len = in_entry_op.buf_len;
+
+ SparseRangeIterator it = sparse_ranges_.lower_bound(offset);
+
+ int64 start = offset;
+ int avail_so_far = 0;
+
+ if (it != sparse_ranges_.end() && it->second.offset < offset + len)
+ start = it->second.offset;
+
+ if ((it == sparse_ranges_.end() || it->second.offset > offset) &&
+ it != sparse_ranges_.begin()) {
+ --it;
+ if (it->second.offset + it->second.length > offset) {
+ start = offset;
+ avail_so_far = (it->second.offset + it->second.length) - offset;
+ }
+ ++it;
+ }
+
+ while (start + avail_so_far < offset + len &&
+ it != sparse_ranges_.end() &&
+ it->second.offset == start + avail_so_far) {
+ avail_so_far += it->second.length;
+ ++it;
+ }
+
+ int len_from_start = len - (start - offset);
+ *out_start = start;
+ *out_result = std::min(avail_so_far, len_from_start);
+}
+
void SimpleSynchronousEntry::CheckEOFRecord(int index,
const SimpleEntryStat& entry_stat,
uint32 expected_crc32,
@@ -482,6 +700,16 @@ void SimpleSynchronousEntry::Close(
cluster_loss * 100 / (cluster_loss + file_size));
}
+ if (sparse_file_open()) {
+ bool did_close_file = ClosePlatformFile(sparse_file_);
+ CHECK(did_close_file);
+ }
+
+ if (files_created_) {
+ const int stream2_file_index = GetFileIndexFromStreamIndex(2);
+ SIMPLE_CACHE_UMA(BOOLEAN, "EntryCreatedAndStream2Omitted", cache_type_,
+ empty_file_omitted_[stream2_file_index]);
+ }
RecordCloseResult(cache_type_, CLOSE_RESULT_SUCCESS);
have_open_files_ = false;
delete this;
@@ -496,7 +724,8 @@ SimpleSynchronousEntry::SimpleSynchronousEntry(net::CacheType cache_type,
entry_hash_(entry_hash),
key_(key),
have_open_files_(false),
- initialized_(false) {
+ initialized_(false),
+ sparse_file_(kInvalidPlatformFileValue) {
for (int i = 0; i < kSimpleEntryFileCount; ++i) {
files_[i] = kInvalidPlatformFileValue;
empty_file_omitted_[i] = false;
@@ -622,6 +851,8 @@ bool SimpleSynchronousEntry::OpenFiles(
"SyncOpenEntryAge", cache_type_,
entry_age.InHours(), 1, 1000, 50);
+ files_created_ = false;
+
return true;
}
@@ -661,6 +892,8 @@ bool SimpleSynchronousEntry::CreateFiles(
for (int i = 0; i < kSimpleEntryStreamCount; ++i)
out_entry_stat->set_data_size(i, 0);
+ files_created_ = true;
+
return true;
}
@@ -673,6 +906,11 @@ void SimpleSynchronousEntry::CloseFile(int index) {
DCHECK(did_close);
files_[index] = kInvalidPlatformFileValue;
}
+
+ if (sparse_file_open()) {
+ bool did_close = CloseSparseFile();
+ DCHECK(did_close);
+ }
}
void SimpleSynchronousEntry::CloseFiles() {
@@ -754,16 +992,29 @@ int SimpleSynchronousEntry::InitializeForOpen(
}
}
- const int third_stream_file_index = GetFileIndexFromStreamIndex(2);
- DCHECK(CanOmitEmptyFile(third_stream_file_index));
- if (!empty_file_omitted_[third_stream_file_index] &&
+ int32 sparse_data_size = 0;
+ if (!OpenSparseFileIfExists(&sparse_data_size)) {
+ RecordSyncOpenResult(
+ cache_type_, OPEN_ENTRY_SPARSE_OPEN_FAILED, had_index);
+ return net::ERR_FAILED;
+ }
+ out_entry_stat->set_sparse_data_size(sparse_data_size);
+
+ bool removed_stream2 = false;
+ const int stream2_file_index = GetFileIndexFromStreamIndex(2);
+ DCHECK(CanOmitEmptyFile(stream2_file_index));
+ if (!empty_file_omitted_[stream2_file_index] &&
out_entry_stat->data_size(2) == 0) {
- DLOG(INFO) << "Removing empty third stream file.";
- CloseFile(third_stream_file_index);
- DeleteFileForEntryHash(path_, entry_hash_, third_stream_file_index);
- empty_file_omitted_[third_stream_file_index] = true;
+ DLOG(INFO) << "Removing empty stream 2 file.";
+ CloseFile(stream2_file_index);
+ DeleteFileForEntryHash(path_, entry_hash_, stream2_file_index);
+ empty_file_omitted_[stream2_file_index] = true;
+ removed_stream2 = true;
}
+ SIMPLE_CACHE_UMA(BOOLEAN, "EntryOpenedAndStream2Removed", cache_type_,
+ removed_stream2);
+
RecordSyncOpenResult(cache_type_, OPEN_ENTRY_SUCCESS, had_index);
initialized_ = true;
return net::OK;
@@ -905,6 +1156,7 @@ void SimpleSynchronousEntry::Doom() const {
DeleteFilesForEntryHash(path_, entry_hash_);
}
+// static
bool SimpleSynchronousEntry::DeleteFileForEntryHash(
const FilePath& path,
const uint64 entry_hash,
@@ -914,6 +1166,7 @@ bool SimpleSynchronousEntry::DeleteFileForEntryHash(
return base::DeleteFile(to_delete, false);
}
+// static
bool SimpleSynchronousEntry::DeleteFilesForEntryHash(
const FilePath& path,
const uint64 entry_hash) {
@@ -922,6 +1175,9 @@ bool SimpleSynchronousEntry::DeleteFilesForEntryHash(
if (!DeleteFileForEntryHash(path, entry_hash, i) && !CanOmitEmptyFile(i))
result = false;
}
+ FilePath to_delete = path.AppendASCII(
+ GetSparseFilenameFromEntryHash(entry_hash));
+ base::DeleteFile(to_delete, false);
return result;
}
@@ -946,4 +1202,277 @@ FilePath SimpleSynchronousEntry::GetFilenameFromFileIndex(int file_index) {
GetFilenameFromEntryHashAndFileIndex(entry_hash_, file_index));
}
+bool SimpleSynchronousEntry::OpenSparseFileIfExists(
+ int32* out_sparse_data_size) {
+ DCHECK(!sparse_file_open());
+
+ FilePath filename = path_.AppendASCII(
+ GetSparseFilenameFromEntryHash(entry_hash_));
+ int flags = PLATFORM_FILE_OPEN | PLATFORM_FILE_READ | PLATFORM_FILE_WRITE;
+ bool created;
+ PlatformFileError error;
+ sparse_file_ = CreatePlatformFile(filename, flags, &created, &error);
+ if (error == PLATFORM_FILE_ERROR_NOT_FOUND)
+ return true;
+
+ return ScanSparseFile(out_sparse_data_size);
+}
+
+bool SimpleSynchronousEntry::CreateSparseFile() {
+ DCHECK(!sparse_file_open());
+
+ FilePath filename = path_.AppendASCII(
+ GetSparseFilenameFromEntryHash(entry_hash_));
+ int flags = PLATFORM_FILE_CREATE | PLATFORM_FILE_READ | PLATFORM_FILE_WRITE;
+ bool created;
+ PlatformFileError error;
+ sparse_file_ = CreatePlatformFile(filename, flags, &created, &error);
+ if (error != PLATFORM_FILE_OK)
+ return false;
+
+ return InitializeSparseFile();
+}
+
+bool SimpleSynchronousEntry::CloseSparseFile() {
+ DCHECK(sparse_file_open());
+
+ bool did_close = ClosePlatformFile(sparse_file_);
+ if (did_close)
+ sparse_file_ = kInvalidPlatformFileValue;
+ return did_close;
+}
+
+bool SimpleSynchronousEntry::TruncateSparseFile() {
+ DCHECK(sparse_file_open());
+
+ int64 header_and_key_length = sizeof(SimpleFileHeader) + key_.size();
+ if (!TruncatePlatformFile(sparse_file_, header_and_key_length)) {
+ DLOG(WARNING) << "Could not truncate sparse file";
+ return false;
+ }
+
+ sparse_ranges_.clear();
+
+ return true;
+}
+
+bool SimpleSynchronousEntry::InitializeSparseFile() {
+ DCHECK(sparse_file_open());
+
+ SimpleFileHeader header;
+ header.initial_magic_number = kSimpleInitialMagicNumber;
+ header.version = kSimpleVersion;
+ header.key_length = key_.size();
+ header.key_hash = base::Hash(key_);
+
+ int header_write_result =
+ WritePlatformFile(sparse_file_, 0, reinterpret_cast<char*>(&header),
+ sizeof(header));
+ if (header_write_result != sizeof(header)) {
+ DLOG(WARNING) << "Could not write sparse file header";
+ return false;
+ }
+
+ int key_write_result = WritePlatformFile(sparse_file_, sizeof(header),
+ key_.data(), key_.size());
+ if (key_write_result != implicit_cast<int>(key_.size())) {
+ DLOG(WARNING) << "Could not write sparse file key";
+ return false;
+ }
+
+ sparse_ranges_.clear();
+ sparse_tail_offset_ = sizeof(header) + key_.size();
+
+ return true;
+}
+
+bool SimpleSynchronousEntry::ScanSparseFile(int32* out_sparse_data_size) {
+ DCHECK(sparse_file_open());
+
+ int32 sparse_data_size = 0;
+
+ SimpleFileHeader header;
+ int header_read_result =
+ ReadPlatformFile(sparse_file_, 0, reinterpret_cast<char*>(&header),
+ sizeof(header));
+ if (header_read_result != sizeof(header)) {
+ DLOG(WARNING) << "Could not read header from sparse file.";
+ return false;
+ }
+
+ if (header.initial_magic_number != kSimpleInitialMagicNumber) {
+ DLOG(WARNING) << "Sparse file magic number did not match.";
+ return false;
+ }
+
+ if (header.version != kSimpleVersion) {
+ DLOG(WARNING) << "Sparse file unreadable version.";
+ return false;
+ }
+
+ sparse_ranges_.clear();
+
+ int64 range_header_offset = sizeof(header) + key_.size();
+ while (1) {
+ SimpleFileSparseRangeHeader range_header;
+ int range_header_read_result =
+ ReadPlatformFile(sparse_file_,
+ range_header_offset,
+ reinterpret_cast<char*>(&range_header),
+ sizeof(range_header));
+ if (range_header_read_result == 0)
+ break;
+ if (range_header_read_result != sizeof(range_header)) {
+ DLOG(WARNING) << "Could not read sparse range header.";
+ return false;
+ }
+
+ if (range_header.sparse_range_magic_number !=
+ kSimpleSparseRangeMagicNumber) {
+ DLOG(WARNING) << "Invalid sparse range header magic number.";
+ return false;
+ }
+
+ SparseRange range;
+ range.offset = range_header.offset;
+ range.length = range_header.length;
+ range.data_crc32 = range_header.data_crc32;
+ range.file_offset = range_header_offset + sizeof(range_header);
+ sparse_ranges_.insert(std::make_pair(range.offset, range));
+
+ range_header_offset += sizeof(range_header) + range.length;
+
+ DCHECK_LE(sparse_data_size, sparse_data_size + range.length);
+ sparse_data_size += range.length;
+ }
+
+ *out_sparse_data_size = sparse_data_size;
+ sparse_tail_offset_ = range_header_offset;
+
+ return true;
+}
+
+bool SimpleSynchronousEntry::ReadSparseRange(const SparseRange* range,
+ int offset, int len, char* buf) {
+ DCHECK(range);
+ DCHECK(buf);
+ DCHECK_GE(range->length, offset);
+ DCHECK_GE(range->length, offset + len);
+
+ int bytes_read = ReadPlatformFile(sparse_file_,
+ range->file_offset + offset,
+ buf, len);
+ if (bytes_read < len) {
+ DLOG(WARNING) << "Could not read sparse range.";
+ return false;
+ }
+
+ // If we read the whole range and we have a crc32, check it.
+ if (offset == 0 && len == range->length && range->data_crc32 != 0) {
+ uint32 actual_crc32 = crc32(crc32(0L, Z_NULL, 0),
+ reinterpret_cast<const Bytef*>(buf),
+ len);
+ if (actual_crc32 != range->data_crc32) {
+ DLOG(WARNING) << "Sparse range crc32 mismatch.";
+ return false;
+ }
+ }
+ // TODO(ttuttle): Incremental crc32 calculation?
+
+ return true;
+}
+
+bool SimpleSynchronousEntry::WriteSparseRange(SparseRange* range,
+ int offset, int len,
+ const char* buf) {
+ DCHECK(range);
+ DCHECK(buf);
+ DCHECK_GE(range->length, offset);
+ DCHECK_GE(range->length, offset + len);
+
+ uint32 new_crc32 = 0;
+ if (offset == 0 && len == range->length) {
+ new_crc32 = crc32(crc32(0L, Z_NULL, 0),
+ reinterpret_cast<const Bytef*>(buf),
+ len);
+ }
+
+ if (new_crc32 != range->data_crc32) {
+ range->data_crc32 = new_crc32;
+
+ SimpleFileSparseRangeHeader header;
+ header.sparse_range_magic_number = kSimpleSparseRangeMagicNumber;
+ header.offset = range->offset;
+ header.length = range->length;
+ header.data_crc32 = range->data_crc32;
+
+ int bytes_written = WritePlatformFile(sparse_file_,
+ range->file_offset - sizeof(header),
+ reinterpret_cast<char*>(&header),
+ sizeof(header));
+ if (bytes_written != implicit_cast<int>(sizeof(header))) {
+ DLOG(WARNING) << "Could not rewrite sparse range header.";
+ return false;
+ }
+ }
+
+ int bytes_written = WritePlatformFile(sparse_file_,
+ range->file_offset + offset,
+ buf, len);
+ if (bytes_written < len) {
+ DLOG(WARNING) << "Could not write sparse range.";
+ return false;
+ }
+
+ return true;
+}
+
+bool SimpleSynchronousEntry::AppendSparseRange(int64 offset,
+ int len,
+ const char* buf) {
+ DCHECK_LE(0, offset);
+ DCHECK_LT(0, len);
+ DCHECK(buf);
+
+ uint32 data_crc32 = crc32(crc32(0L, Z_NULL, 0),
+ reinterpret_cast<const Bytef*>(buf),
+ len);
+
+ SimpleFileSparseRangeHeader header;
+ header.sparse_range_magic_number = kSimpleSparseRangeMagicNumber;
+ header.offset = offset;
+ header.length = len;
+ header.data_crc32 = data_crc32;
+
+ int bytes_written = WritePlatformFile(sparse_file_,
+ sparse_tail_offset_,
+ reinterpret_cast<char*>(&header),
+ sizeof(header));
+ if (bytes_written != implicit_cast<int>(sizeof(header))) {
+ DLOG(WARNING) << "Could not append sparse range header.";
+ return false;
+ }
+ sparse_tail_offset_ += bytes_written;
+
+ bytes_written = WritePlatformFile(sparse_file_,
+ sparse_tail_offset_,
+ buf,
+ len);
+ if (bytes_written < len) {
+ DLOG(WARNING) << "Could not append sparse range data.";
+ return false;
+ }
+ int64 data_file_offset = sparse_tail_offset_;
+ sparse_tail_offset_ += bytes_written;
+
+ SparseRange range;
+ range.offset = offset;
+ range.length = len;
+ range.data_crc32 = data_crc32;
+ range.file_offset = data_file_offset;
+ sparse_ranges_.insert(std::make_pair(offset, range));
+
+ return true;
+}
+
} // namespace disk_cache
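
ReadSparseData() and GetAvailableRange() above both walk sparse_ranges_, a std::map keyed by range offset: lower_bound() finds the first stored range at or after the requested offset, one step back catches a range that starts earlier but still overlaps it, and a loop then accumulates strictly contiguous ranges. The sketch below is a self-contained model of that scan, assuming non-overlapping stored ranges; it mirrors the logic but is not the Chromium implementation.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>

struct Range { int64_t offset; int64_t length; };

// Returns how many contiguous bytes are stored in [offset, offset + len),
// starting at |*out_start| -- the same contract as GetAvailableRange().
int64_t Available(const std::map<int64_t, Range>& ranges,
                  int64_t offset, int64_t len, int64_t* out_start) {
  auto it = ranges.lower_bound(offset);
  int64_t start = offset;
  int64_t avail = 0;

  // A later range inside the window moves the start forward.
  if (it != ranges.end() && it->second.offset < offset + len)
    start = it->second.offset;

  // A range that begins before |offset| may still cover it.
  if ((it == ranges.end() || it->second.offset > offset) &&
      it != ranges.begin()) {
    auto prev = std::prev(it);
    if (prev->second.offset + prev->second.length > offset) {
      start = offset;
      avail = prev->second.offset + prev->second.length - offset;
    }
  }

  // Accumulate ranges that butt up exactly against what we have so far.
  while (it != ranges.end() && start + avail < offset + len &&
         it->second.offset == start + avail) {
    avail += it->second.length;
    ++it;
  }

  *out_start = start;
  return std::min(avail, len - (start - offset));
}

int main() {
  std::map<int64_t, Range> ranges;
  ranges[0] = {0, 100};     // [0, 100)
  ranges[100] = {100, 50};  // [100, 150), contiguous with the first range
  ranges[300] = {300, 10};  // [300, 310), isolated

  int64_t start = 0;
  std::cout << Available(ranges, 20, 1000, &start)  // 130 bytes at offset 20
            << " bytes available starting at " << start << "\n";
  return 0;
}
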
diff --git a/chromium/net/disk_cache/simple/simple_synchronous_entry.h b/chromium/net/disk_cache/simple/simple_synchronous_entry.h
index 470e8e20bd8..2ae4c0d87b6 100644
--- a/chromium/net/disk_cache/simple/simple_synchronous_entry.h
+++ b/chromium/net/disk_cache/simple/simple_synchronous_entry.h
@@ -6,6 +6,7 @@
#define NET_DISK_CACHE_SIMPLE_SIMPLE_SYNCHRONOUS_ENTRY_H_
#include <algorithm>
+#include <map>
#include <string>
#include <utility>
#include <vector>
@@ -35,7 +36,8 @@ class NET_EXPORT_PRIVATE SimpleEntryStat {
public:
SimpleEntryStat(base::Time last_used,
base::Time last_modified,
- const int32 data_size[]);
+ const int32 data_size[],
+ const int32 sparse_data_size);
int GetOffsetInFile(const std::string& key,
int offset,
@@ -56,10 +58,16 @@ class NET_EXPORT_PRIVATE SimpleEntryStat {
data_size_[stream_index] = data_size;
}
+ int32 sparse_data_size() const { return sparse_data_size_; }
+ void set_sparse_data_size(int32 sparse_data_size) {
+ sparse_data_size_ = sparse_data_size;
+ }
+
private:
base::Time last_used_;
base::Time last_modified_;
int32 data_size_[kSimpleEntryStreamCount];
+ int32 sparse_data_size_;
};
struct SimpleEntryCreationResults {
@@ -94,9 +102,11 @@ class SimpleSynchronousEntry {
int buf_len_p,
bool truncate_p,
bool doomed_p);
+ EntryOperationData(int64 sparse_offset_p, int buf_len_p);
int index;
int offset;
+ int64 sparse_offset;
int buf_len;
bool truncate;
bool doomed;
@@ -142,6 +152,19 @@ class SimpleSynchronousEntry {
uint32 expected_crc32,
int* out_result) const;
+ void ReadSparseData(const EntryOperationData& in_entry_op,
+ net::IOBuffer* out_buf,
+ base::Time* out_last_used,
+ int* out_result);
+ void WriteSparseData(const EntryOperationData& in_entry_op,
+ net::IOBuffer* in_buf,
+ int64 max_sparse_data_size,
+ SimpleEntryStat* out_entry_stat,
+ int* out_result);
+ void GetAvailableRange(const EntryOperationData& in_entry_op,
+ int64* out_start,
+ int* out_result);
+
// Close all streams, and add write EOF records to streams indicated by the
// CRCRecord entries in |crc32s_to_write|.
void Close(const SimpleEntryStat& entry_stat,
@@ -165,6 +188,17 @@ class SimpleSynchronousEntry {
FILE_REQUIRED
};
+ struct SparseRange {
+ int64 offset;
+ int64 length;
+ uint32 data_crc32;
+ int64 file_offset;
+
+ bool operator<(const SparseRange& other) const {
+ return offset < other.offset;
+ }
+ };
+
SimpleSynchronousEntry(
net::CacheType cache_type,
const base::FilePath& path,
@@ -175,12 +209,12 @@ class SimpleSynchronousEntry {
// called.
~SimpleSynchronousEntry();
- // Tries to open one of the cache entry files. Succeeds if the open succeeds
+ // Tries to open one of the cache entry files. Succeeds if the open succeeds
// or if the file was not found and is allowed to be omitted if the
// corresponding stream is empty.
bool MaybeOpenFile(int file_index,
base::PlatformFileError* out_error);
- // Creates one of the cache entry files if necessary. If the file is allowed
+ // Creates one of the cache entry files if necessary. If the file is allowed
// to be omitted if the corresponding stream is empty, and if |file_required|
// is FILE_NOT_REQUIRED, then the file is not created; otherwise, it is.
bool MaybeCreateFile(int file_index,
@@ -193,7 +227,7 @@ class SimpleSynchronousEntry {
void CloseFile(int index);
void CloseFiles();
- // Returns a net error, i.e. net::OK on success. |had_index| is passed
+ // Returns a net error, i.e. net::OK on success. |had_index| is passed
// from the main entry for metrics purposes, and is true if the index was
// initialized when the open operation began.
int InitializeForOpen(bool had_index,
@@ -201,13 +235,13 @@ class SimpleSynchronousEntry {
scoped_refptr<net::GrowableIOBuffer>* stream_0_data,
uint32* out_stream_0_crc32);
- // Writes the header and key to a newly-created stream file. |index| is the
- // index of the stream. Returns true on success; returns false and sets
+ // Writes the header and key to a newly-created stream file. |index| is the
+ // index of the stream. Returns true on success; returns false and sets
// |*out_result| on failure.
bool InitializeCreatedFile(int index, CreateEntryResult* out_result);
// Returns a net error, including net::OK on success and net::FILE_EXISTS
- // when the entry already exists. |had_index| is passed from the main entry
+ // when the entry already exists. |had_index| is passed from the main entry
// for metrics purposes, and is true if the index was initialized when the
// create operation began.
int InitializeForCreate(bool had_index, SimpleEntryStat* out_entry_stat);
@@ -227,6 +261,39 @@ class SimpleSynchronousEntry {
int* out_data_size) const;
void Doom() const;
+ // Opens the sparse data file and scans it if it exists.
+ bool OpenSparseFileIfExists(int32* out_sparse_data_size);
+
+ // Creates and initializes the sparse data file.
+ bool CreateSparseFile();
+
+ // Closes the sparse data file.
+ bool CloseSparseFile();
+
+ // Writes the header to the (newly-created) sparse file.
+ bool InitializeSparseFile();
+
+ // Removes all but the header of the sparse file.
+ bool TruncateSparseFile();
+
+ // Scans the existing ranges in the sparse file. Populates |sparse_ranges_|
+ // and sets |*out_sparse_data_size| to the total size of all the ranges (not
+ // including headers).
+ bool ScanSparseFile(int32* out_sparse_data_size);
+
+ // Reads from a single sparse range. If asked to read the entire range, also
+ // verifies the CRC32.
+ bool ReadSparseRange(const SparseRange* range,
+ int offset, int len, char* buf);
+
+ // Writes to a single (existing) sparse range. If asked to write the entire
+ // range, also updates the CRC32; otherwise, invalidates it.
+ bool WriteSparseRange(SparseRange* range,
+ int offset, int len, const char* buf);
+
+ // Appends a new sparse range to the sparse data file.
+ bool AppendSparseRange(int64 offset, int len, const char* buf);
+
static bool DeleteFileForEntryHash(const base::FilePath& path,
uint64 entry_hash,
int file_index);
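
The read/write helpers above split checksum handling by how much of a range is touched: reading a whole range verifies its CRC32, a partial read skips verification, and, per the WriteSparseRange comment, a partial overwrite invalidates the stored value. A rough standalone illustration of the verify-on-full-read rule, using zlib's crc32 as the simple cache's checksums do; the buffer stands in for data already read from the sparse file:

  #include <cstdint>
  #include <zlib.h>

  // Sketch: a caller that read |len| bytes starting |offset| bytes into a
  // sparse range of size |range_length| with stored checksum |range_crc32|
  // can only verify integrity when the whole range was read.
  bool VerifyIfFullRead(const char* buf, int len, int64_t offset,
                        int64_t range_length, uint32_t range_crc32) {
    if (offset != 0 || len != range_length)
      return true;  // Partial read: nothing to verify.
    uint32_t crc = static_cast<uint32_t>(
        crc32(crc32(0L, Z_NULL, 0),
              reinterpret_cast<const Bytef*>(buf), len));
    return crc == range_crc32;
  }
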
@@ -237,6 +304,10 @@ class SimpleSynchronousEntry {
base::FilePath GetFilenameFromFileIndex(int file_index);
+ bool sparse_file_open() const {
+ return sparse_file_ != base::kInvalidPlatformFileValue;
+ }
+
const net::CacheType cache_type_;
const base::FilePath path_;
const uint64 entry_hash_;
@@ -250,6 +321,18 @@ class SimpleSynchronousEntry {
// True if the corresponding stream is empty and therefore no on-disk file
// was created to store it.
bool empty_file_omitted_[kSimpleEntryFileCount];
+
+ typedef std::map<int64, SparseRange> SparseRangeOffsetMap;
+ typedef SparseRangeOffsetMap::iterator SparseRangeIterator;
+ SparseRangeOffsetMap sparse_ranges_;
+ base::PlatformFile sparse_file_;
+ // Offset of the end of the sparse file (where the next sparse range will be
+ // written).
+ int64 sparse_tail_offset_;
+
+ // True if the entry was created, or false if it was opened. Used to log
+ // SimpleCache.*.EntryCreatedWithStream2Omitted only for created entries.
+ bool files_created_;
};
} // namespace disk_cache
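
Among the new members, sparse_ranges_ is the offset-keyed map the lookups rely on, and sparse_tail_offset_ tracks where the next appended range will land in the sparse file. A hypothetical sketch of the bookkeeping an append needs; kRecordHeaderSize and the assumption that each record is a fixed-size header followed by its payload are stand-ins here, since the actual on-disk layout comes from the additions to simple_entry_format.h, which this patch also extends but which is not shown in this section:

  #include <cstdint>
  #include <map>

  namespace {

  constexpr int64_t kRecordHeaderSize = 28;  // assumed, not the real constant

  struct Range {
    int64_t offset;       // position in the sparse address space
    int64_t length;       // payload size
    int64_t file_offset;  // where the payload starts in the sparse file
  };

  }  // namespace

  // Records a newly appended range: the payload is assumed to land right
  // after its header at the current tail, and the tail advances past both.
  void RecordAppendedRange(std::map<int64_t, Range>* ranges,
                           int64_t* sparse_tail_offset,
                           int64_t sparse_offset, int64_t len) {
    Range range;
    range.offset = sparse_offset;
    range.length = len;
    range.file_offset = *sparse_tail_offset + kRecordHeaderSize;
    (*ranges)[sparse_offset] = range;
    *sparse_tail_offset = range.file_offset + len;
  }
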
diff --git a/chromium/net/disk_cache/simple/simple_util.cc b/chromium/net/disk_cache/simple/simple_util.cc
index 4291b1f7773..0ad2a05d5a3 100644
--- a/chromium/net/disk_cache/simple/simple_util.cc
+++ b/chromium/net/disk_cache/simple/simple_util.cc
@@ -82,6 +82,10 @@ std::string GetFilenameFromEntryHashAndFileIndex(uint64 entry_hash,
return base::StringPrintf("%016" PRIx64 "_%1d", entry_hash, file_index);
}
+std::string GetSparseFilenameFromEntryHash(uint64 entry_hash) {
+ return base::StringPrintf("%016" PRIx64 "_s", entry_hash);
+}
+
std::string GetFilenameFromKeyAndFileIndex(const std::string& key,
int file_index) {
return GetEntryHashKeyAsHexString(key) +
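
The sparse payload lives in its own file next to the entry's stream files, named with the same 16-hex-digit entry hash but an "_s" suffix in place of a stream index. A standalone sketch of that naming, using plain snprintf instead of base::StringPrintf:

  #include <cinttypes>
  #include <cstdio>
  #include <string>

  // Builds the sparse-file name for an entry hash, matching the
  // "%016" PRIx64 "_s" format used above.
  std::string SparseFilenameSketch(uint64_t entry_hash) {
    char buf[32];
    std::snprintf(buf, sizeof(buf), "%016" PRIx64 "_s", entry_hash);
    return buf;
  }

  // e.g. SparseFilenameSketch(0xdeadbeef) == "00000000deadbeef_s"
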
@@ -122,7 +126,7 @@ bool GetMTime(const base::FilePath& path, base::Time* out_mtime) {
}
#endif
base::PlatformFileInfo file_info;
- if (!file_util::GetFileInfo(path, &file_info))
+ if (!base::GetFileInfo(path, &file_info))
return false;
*out_mtime = file_info.last_modified;
return true;
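
The GetMTime hunk is only the file_util to base namespace migration; the helper still fetches a file's last-modified time and fails cleanly when it cannot. For readers outside Chromium, a rough standard-library stand-in (std::filesystem, not the base:: API used above) is:

  #include <filesystem>
  #include <system_error>

  // Standard-library stand-in for the GetMTime helper: returns false
  // instead of throwing when the file is missing or unreadable.
  bool GetMTimeSketch(const std::filesystem::path& path,
                      std::filesystem::file_time_type* out_mtime) {
    std::error_code error;
    auto mtime = std::filesystem::last_write_time(path, error);
    if (error)
      return false;
    *out_mtime = mtime;
    return true;
  }
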
diff --git a/chromium/net/disk_cache/simple/simple_util.h b/chromium/net/disk_cache/simple/simple_util.h
index 60a237ecd86..f76215121a4 100644
--- a/chromium/net/disk_cache/simple/simple_util.h
+++ b/chromium/net/disk_cache/simple/simple_util.h
@@ -48,6 +48,9 @@ NET_EXPORT_PRIVATE std::string GetFilenameFromKeyAndFileIndex(
std::string GetFilenameFromEntryHashAndFileIndex(uint64 entry_hash,
int file_index);
+// Given the hash of an entry, returns the name of its sparse data file.
+std::string GetSparseFilenameFromEntryHash(uint64 entry_hash);
+
// Given the size of a file holding a stream in the simple backend and the key
// to an entry, returns the number of bytes in the stream.
NET_EXPORT_PRIVATE int32 GetDataSizeFromKeyAndFileSize(const std::string& key,
diff --git a/chromium/net/disk_cache/simple/simple_version_upgrade_unittest.cc b/chromium/net/disk_cache/simple/simple_version_upgrade_unittest.cc
index c9d42f10660..a5493f7c6eb 100644
--- a/chromium/net/disk_cache/simple/simple_version_upgrade_unittest.cc
+++ b/chromium/net/disk_cache/simple/simple_version_upgrade_unittest.cc
@@ -31,12 +31,6 @@ const char kFakeIndexFileName[] = "index";
// Same as |SimpleIndexFile::kIndexFileName|.
const char kIndexFileName[] = "the-real-index";
-// Same as |SimpleIndexFile::kIndexDirectory|.
-const char kIndexDirectory[] = "index-dir";
-
-// Same as |SimpleIndexFile::kTempIndexFileName|.
-const char kTempIndexFileName[] = "temp-index";
-
bool WriteFakeIndexFileV5(const base::FilePath& cache_path) {
disk_cache::FakeIndexData data;
data.version = 5;
diff --git a/chromium/net/disk_cache/v3/backend_worker.cc b/chromium/net/disk_cache/v3/backend_worker.cc
index cbccfddb5c6..be12a96eebe 100644
--- a/chromium/net/disk_cache/v3/backend_worker.cc
+++ b/chromium/net/disk_cache/v3/backend_worker.cc
@@ -331,7 +331,7 @@ bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
}
bool BackendImpl::InitBackingStore(bool* file_created) {
- if (!file_util::CreateDirectory(path_))
+ if (!base::CreateDirectory(path_))
return false;
base::FilePath index_name = path_.AppendASCII(kIndexName);
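
The final hunk applies the same base namespace migration to backend initialization: InitBackingStore still guarantees the cache directory exists before it resolves and opens the index file. A std::filesystem stand-in (not base::CreateDirectory) for that ensure-directory-then-resolve step, with the index file name passed in rather than assumed:

  #include <filesystem>
  #include <string>
  #include <system_error>

  // Ensures |cache_dir| exists, then resolves the index file path inside it.
  bool ResolveIndexPathSketch(const std::filesystem::path& cache_dir,
                              const std::string& index_name,
                              std::filesystem::path* out_index_path) {
    std::error_code error;
    std::filesystem::create_directories(cache_dir, error);
    if (error)
      return false;
    *out_index_path = cache_dir / index_name;
    return true;
  }
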