diff options
author | Allan Sandfeld Jensen <allan.jensen@theqtcompany.com> | 2015-06-18 14:10:49 +0200 |
---|---|---|
committer | Oswald Buddenhagen <oswald.buddenhagen@theqtcompany.com> | 2015-06-18 13:53:24 +0000 |
commit | 813fbf95af77a531c57a8c497345ad2c61d475b3 (patch) | |
tree | 821b2c8de8365f21b6c9ba17a236fb3006a1d506 /chromium/net/disk_cache | |
parent | af6588f8d723931a298c995fa97259bb7f7deb55 (diff) | |
download | qtwebengine-chromium-813fbf95af77a531c57a8c497345ad2c61d475b3.tar.gz |
BASELINE: Update chromium to 44.0.2403.47
Change-Id: Ie056fedba95cf5e5c76b30c4b2c80fca4764aa2f
Reviewed-by: Oswald Buddenhagen <oswald.buddenhagen@theqtcompany.com>
Diffstat (limited to 'chromium/net/disk_cache')
50 files changed, 579 insertions, 607 deletions
diff --git a/chromium/net/disk_cache/backend_unittest.cc b/chromium/net/disk_cache/backend_unittest.cc index 843a6199df7..ba8d101c32e 100644 --- a/chromium/net/disk_cache/backend_unittest.cc +++ b/chromium/net/disk_cache/backend_unittest.cc @@ -6,8 +6,11 @@ #include "base/files/file_util.h" #include "base/metrics/field_trial.h" #include "base/port.h" +#include "base/run_loop.h" +#include "base/strings/string_split.h" #include "base/strings/string_util.h" #include "base/strings/stringprintf.h" +#include "base/test/mock_entropy_provider.h" #include "base/third_party/dynamic_annotations/dynamic_annotations.h" #include "base/thread_task_runner_handle.h" #include "base/threading/platform_thread.h" @@ -126,6 +129,7 @@ class DiskCacheBackendTest : public DiskCacheTestWithCache { void BackendDisable2(); void BackendDisable3(); void BackendDisable4(); + void BackendDisabledAPI(); }; int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) { @@ -348,8 +352,8 @@ TEST_F(DiskCacheBackendTest, ShaderCacheBasics) { void DiskCacheBackendTest::BackendKeying() { InitCache(); - const char* kName1 = "the first key"; - const char* kName2 = "the first Key"; + const char kName1[] = "the first key"; + const char kName2[] = "the first Key"; disk_cache::Entry *entry1, *entry2; ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1)); @@ -1838,16 +1842,6 @@ TEST_F(DiskCacheTest, WrongVersion) { ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv)); } -class BadEntropyProvider : public base::FieldTrial::EntropyProvider { - public: - ~BadEntropyProvider() override {} - - double GetEntropyForTrial(const std::string& trial_name, - uint32 randomization_seed) const override { - return 0.5; - } -}; - // Tests that the disk cache successfully joins the control group, dropping the // existing cache in favour of a new empty cache. 
// Disabled on android since this test requires cache creator to create @@ -1865,7 +1859,7 @@ TEST_F(DiskCacheTest, SimpleCacheControlJoin) { // Instantiate the SimpleCacheTrial, forcing this run into the // ExperimentControl group. - base::FieldTrialList field_trial_list(new BadEntropyProvider()); + base::FieldTrialList field_trial_list(new base::MockEntropyProvider()); base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentControl"); net::TestCompletionCallback cb; @@ -1889,7 +1883,7 @@ TEST_F(DiskCacheTest, SimpleCacheControlJoin) { TEST_F(DiskCacheTest, SimpleCacheControlRestart) { // Instantiate the SimpleCacheTrial, forcing this run into the // ExperimentControl group. - base::FieldTrialList field_trial_list(new BadEntropyProvider()); + base::FieldTrialList field_trial_list(new base::MockEntropyProvider()); base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentControl"); @@ -1929,7 +1923,7 @@ TEST_F(DiskCacheTest, SimpleCacheControlLeave) { { // Instantiate the SimpleCacheTrial, forcing this run into the // ExperimentControl group. - base::FieldTrialList field_trial_list(new BadEntropyProvider()); + base::FieldTrialList field_trial_list(new base::MockEntropyProvider()); base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentControl"); @@ -1940,7 +1934,7 @@ TEST_F(DiskCacheTest, SimpleCacheControlLeave) { // Instantiate the SimpleCacheTrial, forcing this run into the // ExperimentNo group. - base::FieldTrialList field_trial_list(new BadEntropyProvider()); + base::FieldTrialList field_trial_list(new base::MockEntropyProvider()); base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo"); net::TestCompletionCallback cb; @@ -2733,6 +2727,51 @@ TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) { BackendDisable4(); } +// Tests the exposed API with a disabled cache. +void DiskCacheBackendTest::BackendDisabledAPI() { + cache_impl_->SetUnitTestMode(); // Simulate failure restarting the cache. 
+ + disk_cache::Entry* entry1, *entry2; + scoped_ptr<TestIterator> iter = CreateIterator(); + EXPECT_EQ(2, cache_->GetEntryCount()); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1)); + entry1->Close(); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2)); + FlushQueueForTest(); + // The cache should be disabled. + + EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType()); + EXPECT_EQ(0, cache_->GetEntryCount()); + EXPECT_NE(net::OK, OpenEntry("First", &entry2)); + EXPECT_NE(net::OK, CreateEntry("Something new", &entry2)); + EXPECT_NE(net::OK, DoomEntry("First")); + EXPECT_NE(net::OK, DoomAllEntries()); + EXPECT_NE(net::OK, DoomEntriesBetween(Time(), Time::Now())); + EXPECT_NE(net::OK, DoomEntriesSince(Time())); + iter = CreateIterator(); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2)); + + base::StringPairs stats; + cache_->GetStats(&stats); + EXPECT_TRUE(stats.empty()); + cache_->OnExternalCacheHit("First"); +} + +TEST_F(DiskCacheBackendTest, DisabledAPI) { + ASSERT_TRUE(CopyTestCache("bad_rankings2")); + DisableFirstCleanup(); + InitCache(); + BackendDisabledAPI(); +} + +TEST_F(DiskCacheBackendTest, NewEvictionDisabledAPI) { + ASSERT_TRUE(CopyTestCache("bad_rankings2")); + DisableFirstCleanup(); + SetNewEviction(); + InitCache(); + BackendDisabledAPI(); +} + TEST_F(DiskCacheTest, Backend_UsageStatsTimer) { MessageLoopHelper helper; @@ -3230,7 +3269,7 @@ TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) { SetSimpleCacheMode(); InitCache(); - const char* key = "the first key"; + const char key[] = "the first key"; disk_cache::Entry* entry = NULL; ASSERT_EQ(net::OK, CreateEntry(key, &entry)); @@ -3266,7 +3305,7 @@ TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) { SetSimpleCacheMode(); InitCache(); - const char* key = "the first key"; + const char key[] = "the first key"; disk_cache::Entry* entry = NULL; ASSERT_EQ(net::OK, CreateEntry(key, &entry)); @@ -3282,6 +3321,10 @@ TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) { entry->Close(); entry = NULL; + 
// The entry is being closed on the Simple Cache worker pool + disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting(); + base::RunLoop().RunUntilIdle(); + // Write an invalid header for stream 0 and stream 1. base::FilePath entry_file1_path = cache_path_.AppendASCII( disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0)); diff --git a/chromium/net/disk_cache/blockfile/addr.h b/chromium/net/disk_cache/blockfile/addr.h index 99d2c930709..a5e4367757b 100644 --- a/chromium/net/disk_cache/blockfile/addr.h +++ b/chromium/net/disk_cache/blockfile/addr.h @@ -25,10 +25,10 @@ enum FileType { }; const int kMaxBlockSize = 4096 * 4; -const int kMaxBlockFile = 255; +const int16 kMaxBlockFile = 255; const int kMaxNumBlocks = 4; -const int kFirstAdditionalBlockFile = 4; -const int kFirstAdditionalBlockFileV3 = 7; +const int16 kFirstAdditionalBlockFile = 4; +const size_t kFirstAdditionalBlockFileV3 = 7; // Defines a storage address for a cache record // diff --git a/chromium/net/disk_cache/blockfile/backend_impl.cc b/chromium/net/disk_cache/blockfile/backend_impl.cc index 20995b711ab..c4058abcba3 100644 --- a/chromium/net/disk_cache/blockfile/backend_impl.cc +++ b/chromium/net/disk_cache/blockfile/backend_impl.cc @@ -13,7 +13,6 @@ #include "base/message_loop/message_loop.h" #include "base/metrics/field_trial.h" #include "base/metrics/histogram.h" -#include "base/metrics/stats_counters.h" #include "base/rand_util.h" #include "base/single_thread_task_runner.h" #include "base/strings/string_util.h" @@ -41,7 +40,7 @@ using base::TimeTicks; namespace { -const char* kIndexName = "index"; +const char kIndexName[] = "index"; // Seems like ~240 MB correspond to less than 50k entries for 99% of the people. 
// Note that the actual target is to keep the index table load factor under 55% @@ -354,6 +353,9 @@ int BackendImpl::SyncDoomEntry(const std::string& key) { } int BackendImpl::SyncDoomAllEntries() { + if (disabled_) + return net::ERR_FAILED; + // This is not really an error, but it is an interesting condition. ReportError(ERR_CACHE_DOOMED); stats_.OnEvent(Stats::DOOM_CACHE); @@ -484,8 +486,10 @@ EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) { if (!cache_entry) { CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start); CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size); - CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours); - CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours); + CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, + static_cast<base::HistogramBase::Sample>(total_hours)); + CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, + static_cast<base::HistogramBase::Sample>(use_hours)); stats_.OnEvent(Stats::OPEN_MISS); return NULL; } @@ -497,11 +501,12 @@ EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) { cache_entry->entry()->address().value()); CACHE_UMA(AGE_MS, "OpenTime", 0, start); CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size); - CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours); - CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours); + CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, + static_cast<base::HistogramBase::Sample>(total_hours)); + CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, + static_cast<base::HistogramBase::Sample>(use_hours)); stats_.OnEvent(Stats::OPEN_HIT); web_fonts_histogram::RecordCacheHit(cache_entry); - SIMPLE_STATS_COUNTER("disk_cache.hit"); return cache_entry; } @@ -597,7 +602,6 @@ EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) { CACHE_UMA(AGE_MS, "CreateTime", 0, start); stats_.OnEvent(Stats::CREATE_HIT); - SIMPLE_STATS_COUNTER("disk_cache.miss"); Trace("create entry hit "); FlushIndex(); cache_entry->AddRef(); @@ -673,7 +677,8 @@ EntryImpl* 
BackendImpl::OpenNextEntryImpl(Rankings::Iterator* iterator) { } bool BackendImpl::SetMaxSize(int max_bytes) { - COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); + static_assert(sizeof(max_bytes) == sizeof(max_size_), + "unsupported int model"); if (max_bytes < 0) return false; @@ -1886,7 +1891,7 @@ void BackendImpl::ReportStats() { // time is the ratio of that bin's total count to the count in the same bin in // the TotalTime histogram. if (base::RandInt(0, 99) < hit_ratio_as_percentage) - CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours)); + CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, static_cast<int>(total_hours)); int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120; stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER)); @@ -1904,7 +1909,7 @@ void BackendImpl::ReportStats() { // is the ratio of that bin's total count to the count in the same bin in the // UseTime histogram. if (base::RandInt(0, 99) < hit_ratio_as_percentage) - CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours)); + CACHE_UMA(HOURS, "HitRatioByUseTime", 0, static_cast<int>(use_hours)); CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage); int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours; diff --git a/chromium/net/disk_cache/blockfile/backend_impl_v3.cc b/chromium/net/disk_cache/blockfile/backend_impl_v3.cc index a1024b45409..64016c41371 100644 --- a/chromium/net/disk_cache/blockfile/backend_impl_v3.cc +++ b/chromium/net/disk_cache/blockfile/backend_impl_v3.cc @@ -12,7 +12,6 @@ #include "base/message_loop/message_loop.h" #include "base/metrics/field_trial.h" #include "base/metrics/histogram.h" -#include "base/metrics/stats_counters.h" #include "base/rand_util.h" #include "base/strings/string_util.h" #include "base/strings/stringprintf.h" @@ -91,7 +90,8 @@ int BackendImplV3::Init(const CompletionCallback& callback) { // 
------------------------------------------------------------------------ bool BackendImplV3::SetMaxSize(int max_bytes) { - COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); + static_assert(sizeof(max_bytes) == sizeof(max_size_), + "unsupported int model"); if (max_bytes < 0) return false; @@ -229,7 +229,8 @@ bool BackendImplV3::IsLoaded() const { } std::string BackendImplV3::HistogramName(const char* name) const { - static const char* names[] = { "Http", "", "Media", "AppCache", "Shader" }; + static const char* const names[] = { + "Http", "", "Media", "AppCache", "Shader" }; DCHECK_NE(cache_type_, net::MEMORY_CACHE); return base::StringPrintf("DiskCache3.%s_%s", name, names[cache_type_]); } @@ -666,8 +667,8 @@ class BackendImplV3::IteratorImpl : public Backend::Iterator { : background_queue_(background_queue), data_(NULL) { } - virtual int OpenNextEntry(Entry** next_entry, - const net::CompletionCallback& callback) override { + int OpenNextEntry(Entry** next_entry, + const net::CompletionCallback& callback) override { if (!background_queue_) return net::ERR_FAILED; background_queue_->OpenNextEntry(&data_, next_entry, callback); diff --git a/chromium/net/disk_cache/blockfile/backend_worker_v3.cc b/chromium/net/disk_cache/blockfile/backend_worker_v3.cc index 1d89de6244a..b0f3250c52a 100644 --- a/chromium/net/disk_cache/blockfile/backend_worker_v3.cc +++ b/chromium/net/disk_cache/blockfile/backend_worker_v3.cc @@ -26,7 +26,7 @@ namespace { #if defined(V3_NOT_JUST_YET_READY) -const char* kIndexName = "index"; +const char kIndexName[] = "index"; // Seems like ~240 MB correspond to less than 50k entries for 99% of the people. 
// Note that the actual target is to keep the index table load factor under 55% diff --git a/chromium/net/disk_cache/blockfile/block_bitmaps_v3.cc b/chromium/net/disk_cache/blockfile/block_bitmaps_v3.cc index 011793cd343..ab86fc2c016 100644 --- a/chromium/net/disk_cache/blockfile/block_bitmaps_v3.cc +++ b/chromium/net/disk_cache/blockfile/block_bitmaps_v3.cc @@ -71,7 +71,7 @@ void BlockBitmaps::Clear() { void BlockBitmaps::ReportStats() { int used_blocks[kFirstAdditionalBlockFile]; int load[kFirstAdditionalBlockFile]; - for (int i = 0; i < kFirstAdditionalBlockFile; i++) { + for (int16 i = 0; i < kFirstAdditionalBlockFile; i++) { GetFileStats(i, &used_blocks[i], &load[i]); } UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_0", used_blocks[0]); @@ -104,7 +104,7 @@ bool BlockBitmaps::IsValid(Addr address) { } int BlockBitmaps::GetHeaderNumber(Addr address) { - DCHECK_GE(bitmaps_.size(), static_cast<size_t>(kFirstAdditionalBlockFileV3)); + DCHECK_GE(bitmaps_.size(), kFirstAdditionalBlockFileV3); DCHECK(address.is_block_file() || !address.is_initialized()); if (!address.is_initialized()) return -1; diff --git a/chromium/net/disk_cache/blockfile/block_files.cc b/chromium/net/disk_cache/blockfile/block_files.cc index 4ae217be40f..f46a9375349 100644 --- a/chromium/net/disk_cache/blockfile/block_files.cc +++ b/chromium/net/disk_cache/blockfile/block_files.cc @@ -20,7 +20,7 @@ using base::TimeTicks; namespace { -const char* kBlockName = "data_"; +const char kBlockName[] = "data_"; // This array is used to perform a fast lookup of the nibble bit pattern to the // type of entry that can be stored there (number of consecutive blocks). @@ -28,7 +28,7 @@ const char s_types[16] = {4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0}; // Returns the type of block (number of consecutive blocks that can be stored) // for a given nibble of the bitmap. 
-inline int GetMapBlockType(uint8 value) { +inline int GetMapBlockType(uint32 value) { value &= 0xf; return s_types[value]; } @@ -279,7 +279,7 @@ bool BlockFiles::Init(bool create_files) { thread_checker_.reset(new base::ThreadChecker); block_files_.resize(kFirstAdditionalBlockFile); - for (int i = 0; i < kFirstAdditionalBlockFile; i++) { + for (int16 i = 0; i < kFirstAdditionalBlockFile; i++) { if (create_files) if (!CreateBlockFile(i, static_cast<FileType>(i + 1), true)) return false; @@ -548,7 +548,7 @@ bool BlockFiles::GrowBlockFile(MappedFile* file, BlockFileHeader* header) { } MappedFile* BlockFiles::FileForNewBlock(FileType block_type, int block_count) { - COMPILE_ASSERT(RANKINGS == 1, invalid_file_type); + static_assert(RANKINGS == 1, "invalid file type"); MappedFile* file = block_files_[block_type - 1]; BlockHeader file_header(file); @@ -574,7 +574,7 @@ MappedFile* BlockFiles::FileForNewBlock(FileType block_type, int block_count) { MappedFile* BlockFiles::NextFile(MappedFile* file) { ScopedFlush flush(file); BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer()); - int new_file = header->next_file; + int16 new_file = header->next_file; if (!new_file) { // RANKINGS is not reported as a type for small entries, but we may be // extending the rankings block file. @@ -595,8 +595,8 @@ MappedFile* BlockFiles::NextFile(MappedFile* file) { return GetFile(address); } -int BlockFiles::CreateNextBlockFile(FileType block_type) { - for (int i = kFirstAdditionalBlockFile; i <= kMaxBlockFile; i++) { +int16 BlockFiles::CreateNextBlockFile(FileType block_type) { + for (int16 i = kFirstAdditionalBlockFile; i <= kMaxBlockFile; i++) { if (CreateBlockFile(i, block_type, false)) return i; } @@ -656,11 +656,11 @@ bool BlockFiles::FixBlockFileHeader(MappedFile* file) { if (file_size < file_header.Size()) return false; // file_size > 2GB is also an error. 
- const int kMinBlockSize = 36; - const int kMaxBlockSize = 4096; + const int kMinHeaderBlockSize = 36; + const int kMaxHeaderBlockSize = 4096; BlockFileHeader* header = file_header.Header(); - if (header->entry_size < kMinBlockSize || - header->entry_size > kMaxBlockSize || header->num_entries < 0) + if (header->entry_size < kMinHeaderBlockSize || + header->entry_size > kMaxHeaderBlockSize || header->num_entries < 0) return false; // Make sure that we survive crashes. diff --git a/chromium/net/disk_cache/blockfile/block_files.h b/chromium/net/disk_cache/blockfile/block_files.h index f84c314944a..7c2794a0303 100644 --- a/chromium/net/disk_cache/blockfile/block_files.h +++ b/chromium/net/disk_cache/blockfile/block_files.h @@ -136,7 +136,7 @@ class NET_EXPORT_PRIVATE BlockFiles { MappedFile* NextFile(MappedFile* file); // Creates an empty block file and returns its index. - int CreateNextBlockFile(FileType block_type); + int16 CreateNextBlockFile(FileType block_type); // Removes a chained block file that is now empty. 
bool RemoveEmptyFile(FileType block_type); diff --git a/chromium/net/disk_cache/blockfile/disk_format.h b/chromium/net/disk_cache/blockfile/disk_format.h index 95ac58b6274..b2e666abb2b 100644 --- a/chromium/net/disk_cache/blockfile/disk_format.h +++ b/chromium/net/disk_cache/blockfile/disk_format.h @@ -116,7 +116,7 @@ struct EntryStore { char key[256 - 24 * 4]; // null terminated }; -COMPILE_ASSERT(sizeof(EntryStore) == 256, bad_EntyStore); +static_assert(sizeof(EntryStore) == 256, "bad EntryStore"); const int kMaxInternalKeyLength = 4 * sizeof(EntryStore) - offsetof(EntryStore, key) - 1; @@ -146,7 +146,7 @@ struct RankingsNode { }; #pragma pack(pop) -COMPILE_ASSERT(sizeof(RankingsNode) == 36, bad_RankingsNode); +static_assert(sizeof(RankingsNode) == 36, "bad RankingsNode"); } // namespace disk_cache diff --git a/chromium/net/disk_cache/blockfile/disk_format_base.h b/chromium/net/disk_cache/blockfile/disk_format_base.h index 9f4c16e4fd4..1c49a48e737 100644 --- a/chromium/net/disk_cache/blockfile/disk_format_base.h +++ b/chromium/net/disk_cache/blockfile/disk_format_base.h @@ -61,7 +61,7 @@ struct BlockFileHeader { AllocBitmap allocation_map; }; -COMPILE_ASSERT(sizeof(BlockFileHeader) == kBlockHeaderSize, bad_header); +static_assert(sizeof(BlockFileHeader) == kBlockHeaderSize, "bad header"); // Sparse data support: // We keep a two level hierarchy to enable sparse data for an entry: the first @@ -124,8 +124,8 @@ struct SparseData { // The number of blocks stored by a child entry. 
const int kNumSparseBits = 1024; -COMPILE_ASSERT(sizeof(SparseData) == sizeof(SparseHeader) + kNumSparseBits / 8, - Invalid_SparseData_bitmap); +static_assert(sizeof(SparseData) == sizeof(SparseHeader) + kNumSparseBits / 8, + "invalid SparseData bitmap"); } // namespace disk_cache diff --git a/chromium/net/disk_cache/blockfile/disk_format_v3.h b/chromium/net/disk_cache/blockfile/disk_format_v3.h index f5811cc0fc4..f16648b5bf1 100644 --- a/chromium/net/disk_cache/blockfile/disk_format_v3.h +++ b/chromium/net/disk_cache/blockfile/disk_format_v3.h @@ -97,7 +97,7 @@ struct IndexBitmap { IndexHeaderV3 header; uint32 bitmap[kBaseBitmapBytes / 4]; // First page of the bitmap. }; -COMPILE_ASSERT(sizeof(IndexBitmap) == 4096, bad_IndexHeader); +static_assert(sizeof(IndexBitmap) == 4096, "bad IndexHeader"); // Possible states for a given entry. enum EntryState { @@ -109,7 +109,7 @@ enum EntryState { ENTRY_FIXING, // Inconsistent state. The entry is being verified. ENTRY_USED // The slot is in use (entry is present). }; -COMPILE_ASSERT(ENTRY_USED <= 7, state_uses_3_bits); +static_assert(ENTRY_USED <= 7, "state uses 3 bits"); enum EntryGroup { ENTRY_NO_USE = 0, // The entry has not been reused. @@ -118,7 +118,7 @@ enum EntryGroup { ENTRY_RESERVED, // Reserved for future use. ENTRY_EVICTED // The entry was deleted. }; -COMPILE_ASSERT(ENTRY_USED <= 7, group_uses_3_bits); +static_assert(ENTRY_USED <= 7, "group uses 3 bits"); #pragma pack(push, 1) struct IndexCell { @@ -183,7 +183,7 @@ struct IndexCell { uint64 first_part; uint8 last_part; }; -COMPILE_ASSERT(sizeof(IndexCell) == 9, bad_IndexCell); +static_assert(sizeof(IndexCell) == 9, "bad IndexCell"); const int kCellsPerBucket = 4; struct IndexBucket { @@ -191,7 +191,7 @@ struct IndexBucket { int32 next; uint32 hash; // The high order byte is reserved (should be zero). 
}; -COMPILE_ASSERT(sizeof(IndexBucket) == 44, bad_IndexBucket); +static_assert(sizeof(IndexBucket) == 44, "bad IndexBucket"); const int kBytesPerCell = 44 / kCellsPerBucket; // The main cache index. Backed by a file named index_tb1. @@ -225,7 +225,7 @@ struct EntryRecord { int32 pad[3]; uint32 self_hash; }; -COMPILE_ASSERT(sizeof(EntryRecord) == 104, bad_EntryRecord); +static_assert(sizeof(EntryRecord) == 104, "bad EntryRecord"); struct ShortEntryRecord { uint32 hash; @@ -239,7 +239,7 @@ struct ShortEntryRecord { uint32 long_hash[5]; uint32 self_hash; }; -COMPILE_ASSERT(sizeof(ShortEntryRecord) == 48, bad_ShortEntryRecord); +static_assert(sizeof(ShortEntryRecord) == 48, "bad ShortEntryRecord"); } // namespace disk_cache diff --git a/chromium/net/disk_cache/blockfile/entry_impl.cc b/chromium/net/disk_cache/blockfile/entry_impl.cc index 0a882dec6e6..a4f0ff01cff 100644 --- a/chromium/net/disk_cache/blockfile/entry_impl.cc +++ b/chromium/net/disk_cache/blockfile/entry_impl.cc @@ -62,7 +62,7 @@ class SyncCallback: public disk_cache::FileIOCallback { void SyncCallback::OnFileIOComplete(int bytes_copied) { entry_->DecrementIoCount(); if (!callback_.is_null()) { - if (entry_->net_log().IsLogging()) { + if (entry_->net_log().IsCapturing()) { entry_->net_log().EndEvent( end_event_type_, disk_cache::CreateNetLogReadWriteCompleteCallback(bytes_copied)); @@ -315,7 +315,7 @@ void EntryImpl::DoomImpl() { int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len, const CompletionCallback& callback) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.BeginEvent( net::NetLog::TYPE_ENTRY_READ_DATA, CreateNetLogReadWriteDataCallback(index, offset, buf_len, false)); @@ -323,7 +323,7 @@ int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len, int result = InternalReadData(index, offset, buf, buf_len, callback); - if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) { + if (result != net::ERR_IO_PENDING && 
net_log_.IsCapturing()) { net_log_.EndEvent( net::NetLog::TYPE_ENTRY_READ_DATA, CreateNetLogReadWriteCompleteCallback(result)); @@ -334,7 +334,7 @@ int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len, int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len, const CompletionCallback& callback, bool truncate) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.BeginEvent( net::NetLog::TYPE_ENTRY_WRITE_DATA, CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate)); @@ -343,7 +343,7 @@ int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len, int result = InternalWriteData(index, offset, buf, buf_len, callback, truncate); - if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) { + if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) { net_log_.EndEvent( net::NetLog::TYPE_ENTRY_WRITE_DATA, CreateNetLogReadWriteCompleteCallback(result)); @@ -776,7 +776,7 @@ std::string EntryImpl::GetKey() const { if (address.is_block_file()) offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; - COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index); + static_assert(kNumStreams == kKeyFileIndex, "invalid key index"); File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address, kKeyFileIndex); if (!key_file) diff --git a/chromium/net/disk_cache/blockfile/entry_impl.h b/chromium/net/disk_cache/blockfile/entry_impl.h index 1dc7ed5ac61..aafb59b66a2 100644 --- a/chromium/net/disk_cache/blockfile/entry_impl.h +++ b/chromium/net/disk_cache/blockfile/entry_impl.h @@ -6,11 +6,11 @@ #define NET_DISK_CACHE_BLOCKFILE_ENTRY_IMPL_H_ #include "base/memory/scoped_ptr.h" -#include "net/base/net_log.h" #include "net/disk_cache/blockfile/disk_format.h" #include "net/disk_cache/blockfile/storage_block-inl.h" #include "net/disk_cache/blockfile/storage_block.h" #include "net/disk_cache/disk_cache.h" +#include "net/log/net_log.h" namespace disk_cache { diff 
--git a/chromium/net/disk_cache/blockfile/entry_impl_v3.cc b/chromium/net/disk_cache/blockfile/entry_impl_v3.cc index c8898e069d6..29b063c8bdd 100644 --- a/chromium/net/disk_cache/blockfile/entry_impl_v3.cc +++ b/chromium/net/disk_cache/blockfile/entry_impl_v3.cc @@ -489,7 +489,7 @@ std::string EntryImplV3::GetKey() const { if (address.is_block_file()) offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; - COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index); + static_assert(kNumStreams == kKeyFileIndex, "invalid key index"); File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address, kKeyFileIndex); if (!key_file) @@ -547,7 +547,7 @@ int EntryImplV3::ReadData(int index, int offset, IOBuffer* buf, int buf_len, int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len, const CompletionCallback& callback) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.BeginEvent( net::NetLog::TYPE_ENTRY_READ_DATA, CreateNetLogReadWriteDataCallback(index, offset, buf_len, false)); @@ -555,7 +555,7 @@ int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len, int result = InternalReadData(index, offset, buf, buf_len, callback); - if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) { + if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) { net_log_.EndEvent( net::NetLog::TYPE_ENTRY_READ_DATA, CreateNetLogReadWriteCompleteCallback(result)); @@ -586,7 +586,7 @@ int EntryImplV3::WriteData(int index, int offset, IOBuffer* buf, int buf_len, int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len, const CompletionCallback& callback, bool truncate) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.BeginEvent( net::NetLog::TYPE_ENTRY_WRITE_DATA, CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate)); @@ -595,7 +595,7 @@ int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len, int result = 
InternalWriteData(index, offset, buf, buf_len, callback, truncate); - if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) { + if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) { net_log_.EndEvent( net::NetLog::TYPE_ENTRY_WRITE_DATA, CreateNetLogReadWriteCompleteCallback(result)); diff --git a/chromium/net/disk_cache/blockfile/entry_impl_v3.h b/chromium/net/disk_cache/blockfile/entry_impl_v3.h index c693c2a79d1..e36834b6755 100644 --- a/chromium/net/disk_cache/blockfile/entry_impl_v3.h +++ b/chromium/net/disk_cache/blockfile/entry_impl_v3.h @@ -8,10 +8,10 @@ #include <string> #include "base/memory/scoped_ptr.h" -#include "net/base/net_log.h" #include "net/disk_cache/blockfile/disk_format_v3.h" #include "net/disk_cache/blockfile/storage_block.h" #include "net/disk_cache/disk_cache.h" +#include "net/log/net_log.h" namespace disk_cache { diff --git a/chromium/net/disk_cache/blockfile/file_ios.cc b/chromium/net/disk_cache/blockfile/file_ios.cc index ef0e2fa5ca0..3ffb5a58567 100644 --- a/chromium/net/disk_cache/blockfile/file_ios.cc +++ b/chromium/net/disk_cache/blockfile/file_ios.cc @@ -47,7 +47,7 @@ class FileBackgroundIO : public disk_cache::BackgroundIO { void Write(); private: - virtual ~FileBackgroundIO() {} + ~FileBackgroundIO() override {} disk_cache::FileIOCallback* callback_; @@ -64,7 +64,7 @@ class FileBackgroundIO : public disk_cache::BackgroundIO { class FileInFlightIO : public disk_cache::InFlightIO { public: FileInFlightIO() {} - virtual ~FileInFlightIO() {} + ~FileInFlightIO() override {} // These methods start an asynchronous operation. The arguments have the same // semantics of the File asynchronous operations, with the exception that the @@ -79,8 +79,8 @@ class FileInFlightIO : public disk_cache::InFlightIO { // |cancel| is true if the actual task posted to the thread is still // queued (because we are inside WaitForPendingIO), and false if said task is // the one performing the call. 
- virtual void OnOperationComplete(disk_cache::BackgroundIO* operation, - bool cancel) override; + void OnOperationComplete(disk_cache::BackgroundIO* operation, + bool cancel) override; private: DISALLOW_COPY_AND_ASSIGN(FileInFlightIO); diff --git a/chromium/net/disk_cache/blockfile/file_win.cc b/chromium/net/disk_cache/blockfile/file_win.cc index bf313128b46..81f8cf86fa8 100644 --- a/chromium/net/disk_cache/blockfile/file_win.cc +++ b/chromium/net/disk_cache/blockfile/file_win.cc @@ -26,13 +26,14 @@ struct MyOverlapped { disk_cache::FileIOCallback* callback_; }; -COMPILE_ASSERT(!offsetof(MyOverlapped, context_), starts_with_overlapped); +static_assert(offsetof(MyOverlapped, context_) == 0, + "should start with overlapped"); // Helper class to handle the IO completion notifications from the message loop. class CompletionHandler : public base::MessageLoopForIO::IOHandler { - virtual void OnIOCompleted(base::MessageLoopForIO::IOContext* context, - DWORD actual_bytes, - DWORD error); + void OnIOCompleted(base::MessageLoopForIO::IOContext* context, + DWORD actual_bytes, + DWORD error) override; }; static base::LazyInstance<CompletionHandler> g_completion_handler = diff --git a/chromium/net/disk_cache/blockfile/in_flight_backend_io.cc b/chromium/net/disk_cache/blockfile/in_flight_backend_io.cc index 2cb3ff087d4..28d7114ba61 100644 --- a/chromium/net/disk_cache/blockfile/in_flight_backend_io.cc +++ b/chromium/net/disk_cache/blockfile/in_flight_backend_io.cc @@ -8,7 +8,6 @@ #include "base/bind_helpers.h" #include "base/compiler_specific.h" #include "base/logging.h" -#include "base/profiler/scoped_tracker.h" #include "base/single_thread_task_runner.h" #include "net/base/net_errors.h" #include "net/disk_cache/blockfile/backend_impl.h" @@ -65,14 +64,8 @@ void BackendIO::OnDone(bool cancel) { if (result() == net::OK) { static_cast<EntryImpl*>(*entry_ptr_)->OnEntryCreated(backend_); - if (cancel) { - // TODO(vadimt): Remove ScopedTracker below once crbug.com/422516 is - // 
fixed. - tracked_objects::ScopedTracker tracking_profile( - FROM_HERE_WITH_EXPLICIT_FUNCTION("422516 BackendIO::OnDone")); - + if (cancel) (*entry_ptr_)->Close(); - } } } @@ -503,14 +496,8 @@ void InFlightBackendIO::OnOperationComplete(BackgroundIO* operation, BackendIO* op = static_cast<BackendIO*>(operation); op->OnDone(cancel); - if (!op->callback().is_null() && (!cancel || op->IsEntryOperation())) { - // TODO(vadimt): Remove ScopedTracker below once crbug.com/422516 is fixed. - tracked_objects::ScopedTracker tracking_profile( - FROM_HERE_WITH_EXPLICIT_FUNCTION( - "422516 InFlightBackendIO::OnOperationComplete")); - + if (!op->callback().is_null() && (!cancel || op->IsEntryOperation())) op->callback().Run(op->result()); - } } void InFlightBackendIO::PostOperation(BackendIO* operation) { diff --git a/chromium/net/disk_cache/blockfile/in_flight_io.cc b/chromium/net/disk_cache/blockfile/in_flight_io.cc index cbfcd1729fc..d5f172b6edd 100644 --- a/chromium/net/disk_cache/blockfile/in_flight_io.cc +++ b/chromium/net/disk_cache/blockfile/in_flight_io.cc @@ -21,6 +21,9 @@ BackgroundIO::BackgroundIO(InFlightIO* controller) // Runs on the primary thread. void BackgroundIO::OnIOSignalled() { + // TODO(pkasting): Remove ScopedTracker below once crbug.com/477117 is fixed. + tracked_objects::ScopedTracker tracking_profile( + FROM_HERE_WITH_EXPLICIT_FUNCTION("477117 BackgroundIO::OnIOSignalled")); if (controller_) controller_->InvokeCallback(this, false); } @@ -88,10 +91,6 @@ void InFlightIO::OnIOComplete(BackgroundIO* operation) { // Runs on the primary thread. void InFlightIO::InvokeCallback(BackgroundIO* operation, bool cancel_task) { { - // TODO(vadimt): Remove ScopedTracker below once crbug.com/422516 is fixed. 
- tracked_objects::ScopedTracker tracking_profile( - FROM_HERE_WITH_EXPLICIT_FUNCTION("422516 InFlightIO::InvokeCallback")); - // http://crbug.com/74623 base::ThreadRestrictions::ScopedAllowWait allow_wait; operation->io_completed()->Wait(); diff --git a/chromium/net/disk_cache/blockfile/sparse_control.cc b/chromium/net/disk_cache/blockfile/sparse_control.cc index 30ea836fc7f..926cf360f19 100644 --- a/chromium/net/disk_cache/blockfile/sparse_control.cc +++ b/chromium/net/disk_cache/blockfile/sparse_control.cc @@ -161,7 +161,7 @@ net::NetLog::EventType GetSparseEventType( void LogChildOperationEnd(const net::BoundNetLog& net_log, disk_cache::SparseControl::SparseOperation operation, int result) { - if (net_log.IsLogging()) { + if (net_log.IsCapturing()) { net::NetLog::EventType event_type; switch (operation) { case disk_cache::SparseControl::kReadOperation: @@ -275,7 +275,7 @@ int SparseControl::StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf, finished_ = false; abort_ = false; - if (entry_->net_log().IsLogging()) { + if (entry_->net_log().IsCapturing()) { entry_->net_log().BeginEvent( GetSparseEventType(operation_), CreateNetLogSparseOperationCallback(offset_, buf_len_)); @@ -483,7 +483,7 @@ bool SparseControl::OpenChild() { return KillChildAndContinue(key, false); if (child_data_.header.last_block_len < 0 || - child_data_.header.last_block_len > kBlockSize) { + child_data_.header.last_block_len >= kBlockSize) { // Make sure these values are always within range. child_data_.header.last_block_len = 0; child_data_.header.last_block = -1; @@ -590,7 +590,7 @@ bool SparseControl::VerifyRange() { if (child_map_.FindNextBit(&start, last_bit, false)) { // Something is not here. 
DCHECK_GE(child_data_.header.last_block_len, 0); - DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize); + DCHECK_LT(child_data_.header.last_block_len, kBlockSize); int partial_block_len = PartialBlockLength(start); if (start == child_offset_ >> 10) { // It looks like we don't have anything. @@ -615,7 +615,7 @@ void SparseControl::UpdateRange(int result) { return; DCHECK_GE(child_data_.header.last_block_len, 0); - DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize); + DCHECK_LT(child_data_.header.last_block_len, kBlockSize); // Write the bitmap. int first_bit = child_offset_ >> 10; @@ -651,11 +651,6 @@ int SparseControl::PartialBlockLength(int block_index) const { if (block_index == child_data_.header.last_block) return child_data_.header.last_block_len; - // This may be the last stored index. - int entry_len = child_->GetDataSize(kSparseData); - if (block_index == entry_len >> 10) - return entry_len & (kBlockSize - 1); - // This is really empty. return 0; } @@ -683,15 +678,13 @@ void SparseControl::DoChildrenIO() { // Range operations are finished synchronously, often without setting // |finished_| to true. 
- if (kGetRangeOperation == operation_ && - entry_->net_log().IsLogging()) { + if (kGetRangeOperation == operation_ && entry_->net_log().IsCapturing()) { entry_->net_log().EndEvent( net::NetLog::TYPE_SPARSE_GET_RANGE, CreateNetLogGetAvailableRangeResultCallback(offset_, result_)); } if (finished_) { - if (kGetRangeOperation != operation_ && - entry_->net_log().IsLogging()) { + if (kGetRangeOperation != operation_ && entry_->net_log().IsCapturing()) { entry_->net_log().EndEvent(GetSparseEventType(operation_)); } if (pending_) @@ -721,7 +714,7 @@ bool SparseControl::DoChildIO() { int rv = 0; switch (operation_) { case kReadOperation: - if (entry_->net_log().IsLogging()) { + if (entry_->net_log().IsCapturing()) { entry_->net_log().BeginEvent( net::NetLog::TYPE_SPARSE_READ_CHILD_DATA, CreateNetLogSparseReadWriteCallback(child_->net_log().source(), @@ -731,7 +724,7 @@ bool SparseControl::DoChildIO() { child_len_, callback); break; case kWriteOperation: - if (entry_->net_log().IsLogging()) { + if (entry_->net_log().IsCapturing()) { entry_->net_log().BeginEvent( net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, CreateNetLogSparseReadWriteCallback(child_->net_log().source(), @@ -769,27 +762,49 @@ int SparseControl::DoGetAvailableRange() { if (!child_) return child_len_; // Move on to the next child. - // Check that there are no holes in this range. - int last_bit = (child_offset_ + child_len_ + 1023) >> 10; + // Bits on the bitmap should only be set when the corresponding block was + // fully written (it's really being used). If a block is partially used, it + // has to start with valid data, the length of the valid data is saved in + // |header.last_block_len| and the block itself should match + // |header.last_block|. 
+ // + // In other words, (|header.last_block| + |header.last_block_len|) is the + // offset where the last write ended, and data in that block (which is not + // marked as used because it is not full) will only be reused if the next + // write continues at that point. + // + // This code has to find if there is any data between child_offset_ and + // child_offset_ + child_len_. + int last_bit = (child_offset_ + child_len_ + kBlockSize - 1) >> 10; int start = child_offset_ >> 10; int partial_start_bytes = PartialBlockLength(start); int found = start; int bits_found = child_map_.FindBits(&found, last_bit, true); + bool is_last_block_in_range = start < child_data_.header.last_block && + child_data_.header.last_block < last_bit; - // We don't care if there is a partial block in the middle of the range. int block_offset = child_offset_ & (kBlockSize - 1); - if (!bits_found && partial_start_bytes <= block_offset) - return child_len_; + if (!bits_found && partial_start_bytes <= block_offset) { + if (!is_last_block_in_range) + return child_len_; + found = last_bit - 1; // There are some bytes here. + } // We are done. Just break the loop and reset result_ to our real result. range_found_ = true; - // found now points to the first 1. Lets see if we have zeros before it. - int empty_start = std::max((found << 10) - child_offset_, 0); - int bytes_found = bits_found << 10; bytes_found += PartialBlockLength(found + bits_found); + // found now points to the first bytes. Lets see if we have data before it. + int empty_start = std::max((found << 10) - child_offset_, 0); + if (empty_start >= child_len_) + return child_len_; + + // At this point we have bytes_found stored after (found << 10), and we want + // child_len_ bytes after child_offset_. The first empty_start bytes after + // child_offset_ are invalid. 
+ if (start == found) bytes_found -= block_offset; @@ -798,7 +813,7 @@ int SparseControl::DoGetAvailableRange() { // query that we have to subtract from the range that we searched. result_ = std::min(bytes_found, child_len_ - empty_start); - if (!bits_found) { + if (partial_start_bytes) { result_ = std::min(partial_start_bytes - block_offset, child_len_); empty_start = 0; } @@ -839,7 +854,7 @@ void SparseControl::OnChildIOCompleted(int result) { // We'll return the current result of the operation, which may be less than // the bytes to read or write, but the user cancelled the operation. abort_ = false; - if (entry_->net_log().IsLogging()) { + if (entry_->net_log().IsCapturing()) { entry_->net_log().AddEvent(net::NetLog::TYPE_CANCELLED); entry_->net_log().EndEvent(GetSparseEventType(operation_)); } diff --git a/chromium/net/disk_cache/blockfile/sparse_control_v3.cc b/chromium/net/disk_cache/blockfile/sparse_control_v3.cc index cba0ed5393f..bee23dcce25 100644 --- a/chromium/net/disk_cache/blockfile/sparse_control_v3.cc +++ b/chromium/net/disk_cache/blockfile/sparse_control_v3.cc @@ -56,7 +56,7 @@ class ChildrenDeleter ChildrenDeleter(disk_cache::BackendImpl* backend, const std::string& name) : backend_(backend->GetWeakPtr()), name_(name), signature_(0) {} - virtual void OnFileIOComplete(int bytes_copied) override; + void OnFileIOComplete(int bytes_copied) override; // Two ways of deleting the children: if we have the children map, use Start() // directly, otherwise pass the data address to ReadData(). 
@@ -65,7 +65,7 @@ class ChildrenDeleter private: friend class base::RefCounted<ChildrenDeleter>; - virtual ~ChildrenDeleter() {} + ~ChildrenDeleter() override {} void DeleteChildren(); @@ -163,7 +163,7 @@ net::NetLog::EventType GetSparseEventType( void LogChildOperationEnd(const net::BoundNetLog& net_log, disk_cache::SparseControl::SparseOperation operation, int result) { - if (net_log.IsLogging()) { + if (net_log.IsCapturing()) { net::NetLog::EventType event_type; switch (operation) { case disk_cache::SparseControl::kReadOperation: @@ -254,7 +254,7 @@ int SparseControl::StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf, finished_ = false; abort_ = false; - if (entry_->net_log().IsLogging()) { + if (entry_->net_log().IsCapturing()) { entry_->net_log().BeginEvent( GetSparseEventType(operation_), CreateNetLogSparseOperationCallback(offset_, buf_len_)); @@ -563,7 +563,7 @@ bool SparseControl::DoChildIO() { int rv = 0; switch (operation_) { case kReadOperation: - if (entry_->net_log().IsLogging()) { + if (entry_->net_log().IsCapturing()) { entry_->net_log().BeginEvent( net::NetLog::TYPE_SPARSE_READ_CHILD_DATA, CreateNetLogSparseReadWriteCallback(child_->net_log().source(), @@ -573,7 +573,7 @@ bool SparseControl::DoChildIO() { child_len_, callback); break; case kWriteOperation: - if (entry_->net_log().IsLogging()) { + if (entry_->net_log().IsCapturing()) { entry_->net_log().BeginEvent( net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, CreateNetLogSparseReadWriteCallback(child_->net_log().source(), @@ -846,7 +846,7 @@ void SparseControl::OnChildIOCompleted(int result) { // We'll return the current result of the operation, which may be less than // the bytes to read or write, but the user cancelled the operation. 
abort_ = false; - if (entry_->net_log().IsLogging()) { + if (entry_->net_log().IsCapturing()) { entry_->net_log().AddEvent(net::NetLog::TYPE_CANCELLED); entry_->net_log().EndEvent(GetSparseEventType(operation_)); } diff --git a/chromium/net/disk_cache/blockfile/stats.cc b/chromium/net/disk_cache/blockfile/stats.cc index 70592e4bded..bb319f55b15 100644 --- a/chromium/net/disk_cache/blockfile/stats.cc +++ b/chromium/net/disk_cache/blockfile/stats.cc @@ -24,7 +24,7 @@ struct OnDiskStats { int data_sizes[disk_cache::Stats::kDataSizesLength]; int64 counters[disk_cache::Stats::MAX_COUNTER]; }; -COMPILE_ASSERT(sizeof(OnDiskStats) < 512, needs_more_than_2_blocks); +static_assert(sizeof(OnDiskStats) < 512, "needs more than 2 blocks"); // Returns the "floor" (as opposed to "ceiling") of log base 2 of number. int LogBase2(int32 number) { @@ -43,7 +43,7 @@ int LogBase2(int32 number) { } // WARNING: Add new stats only at the end, or change LoadStats(). -static const char* kCounterNames[] = { +const char* const kCounterNames[] = { "Open miss", "Open hit", "Create miss", @@ -67,8 +67,8 @@ static const char* kCounterNames[] = { "Doom recent entries", "unused" }; -COMPILE_ASSERT(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER, - update_the_names); +static_assert(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER, + "update the names"); } // namespace @@ -107,8 +107,18 @@ bool Stats::Init(void* data, int num_bytes, Addr address) { local_stats.size = sizeof(local_stats); } else if (num_bytes >= static_cast<int>(sizeof(*stats))) { stats = reinterpret_cast<OnDiskStats*>(data); - if (!VerifyStats(stats)) - return false; + if (!VerifyStats(stats)) { + memset(&local_stats, 0, sizeof(local_stats)); + if (memcmp(stats, &local_stats, sizeof(local_stats))) { + return false; + } else { + // The storage is empty which means that SerializeStats() was never + // called on the last run. Just re-initialize everything. 
+ local_stats.signature = kDiskSignature; + local_stats.size = sizeof(local_stats); + stats = &local_stats; + } + } } else { return false; } @@ -155,7 +165,7 @@ void Stats::InitSizeHistogram() { int Stats::StorageSize() { // If we have more than 512 bytes of counters, change kDiskSignature so we // don't overwrite something else (LoadStats must fail). - COMPILE_ASSERT(sizeof(OnDiskStats) <= 256 * 2, use_more_blocks); + static_assert(sizeof(OnDiskStats) <= 256 * 2, "use more blocks"); return 256 * 2; } @@ -300,7 +310,7 @@ int Stats::GetStatsBucket(int32 size) { // From this point on, use a logarithmic scale. int result = LogBase2(size) + 1; - COMPILE_ASSERT(kDataSizesLength > 16, update_the_scale); + static_assert(kDataSizesLength > 16, "update the scale"); if (result >= kDataSizesLength) result = kDataSizesLength - 1; diff --git a/chromium/net/disk_cache/blockfile/stats.h b/chromium/net/disk_cache/blockfile/stats.h index 8e1293d61d4..32dcc2f22b5 100644 --- a/chromium/net/disk_cache/blockfile/stats.h +++ b/chromium/net/disk_cache/blockfile/stats.h @@ -6,9 +6,10 @@ #define NET_DISK_CACHE_BLOCKFILE_STATS_H_ #include <string> -#include <vector> #include "base/basictypes.h" +#include "base/strings/string_split.h" +#include "net/base/net_export.h" #include "net/disk_cache/blockfile/addr.h" namespace base { @@ -17,10 +18,10 @@ class HistogramSamples; namespace disk_cache { -typedef std::vector<std::pair<std::string, std::string> > StatsItems; +using StatsItems = base::StringPairs; // This class stores cache-specific usage information, for tunning purposes. -class Stats { +class NET_EXPORT_PRIVATE Stats { public: static const int kDataSizesLength = 28; enum Counters { diff --git a/chromium/net/disk_cache/blockfile/stats_unittest.cc b/chromium/net/disk_cache/blockfile/stats_unittest.cc new file mode 100644 index 00000000000..fe47bdd4cf6 --- /dev/null +++ b/chromium/net/disk_cache/blockfile/stats_unittest.cc @@ -0,0 +1,83 @@ +// Copyright 2015 The Chromium Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "net/disk_cache/blockfile/stats.h" + +#include "base/memory/scoped_ptr.h" +#include "testing/gtest/include/gtest/gtest.h" + +TEST(DiskCacheStatsTest, Init) { + disk_cache::Stats stats; + EXPECT_TRUE(stats.Init(nullptr, 0, disk_cache::Addr())); + EXPECT_EQ(0, stats.GetCounter(disk_cache::Stats::TRIM_ENTRY)); +} + +TEST(DiskCacheStatsTest, InitWithEmptyBuffer) { + disk_cache::Stats stats; + int required_len = stats.StorageSize(); + scoped_ptr<char[]> storage(new char[required_len]); + memset(storage.get(), 0, required_len); + + ASSERT_TRUE(stats.Init(storage.get(), required_len, disk_cache::Addr())); + EXPECT_EQ(0, stats.GetCounter(disk_cache::Stats::TRIM_ENTRY)); +} + +TEST(DiskCacheStatsTest, FailsInit) { + disk_cache::Stats stats; + int required_len = stats.StorageSize(); + scoped_ptr<char[]> storage(new char[required_len]); + memset(storage.get(), 0, required_len); + + // Try a small buffer. + EXPECT_LT(200, required_len); + disk_cache::Addr addr; + EXPECT_FALSE(stats.Init(storage.get(), 200, addr)); + + // Try a buffer with garbage. 
+ memset(storage.get(), 'a', required_len); + EXPECT_FALSE(stats.Init(storage.get(), required_len, addr)); +} + +TEST(DiskCacheStatsTest, SaveRestore) { + scoped_ptr<disk_cache::Stats> stats(new disk_cache::Stats); + + disk_cache::Addr addr(5); + ASSERT_TRUE(stats->Init(nullptr, 0, addr)); + stats->SetCounter(disk_cache::Stats::CREATE_ERROR, 11); + stats->SetCounter(disk_cache::Stats::DOOM_ENTRY, 13); + stats->OnEvent(disk_cache::Stats::MIN_COUNTER); + stats->OnEvent(disk_cache::Stats::TRIM_ENTRY); + stats->OnEvent(disk_cache::Stats::DOOM_RECENT); + + int required_len = stats->StorageSize(); + scoped_ptr<char[]> storage(new char[required_len]); + disk_cache::Addr out_addr; + int real_len = stats->SerializeStats(storage.get(), required_len, &out_addr); + EXPECT_GE(required_len, real_len); + EXPECT_EQ(out_addr, addr); + + stats.reset(new disk_cache::Stats); + ASSERT_TRUE(stats->Init(storage.get(), real_len, addr)); + EXPECT_EQ(1, stats->GetCounter(disk_cache::Stats::MIN_COUNTER)); + EXPECT_EQ(1, stats->GetCounter(disk_cache::Stats::TRIM_ENTRY)); + EXPECT_EQ(1, stats->GetCounter(disk_cache::Stats::DOOM_RECENT)); + EXPECT_EQ(0, stats->GetCounter(disk_cache::Stats::OPEN_HIT)); + EXPECT_EQ(0, stats->GetCounter(disk_cache::Stats::READ_DATA)); + EXPECT_EQ(0, stats->GetCounter(disk_cache::Stats::LAST_REPORT_TIMER)); + EXPECT_EQ(11, stats->GetCounter(disk_cache::Stats::CREATE_ERROR)); + EXPECT_EQ(13, stats->GetCounter(disk_cache::Stats::DOOM_ENTRY)); + + // Now pass the whole buffer. It shoulod not matter that there is unused + // space at the end. 
+ stats.reset(new disk_cache::Stats); + ASSERT_TRUE(stats->Init(storage.get(), required_len, addr)); + EXPECT_EQ(1, stats->GetCounter(disk_cache::Stats::MIN_COUNTER)); + EXPECT_EQ(1, stats->GetCounter(disk_cache::Stats::TRIM_ENTRY)); + EXPECT_EQ(1, stats->GetCounter(disk_cache::Stats::DOOM_RECENT)); + EXPECT_EQ(0, stats->GetCounter(disk_cache::Stats::OPEN_HIT)); + EXPECT_EQ(0, stats->GetCounter(disk_cache::Stats::READ_DATA)); + EXPECT_EQ(0, stats->GetCounter(disk_cache::Stats::LAST_REPORT_TIMER)); + EXPECT_EQ(11, stats->GetCounter(disk_cache::Stats::CREATE_ERROR)); + EXPECT_EQ(13, stats->GetCounter(disk_cache::Stats::DOOM_ENTRY)); +} diff --git a/chromium/net/disk_cache/blockfile/storage_block.h b/chromium/net/disk_cache/blockfile/storage_block.h index 0d514741b6a..6db4a3101de 100644 --- a/chromium/net/disk_cache/blockfile/storage_block.h +++ b/chromium/net/disk_cache/blockfile/storage_block.h @@ -34,9 +34,9 @@ class StorageBlock : public FileBlock { virtual ~StorageBlock(); // FileBlock interface. - virtual void* buffer() const; - virtual size_t size() const; - virtual int offset() const; + void* buffer() const override; + size_t size() const override; + int offset() const override; // Allows the overide of dummy values passed on the constructor. bool LazyInit(MappedFile* file, Addr address); diff --git a/chromium/net/disk_cache/blockfile/stress_cache.cc b/chromium/net/disk_cache/blockfile/stress_cache.cc deleted file mode 100644 index f28e8af9fd5..00000000000 --- a/chromium/net/disk_cache/blockfile/stress_cache.cc +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright (c) 2012 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// This is a simple application that stress-tests the crash recovery of the disk -// cache. The main application starts a copy of itself on a loop, checking the -// exit code of the child process. 
When the child dies in an unexpected way, -// the main application quits. - -// The child application has two threads: one to exercise the cache in an -// infinite loop, and another one to asynchronously kill the process. - -// A regular build should never crash. -// To test that the disk cache doesn't generate critical errors with regular -// application level crashes, edit stress_support.h. - -#include <string> -#include <vector> - -#include "base/at_exit.h" -#include "base/bind.h" -#include "base/command_line.h" -#include "base/debug/debugger.h" -#include "base/files/file_path.h" -#include "base/logging.h" -#include "base/message_loop/message_loop.h" -#include "base/path_service.h" -#include "base/process/kill.h" -#include "base/process/launch.h" -#include "base/process/process_handle.h" -#include "base/strings/string_number_conversions.h" -#include "base/strings/string_util.h" -#include "base/strings/utf_string_conversions.h" -#include "base/threading/platform_thread.h" -#include "base/threading/thread.h" -#include "net/base/io_buffer.h" -#include "net/base/net_errors.h" -#include "net/base/test_completion_callback.h" -#include "net/disk_cache/blockfile/backend_impl.h" -#include "net/disk_cache/blockfile/stress_support.h" -#include "net/disk_cache/blockfile/trace.h" -#include "net/disk_cache/disk_cache.h" -#include "net/disk_cache/disk_cache_test_util.h" - -#if defined(OS_WIN) -#include "base/logging_win.h" -#endif - -using base::Time; - -const int kError = -1; -const int kExpectedCrash = 100; - -// Starts a new process. 
-int RunSlave(int iteration) { - base::FilePath exe; - PathService::Get(base::FILE_EXE, &exe); - - base::CommandLine cmdline(exe); - cmdline.AppendArg(base::IntToString(iteration)); - - base::ProcessHandle handle; - if (!base::LaunchProcess(cmdline, base::LaunchOptions(), &handle)) { - printf("Unable to run test\n"); - return kError; - } - - int exit_code; - if (!base::WaitForExitCode(handle, &exit_code)) { - printf("Unable to get return code\n"); - return kError; - } - return exit_code; -} - -// Main loop for the master process. -int MasterCode() { - for (int i = 0; i < 100000; i++) { - int ret = RunSlave(i); - if (kExpectedCrash != ret) - return ret; - } - - printf("More than enough...\n"); - - return 0; -} - -// ----------------------------------------------------------------------- - -std::string GenerateStressKey() { - char key[20 * 1024]; - size_t size = 50 + rand() % 20000; - CacheTestFillBuffer(key, size, true); - - key[size - 1] = '\0'; - return std::string(key); -} - -// This thread will loop forever, adding and removing entries from the cache. -// iteration is the current crash cycle, so the entries on the cache are marked -// to know which instance of the application wrote them. -void StressTheCache(int iteration) { - int cache_size = 0x2000000; // 32MB. - uint32 mask = 0xfff; // 4096 entries. 
- - base::FilePath path; - PathService::Get(base::DIR_TEMP, &path); - path = path.AppendASCII("cache_test_stress"); - - base::Thread cache_thread("CacheThread"); - if (!cache_thread.StartWithOptions( - base::Thread::Options(base::MessageLoop::TYPE_IO, 0))) - return; - - disk_cache::BackendImpl* cache = - new disk_cache::BackendImpl(path, mask, - cache_thread.message_loop_proxy().get(), - NULL); - cache->SetMaxSize(cache_size); - cache->SetFlags(disk_cache::kNoLoadProtection); - - net::TestCompletionCallback cb; - int rv = cache->Init(cb.callback()); - - if (cb.GetResult(rv) != net::OK) { - printf("Unable to initialize cache.\n"); - return; - } - printf("Iteration %d, initial entries: %d\n", iteration, - cache->GetEntryCount()); - - int seed = static_cast<int>(Time::Now().ToInternalValue()); - srand(seed); - - // kNumKeys is meant to be enough to have about 3x or 4x iterations before - // the process crashes. -#ifdef NDEBUG - const int kNumKeys = 4000; -#else - const int kNumKeys = 1200; -#endif - const int kNumEntries = 30; - std::string keys[kNumKeys]; - disk_cache::Entry* entries[kNumEntries] = {0}; - - for (int i = 0; i < kNumKeys; i++) { - keys[i] = GenerateStressKey(); - } - - const int kSize = 20000; - scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); - memset(buffer->data(), 'k', kSize); - - for (int i = 0;; i++) { - int slot = rand() % kNumEntries; - int key = rand() % kNumKeys; - bool truncate = (rand() % 2 == 0); - int size = kSize - (rand() % 20) * kSize / 20; - - if (entries[slot]) - entries[slot]->Close(); - - net::TestCompletionCallback cb; - rv = cache->OpenEntry(keys[key], &entries[slot], cb.callback()); - if (cb.GetResult(rv) != net::OK) { - rv = cache->CreateEntry(keys[key], &entries[slot], cb.callback()); - CHECK_EQ(net::OK, cb.GetResult(rv)); - } - - base::snprintf(buffer->data(), kSize, - "i: %d iter: %d, size: %d, truncate: %d ", i, iteration, - size, truncate ? 
1 : 0); - rv = entries[slot]->WriteData(0, 0, buffer.get(), size, cb.callback(), - truncate); - CHECK_EQ(size, cb.GetResult(rv)); - - if (rand() % 100 > 80) { - key = rand() % kNumKeys; - net::TestCompletionCallback cb2; - rv = cache->DoomEntry(keys[key], cb2.callback()); - cb2.GetResult(rv); - } - - if (!(i % 100)) - printf("Entries: %d \r", i); - } -} - -// We want to prevent the timer thread from killing the process while we are -// waiting for the debugger to attach. -bool g_crashing = false; - -// RunSoon() and CrashCallback() reference each other, unfortunately. -void RunSoon(base::MessageLoop* target_loop); - -void CrashCallback() { - // Keep trying to run. - RunSoon(base::MessageLoop::current()); - - if (g_crashing) - return; - - if (rand() % 100 > 30) { - printf("sweet death...\n"); -#if defined(OS_WIN) - // Windows does more work on _exit() that we would like, so we use Kill. - base::KillProcessById(base::GetCurrentProcId(), kExpectedCrash, false); -#elif defined(OS_POSIX) - // On POSIX, _exit() will terminate the process with minimal cleanup, - // and it is cleaner than killing. 
- _exit(kExpectedCrash); -#endif - } -} - -void RunSoon(base::MessageLoop* target_loop) { - const base::TimeDelta kTaskDelay = base::TimeDelta::FromSeconds(10); - target_loop->PostDelayedTask( - FROM_HERE, base::Bind(&CrashCallback), kTaskDelay); -} - -// We leak everything here :) -bool StartCrashThread() { - base::Thread* thread = new base::Thread("party_crasher"); - if (!thread->Start()) - return false; - - RunSoon(thread->message_loop()); - return true; -} - -void CrashHandler(const std::string& str) { - g_crashing = true; - base::debug::BreakDebugger(); -} - -bool MessageHandler(int severity, const char* file, int line, - size_t message_start, const std::string& str) { - const size_t kMaxMessageLen = 48; - char message[kMaxMessageLen]; - size_t len = std::min(str.length() - message_start, kMaxMessageLen - 1); - - memcpy(message, str.c_str() + message_start, len); - message[len] = '\0'; -#if !defined(DISK_CACHE_TRACE_TO_LOG) - disk_cache::Trace("%s", message); -#endif - return false; -} - -// ----------------------------------------------------------------------- - -#if defined(OS_WIN) -// {B9A153D4-31C3-48e4-9ABF-D54383F14A0D} -const GUID kStressCacheTraceProviderName = { - 0xb9a153d4, 0x31c3, 0x48e4, - { 0x9a, 0xbf, 0xd5, 0x43, 0x83, 0xf1, 0x4a, 0xd } }; -#endif - -int main(int argc, const char* argv[]) { - // Setup an AtExitManager so Singleton objects will be destructed. - base::AtExitManager at_exit_manager; - - if (argc < 2) - return MasterCode(); - - logging::SetLogAssertHandler(CrashHandler); - logging::SetLogMessageHandler(MessageHandler); - -#if defined(OS_WIN) - logging::LogEventProvider::Initialize(kStressCacheTraceProviderName); -#else - base::CommandLine::Init(argc, argv); - logging::LoggingSettings settings; - settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG; - logging::InitLogging(settings); -#endif - - // Some time for the memory manager to flush stuff. 
- base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(3)); - base::MessageLoopForIO message_loop; - - char* end; - long int iteration = strtol(argv[1], &end, 0); - - if (!StartCrashThread()) { - printf("failed to start thread\n"); - return kError; - } - - StressTheCache(iteration); - return 0; -} diff --git a/chromium/net/disk_cache/cache_creator.cc b/chromium/net/disk_cache/cache_creator.cc index 93c06b3232d..8c4400c2f23 100644 --- a/chromium/net/disk_cache/cache_creator.cc +++ b/chromium/net/disk_cache/cache_creator.cc @@ -4,7 +4,6 @@ #include "base/files/file_path.h" #include "base/metrics/field_trial.h" -#include "base/profiler/scoped_tracker.h" #include "base/single_thread_task_runner.h" #include "base/strings/stringprintf.h" #include "net/base/cache_type.h" @@ -140,10 +139,6 @@ void CacheCreator::DoCallback(int result) { // If the initialization of the cache fails, and |force| is true, we will // discard the whole cache and create a new one. void CacheCreator::OnIOComplete(int result) { - // TODO(vadimt): Remove ScopedTracker below once crbug.com/422516 is fixed. - tracked_objects::ScopedTracker tracking_profile( - FROM_HERE_WITH_EXPLICIT_FUNCTION("422516 CacheCreator::OnIOComplete")); - if (result == net::OK || !force_ || retry_) return DoCallback(result); diff --git a/chromium/net/disk_cache/disk_cache.h b/chromium/net/disk_cache/disk_cache.h index 769ab361b19..84c1ee99516 100644 --- a/chromium/net/disk_cache/disk_cache.h +++ b/chromium/net/disk_cache/disk_cache.h @@ -14,6 +14,7 @@ #include "base/basictypes.h" #include "base/memory/ref_counted.h" #include "base/memory/scoped_ptr.h" +#include "base/strings/string_split.h" #include "base/time/time.h" #include "net/base/cache_type.h" #include "net/base/completion_callback.h" @@ -149,8 +150,7 @@ class NET_EXPORT Backend { virtual scoped_ptr<Iterator> CreateIterator() = 0; // Return a list of cache statistics. 
- virtual void GetStats( - std::vector<std::pair<std::string, std::string> >* stats) = 0; + virtual void GetStats(base::StringPairs* stats) = 0; // Called whenever an external cache in the system reuses the resource // referred to by |key|. diff --git a/chromium/net/disk_cache/disk_cache_test_util.h b/chromium/net/disk_cache/disk_cache_test_util.h index ad6b6ac9414..0f40f5e44d4 100644 --- a/chromium/net/disk_cache/disk_cache_test_util.h +++ b/chromium/net/disk_cache/disk_cache_test_util.h @@ -10,7 +10,6 @@ #include "base/files/file_path.h" #include "base/message_loop/message_loop.h" #include "base/timer/timer.h" -#include "base/tuple.h" #include "build/build_config.h" // Re-creates a given test file inside the cache test folder. diff --git a/chromium/net/disk_cache/entry_unittest.cc b/chromium/net/disk_cache/entry_unittest.cc index aac45ac318e..228c654221e 100644 --- a/chromium/net/disk_cache/entry_unittest.cc +++ b/chromium/net/disk_cache/entry_unittest.cc @@ -670,7 +670,8 @@ void DiskCacheEntryTest::StreamAccess() { ASSERT_TRUE(NULL != entry); const int kReadBufferSize = 600; const int kFinalReadSize = kBufferSize - kReadBufferSize; - COMPILE_ASSERT(kFinalReadSize < kReadBufferSize, should_be_exactly_two_reads); + static_assert(kFinalReadSize < kReadBufferSize, + "should be exactly two reads"); scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kReadBufferSize)); for (int i = 0; i < kNumStreams; i++) { memset(buffer2->data(), 0, kReadBufferSize); @@ -1798,6 +1799,111 @@ TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) { GetAvailableRange(); } +// Tests that non-sequential writes that are not aligned with the minimum sparse +// data granularity (1024 bytes) do in fact result in dropped data. 
+TEST_F(DiskCacheEntryTest, SparseWriteDropped) { + InitCache(); + std::string key("the first key"); + disk_cache::Entry* entry; + ASSERT_EQ(net::OK, CreateEntry(key, &entry)); + + const int kSize = 180; + scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize)); + scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize)); + CacheTestFillBuffer(buf_1->data(), kSize, false); + + // Do small writes (180 bytes) that get increasingly close to a 1024-byte + // boundary. All data should be dropped until a boundary is crossed, at which + // point the data after the boundary is saved (at least for a while). + int offset = 1024 - 500; + int rv = 0; + net::TestCompletionCallback cb; + int64 start; + for (int i = 0; i < 5; i++) { + // Check result of last GetAvailableRange. + EXPECT_EQ(0, rv); + + rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback()); + EXPECT_EQ(kSize, cb.GetResult(rv)); + + rv = entry->GetAvailableRange(offset - 100, kSize, &start, cb.callback()); + EXPECT_EQ(0, cb.GetResult(rv)); + + rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback()); + rv = cb.GetResult(rv); + if (!rv) { + rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback()); + EXPECT_EQ(0, cb.GetResult(rv)); + rv = 0; + } + offset += 1024 * i + 100; + } + + // The last write started 100 bytes below a bundary, so there should be 80 + // bytes after the boundary. + EXPECT_EQ(80, rv); + EXPECT_EQ(1024 * 7, start); + rv = entry->ReadSparseData(start, buf_2.get(), kSize, cb.callback()); + EXPECT_EQ(80, cb.GetResult(rv)); + EXPECT_EQ(0, memcmp(buf_1.get()->data() + 100, buf_2.get()->data(), 80)); + + // And even that part is dropped when another write changes the offset. 
+ offset = start; + rv = entry->WriteSparseData(0, buf_1.get(), kSize, cb.callback()); + EXPECT_EQ(kSize, cb.GetResult(rv)); + + rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback()); + EXPECT_EQ(0, cb.GetResult(rv)); + entry->Close(); +} + +// Tests that small sequential writes are not dropped. +TEST_F(DiskCacheEntryTest, SparseSquentialWriteNotDropped) { + InitCache(); + std::string key("the first key"); + disk_cache::Entry* entry; + ASSERT_EQ(net::OK, CreateEntry(key, &entry)); + + const int kSize = 180; + scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize)); + scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize)); + CacheTestFillBuffer(buf_1->data(), kSize, false); + + // Any starting offset is fine as long as it is 1024-bytes aligned. + int rv = 0; + net::TestCompletionCallback cb; + int64 start; + int64 offset = 1024 * 11; + for (; offset < 20000; offset += kSize) { + rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback()); + EXPECT_EQ(kSize, cb.GetResult(rv)); + + rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback()); + EXPECT_EQ(kSize, cb.GetResult(rv)); + EXPECT_EQ(offset, start); + + rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback()); + EXPECT_EQ(kSize, cb.GetResult(rv)); + EXPECT_EQ(0, memcmp(buf_1.get()->data(), buf_2.get()->data(), kSize)); + } + + entry->Close(); + FlushQueueForTest(); + + // Verify again the last write made. 
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry)); + offset -= kSize; + rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback()); + EXPECT_EQ(kSize, cb.GetResult(rv)); + EXPECT_EQ(offset, start); + + rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback()); + EXPECT_EQ(kSize, cb.GetResult(rv)); + EXPECT_EQ(0, memcmp(buf_1.get()->data(), buf_2.get()->data(), kSize)); + + entry->Close(); +} + void DiskCacheEntryTest::CouldBeSparse() { std::string key("the first key"); disk_cache::Entry* entry; @@ -2133,7 +2239,11 @@ void DiskCacheEntryTest::PartialSparseEntry() { EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize)); // This read should not change anything. - EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize)); + if (memory_only_ || simple_cache_mode_) + EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize)); + else + EXPECT_EQ(0, ReadSparseData(entry, 24000, buf2.get(), kSize)); + EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize)); EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize)); @@ -2153,7 +2263,11 @@ void DiskCacheEntryTest::PartialSparseEntry() { EXPECT_EQ(500, cb.GetResult(rv)); EXPECT_EQ(kSize, start); rv = entry->GetAvailableRange(20 * 1024, 10000, &start, cb.callback()); - EXPECT_EQ(3616, cb.GetResult(rv)); + if (memory_only_ || simple_cache_mode_) + EXPECT_EQ(3616, cb.GetResult(rv)); + else + EXPECT_EQ(3072, cb.GetResult(rv)); + EXPECT_EQ(20 * 1024, start); // 1. Query before a filled 1KB block. 
diff --git a/chromium/net/disk_cache/memory/mem_backend_impl.cc b/chromium/net/disk_cache/memory/mem_backend_impl.cc index cc33a002c7f..8350963c6e3 100644 --- a/chromium/net/disk_cache/memory/mem_backend_impl.cc +++ b/chromium/net/disk_cache/memory/mem_backend_impl.cc @@ -76,7 +76,8 @@ bool MemBackendImpl::Init() { } bool MemBackendImpl::SetMaxSize(int max_bytes) { - COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); + static_assert(sizeof(max_bytes) == sizeof(max_size_), + "unsupported int model"); if (max_bytes < 0) return false; diff --git a/chromium/net/disk_cache/memory/mem_backend_impl.h b/chromium/net/disk_cache/memory/mem_backend_impl.h index 7b18ca06138..6be8e946797 100644 --- a/chromium/net/disk_cache/memory/mem_backend_impl.h +++ b/chromium/net/disk_cache/memory/mem_backend_impl.h @@ -10,6 +10,7 @@ #include "base/compiler_specific.h" #include "base/containers/hash_tables.h" #include "base/memory/weak_ptr.h" +#include "base/strings/string_split.h" #include "net/disk_cache/disk_cache.h" #include "net/disk_cache/memory/mem_rankings.h" @@ -80,8 +81,7 @@ class NET_EXPORT_PRIVATE MemBackendImpl : public Backend { int DoomEntriesSince(base::Time initial_time, const CompletionCallback& callback) override; scoped_ptr<Iterator> CreateIterator() override; - void GetStats( - std::vector<std::pair<std::string, std::string>>* stats) override {} + void GetStats(base::StringPairs* stats) override {} void OnExternalCacheHit(const std::string& key) override; private: diff --git a/chromium/net/disk_cache/memory/mem_entry_impl.cc b/chromium/net/disk_cache/memory/mem_entry_impl.cc index 7c6199b951b..a5d9086ec0e 100644 --- a/chromium/net/disk_cache/memory/mem_entry_impl.cc +++ b/chromium/net/disk_cache/memory/mem_entry_impl.cc @@ -48,7 +48,7 @@ std::string GenerateChildName(const std::string& base_name, int child_id) { base::Value* NetLogChildEntryCreationCallback( const disk_cache::MemEntryImpl* parent, int child_id, - net::NetLog::LogLevel /* 
log_level */) { + net::NetLogCaptureMode /* capture_mode */) { base::DictionaryValue* dict = new base::DictionaryValue(); dict->SetString("key", GenerateChildName(parent->GetKey(), child_id)); dict->SetBoolean("created", true); @@ -185,7 +185,7 @@ int32 MemEntryImpl::GetDataSize(int index) const { int MemEntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len, const CompletionCallback& callback) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.BeginEvent( net::NetLog::TYPE_ENTRY_READ_DATA, CreateNetLogReadWriteDataCallback(index, offset, buf_len, false)); @@ -193,7 +193,7 @@ int MemEntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len, int result = InternalReadData(index, offset, buf, buf_len); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.EndEvent( net::NetLog::TYPE_ENTRY_READ_DATA, CreateNetLogReadWriteCompleteCallback(result)); @@ -203,7 +203,7 @@ int MemEntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len, int MemEntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len, const CompletionCallback& callback, bool truncate) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.BeginEvent( net::NetLog::TYPE_ENTRY_WRITE_DATA, CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate)); @@ -211,7 +211,7 @@ int MemEntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len, int result = InternalWriteData(index, offset, buf, buf_len, truncate); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.EndEvent( net::NetLog::TYPE_ENTRY_WRITE_DATA, CreateNetLogReadWriteCompleteCallback(result)); @@ -221,39 +221,39 @@ int MemEntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len, int MemEntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len, const CompletionCallback& callback) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.BeginEvent( 
net::NetLog::TYPE_SPARSE_READ, CreateNetLogSparseOperationCallback(offset, buf_len)); } int result = InternalReadSparseData(offset, buf, buf_len); - if (net_log_.IsLogging()) + if (net_log_.IsCapturing()) net_log_.EndEvent(net::NetLog::TYPE_SPARSE_READ); return result; } int MemEntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len, const CompletionCallback& callback) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.BeginEvent( net::NetLog::TYPE_SPARSE_WRITE, CreateNetLogSparseOperationCallback(offset, buf_len)); } int result = InternalWriteSparseData(offset, buf, buf_len); - if (net_log_.IsLogging()) + if (net_log_.IsCapturing()) net_log_.EndEvent(net::NetLog::TYPE_SPARSE_WRITE); return result; } int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start, const CompletionCallback& callback) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.BeginEvent( net::NetLog::TYPE_SPARSE_GET_RANGE, CreateNetLogSparseOperationCallback(offset, len)); } int result = GetAvailableRange(offset, len, start); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.EndEvent( net::NetLog::TYPE_SPARSE_GET_RANGE, CreateNetLogGetAvailableRangeResultCallback(*start, result)); @@ -373,7 +373,7 @@ int MemEntryImpl::InternalReadSparseData(int64 offset, IOBuffer* buf, // we should stop. 
if (child_offset < child->child_first_pos_) break; - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.BeginEvent( net::NetLog::TYPE_SPARSE_READ_CHILD_DATA, CreateNetLogSparseReadWriteCallback(child->net_log().source(), @@ -381,7 +381,7 @@ int MemEntryImpl::InternalReadSparseData(int64 offset, IOBuffer* buf, } int ret = child->ReadData(kSparseData, child_offset, io_buf.get(), io_buf->BytesRemaining(), CompletionCallback()); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.EndEventWithNetErrorCode( net::NetLog::TYPE_SPARSE_READ_CHILD_DATA, ret); } @@ -430,7 +430,7 @@ int MemEntryImpl::InternalWriteSparseData(int64 offset, IOBuffer* buf, // Keep a record of the last byte position (exclusive) in the child. int data_size = child->GetDataSize(kSparseData); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.BeginEvent( net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, CreateNetLogSparseReadWriteCallback(child->net_log().source(), @@ -443,7 +443,7 @@ int MemEntryImpl::InternalWriteSparseData(int64 offset, IOBuffer* buf, // continuous we may want to discard this write. 
int ret = child->WriteData(kSparseData, child_offset, io_buf.get(), write_len, CompletionCallback(), true); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.EndEventWithNetErrorCode( net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, ret); } diff --git a/chromium/net/disk_cache/memory/mem_entry_impl.h b/chromium/net/disk_cache/memory/mem_entry_impl.h index 0d9acbe916c..fe5104bb845 100644 --- a/chromium/net/disk_cache/memory/mem_entry_impl.h +++ b/chromium/net/disk_cache/memory/mem_entry_impl.h @@ -8,8 +8,8 @@ #include "base/containers/hash_tables.h" #include "base/gtest_prod_util.h" #include "base/memory/scoped_ptr.h" -#include "net/base/net_log.h" #include "net/disk_cache/disk_cache.h" +#include "net/log/net_log.h" namespace disk_cache { diff --git a/chromium/net/disk_cache/net_log_parameters.cc b/chromium/net/disk_cache/net_log_parameters.cc index 5d7e50f5595..b13f916682e 100644 --- a/chromium/net/disk_cache/net_log_parameters.cc +++ b/chromium/net/disk_cache/net_log_parameters.cc @@ -16,7 +16,7 @@ namespace { base::Value* NetLogEntryCreationCallback( const disk_cache::Entry* entry, bool created, - net::NetLog::LogLevel /* log_level */) { + net::NetLogCaptureMode /* capture_mode */) { base::DictionaryValue* dict = new base::DictionaryValue(); dict->SetString("key", entry->GetKey()); dict->SetBoolean("created", created); @@ -28,7 +28,7 @@ base::Value* NetLogReadWriteDataCallback( int offset, int buf_len, bool truncate, - net::NetLog::LogLevel /* log_level */) { + net::NetLogCaptureMode /* capture_mode */) { base::DictionaryValue* dict = new base::DictionaryValue(); dict->SetInteger("index", index); dict->SetInteger("offset", offset); @@ -40,7 +40,7 @@ base::Value* NetLogReadWriteDataCallback( base::Value* NetLogReadWriteCompleteCallback( int bytes_copied, - net::NetLog::LogLevel /* log_level */) { + net::NetLogCaptureMode /* capture_mode */) { DCHECK_NE(bytes_copied, net::ERR_IO_PENDING); base::DictionaryValue* dict = new base::DictionaryValue(); if 
(bytes_copied < 0) { @@ -54,7 +54,7 @@ base::Value* NetLogReadWriteCompleteCallback( base::Value* NetLogSparseOperationCallback( int64 offset, int buff_len, - net::NetLog::LogLevel /* log_level */) { + net::NetLogCaptureMode /* capture_mode */) { base::DictionaryValue* dict = new base::DictionaryValue(); // Values can only be created with at most 32-bit integers. Using a string // instead circumvents that restriction. @@ -66,7 +66,7 @@ base::Value* NetLogSparseOperationCallback( base::Value* NetLogSparseReadWriteCallback( const net::NetLog::Source& source, int child_len, - net::NetLog::LogLevel /* log_level */) { + net::NetLogCaptureMode /* capture_mode */) { base::DictionaryValue* dict = new base::DictionaryValue(); source.AddToEventParameters(dict); dict->SetInteger("child_len", child_len); @@ -76,7 +76,7 @@ base::Value* NetLogSparseReadWriteCallback( base::Value* NetLogGetAvailableRangeResultCallback( int64 start, int result, - net::NetLog::LogLevel /* log_level */) { + net::NetLogCaptureMode /* capture_mode */) { base::DictionaryValue* dict = new base::DictionaryValue(); if (result > 0) { dict->SetInteger("length", result); diff --git a/chromium/net/disk_cache/net_log_parameters.h b/chromium/net/disk_cache/net_log_parameters.h index 3598cda7b12..6e144201c8e 100644 --- a/chromium/net/disk_cache/net_log_parameters.h +++ b/chromium/net/disk_cache/net_log_parameters.h @@ -7,7 +7,7 @@ #include <string> -#include "net/base/net_log.h" +#include "net/log/net_log.h" // This file contains a set of functions to create NetLog::ParametersCallbacks // shared by EntryImpls and MemEntryImpls. 
diff --git a/chromium/net/disk_cache/simple/simple_backend_impl.cc b/chromium/net/disk_cache/simple/simple_backend_impl.cc index 6385a3763c2..adb54ee3483 100644 --- a/chromium/net/disk_cache/simple/simple_backend_impl.cc +++ b/chromium/net/disk_cache/simple/simple_backend_impl.cc @@ -15,6 +15,7 @@ #include "base/bind.h" #include "base/callback.h" #include "base/files/file_util.h" +#include "base/lazy_instance.h" #include "base/location.h" #include "base/metrics/field_trial.h" #include "base/metrics/histogram.h" @@ -50,32 +51,34 @@ namespace { // Maximum number of concurrent worker pool threads, which also is the limit // on concurrent IO (as we use one thread per IO request). -const int kDefaultMaxWorkerThreads = 50; +const size_t kMaxWorkerThreads = 5U; const char kThreadNamePrefix[] = "SimpleCache"; // Maximum fraction of the cache that one entry can consume. const int kMaxFileRatio = 8; -// A global sequenced worker pool to use for launching all tasks. -SequencedWorkerPool* g_sequenced_worker_pool = NULL; - -void MaybeCreateSequencedWorkerPool() { - if (!g_sequenced_worker_pool) { - int max_worker_threads = kDefaultMaxWorkerThreads; +class LeakySequencedWorkerPool { + public: + LeakySequencedWorkerPool() + : sequenced_worker_pool_( + new SequencedWorkerPool(kMaxWorkerThreads, kThreadNamePrefix)) {} - const std::string thread_count_field_trial = - base::FieldTrialList::FindFullName("SimpleCacheMaxThreads"); - if (!thread_count_field_trial.empty()) { - max_worker_threads = - std::max(1, std::atoi(thread_count_field_trial.c_str())); - } + void FlushForTesting() { sequenced_worker_pool_->FlushForTesting(); } - g_sequenced_worker_pool = new SequencedWorkerPool(max_worker_threads, - kThreadNamePrefix); - g_sequenced_worker_pool->AddRef(); // Leak it. 
+ scoped_refptr<base::TaskRunner> GetTaskRunner() { + return sequenced_worker_pool_->GetTaskRunnerWithShutdownBehavior( + SequencedWorkerPool::CONTINUE_ON_SHUTDOWN); } -} + + private: + scoped_refptr<SequencedWorkerPool> sequenced_worker_pool_; + + DISALLOW_COPY_AND_ASSIGN(LeakySequencedWorkerPool); +}; + +base::LazyInstance<LeakySequencedWorkerPool>::Leaky g_sequenced_worker_pool = + LAZY_INSTANCE_INITIALIZER; bool g_fd_limit_histogram_has_been_populated = false; @@ -245,10 +248,7 @@ SimpleBackendImpl::~SimpleBackendImpl() { } int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) { - MaybeCreateSequencedWorkerPool(); - - worker_pool_ = g_sequenced_worker_pool->GetTaskRunnerWithShutdownBehavior( - SequencedWorkerPool::CONTINUE_ON_SHUTDOWN); + worker_pool_ = g_sequenced_worker_pool.Get().GetTaskRunner(); index_.reset(new SimpleIndex( base::ThreadTaskRunnerHandle::Get(), @@ -271,12 +271,15 @@ int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) { } bool SimpleBackendImpl::SetMaxSize(int max_bytes) { + if (max_bytes < 0) + return false; orig_max_size_ = max_bytes; - return index_->SetMaxSize(max_bytes); + index_->SetMaxSize(max_bytes); + return true; } int SimpleBackendImpl::GetMaxFileSize() const { - return index_->max_size() / kMaxFileRatio; + return static_cast<int>(index_->max_size() / kMaxFileRatio); } void SimpleBackendImpl::OnDoomStart(uint64 entry_hash) { @@ -541,8 +544,7 @@ scoped_ptr<Backend::Iterator> SimpleBackendImpl::CreateIterator() { return scoped_ptr<Iterator>(new SimpleIterator(AsWeakPtr())); } -void SimpleBackendImpl::GetStats( - std::vector<std::pair<std::string, std::string> >* stats) { +void SimpleBackendImpl::GetStats(base::StringPairs* stats) { std::pair<std::string, std::string> item; item.first = "Cache type"; item.second = "Simple Cache"; @@ -730,9 +732,11 @@ void SimpleBackendImpl::DoomEntriesComplete( callback.Run(result); } +// static void SimpleBackendImpl::FlushWorkerPoolForTesting() { - if 
(g_sequenced_worker_pool) - g_sequenced_worker_pool->FlushForTesting(); + // We only need to do this if there is an active task runner. + if (base::ThreadTaskRunnerHandle::IsSet()) + g_sequenced_worker_pool.Get().FlushForTesting(); } } // namespace disk_cache diff --git a/chromium/net/disk_cache/simple/simple_backend_impl.h b/chromium/net/disk_cache/simple/simple_backend_impl.h index 503fc4927b7..7993fdcd853 100644 --- a/chromium/net/disk_cache/simple/simple_backend_impl.h +++ b/chromium/net/disk_cache/simple/simple_backend_impl.h @@ -16,6 +16,7 @@ #include "base/memory/ref_counted.h" #include "base/memory/scoped_ptr.h" #include "base/memory/weak_ptr.h" +#include "base/strings/string_split.h" #include "base/task_runner.h" #include "base/time/time.h" #include "net/base/cache_type.h" @@ -105,8 +106,7 @@ class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend, int DoomEntriesSince(base::Time initial_time, const CompletionCallback& callback) override; scoped_ptr<Iterator> CreateIterator() override; - void GetStats( - std::vector<std::pair<std::string, std::string>>* stats) override; + void GetStats(base::StringPairs* stats) override; void OnExternalCacheHit(const std::string& key) override; private: diff --git a/chromium/net/disk_cache/simple/simple_entry_impl.cc b/chromium/net/disk_cache/simple/simple_entry_impl.cc index a6de8782b24..b7c852492a6 100644 --- a/chromium/net/disk_cache/simple/simple_entry_impl.cc +++ b/chromium/net/disk_cache/simple/simple_entry_impl.cc @@ -142,7 +142,6 @@ void InvokeCallbackIfBackendIsAlive( using base::Closure; using base::FilePath; -using base::MessageLoopProxy; using base::Time; using base::TaskRunner; @@ -185,14 +184,14 @@ SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type, net_log_(net::BoundNetLog::Make( net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)), stream_0_data_(new net::GrowableIOBuffer()) { - COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_), - arrays_should_be_same_size); - 
COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_), - arrays_should_be_same_size); - COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_), - arrays_should_be_same_size); - COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_), - arrays_should_be_same_size); + static_assert(arraysize(data_size_) == arraysize(crc32s_end_offset_), + "arrays should be the same size"); + static_assert(arraysize(data_size_) == arraysize(crc32s_), + "arrays should be the same size"); + static_assert(arraysize(data_size_) == arraysize(have_written_), + "arrays should be the same size"); + static_assert(arraysize(data_size_) == arraysize(crc_check_state_), + "arrays should be the same size"); MakeUninitialized(); net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY, CreateNetLogSimpleEntryConstructionCallback(this)); @@ -347,7 +346,7 @@ int SimpleEntryImpl::ReadData(int stream_index, const CompletionCallback& callback) { DCHECK(io_thread_checker_.CalledOnValidThread()); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL, CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, false)); @@ -355,7 +354,7 @@ int SimpleEntryImpl::ReadData(int stream_index, if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount || buf_len < 0) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); } @@ -365,7 +364,7 @@ int SimpleEntryImpl::ReadData(int stream_index, } if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len)) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, CreateNetLogReadWriteCompleteCallback(0)); } @@ -394,7 +393,7 @@ int SimpleEntryImpl::WriteData(int stream_index, bool truncate) { 
DCHECK(io_thread_checker_.CalledOnValidThread()); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent( net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL, CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, @@ -403,7 +402,7 @@ int SimpleEntryImpl::WriteData(int stream_index, if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount || offset < 0 || buf_len < 0) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent( net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); @@ -412,7 +411,7 @@ int SimpleEntryImpl::WriteData(int stream_index, return net::ERR_INVALID_ARGUMENT; } if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) { - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent( net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED)); @@ -454,7 +453,7 @@ int SimpleEntryImpl::WriteData(int stream_index, } op_callback = CompletionCallback(); ret_value = buf_len; - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent( net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC, CreateNetLogReadWriteCompleteCallback(buf_len)); @@ -809,7 +808,7 @@ void SimpleEntryImpl::ReadDataInternal(int stream_index, DCHECK(io_thread_checker_.CalledOnValidThread()); ScopedOperationRunner operation_runner(this); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent( net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN, CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, @@ -825,7 +824,7 @@ void SimpleEntryImpl::ReadDataInternal(int stream_index, base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::Bind(callback, net::ERR_FAILED)); } - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent( net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, 
CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED)); @@ -892,7 +891,7 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index, DCHECK(io_thread_checker_.CalledOnValidThread()); ScopedOperationRunner operation_runner(this); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent( net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN, CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, @@ -901,7 +900,7 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index, if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) { RecordWriteResult(cache_type_, WRITE_RESULT_BAD_STATE); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent( net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED)); @@ -1022,9 +1021,9 @@ void SimpleEntryImpl::WriteSparseDataInternal( DCHECK_EQ(STATE_READY, state_); state_ = STATE_IO_PENDING; - int64 max_sparse_data_size = kint64max; + uint64 max_sparse_data_size = kint64max; if (backend_.get()) { - int64 max_cache_size = backend_->index()->max_size(); + uint64 max_cache_size = backend_->index()->max_size(); max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor; } @@ -1230,7 +1229,7 @@ void SimpleEntryImpl::ReadOperationComplete( crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE; } } - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent( net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, CreateNetLogReadWriteCompleteCallback(*result)); @@ -1248,7 +1247,7 @@ void SimpleEntryImpl::WriteOperationComplete( RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS); else RecordWriteResult(cache_type_, WRITE_RESULT_SYNC_WRITE_FAILURE); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, CreateNetLogReadWriteCompleteCallback(*result)); } @@ -1319,7 +1318,7 @@ void SimpleEntryImpl::ChecksumOperationComplete( DCHECK_EQ(STATE_IO_PENDING, 
state_); DCHECK(result); - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEventWithNetErrorCode( net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END, *result); @@ -1334,7 +1333,7 @@ void SimpleEntryImpl::ChecksumOperationComplete( } else { RecordReadResult(cache_type_, READ_RESULT_SYNC_CHECKSUM_FAILURE); } - if (net_log_.IsLogging()) { + if (net_log_.IsCapturing()) { net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, CreateNetLogReadWriteCompleteCallback(*result)); } diff --git a/chromium/net/disk_cache/simple/simple_entry_impl.h b/chromium/net/disk_cache/simple/simple_entry_impl.h index ebc6bf15e19..f66c9f05328 100644 --- a/chromium/net/disk_cache/simple/simple_entry_impl.h +++ b/chromium/net/disk_cache/simple/simple_entry_impl.h @@ -14,10 +14,10 @@ #include "base/threading/thread_checker.h" #include "net/base/cache_type.h" #include "net/base/net_export.h" -#include "net/base/net_log.h" #include "net/disk_cache/disk_cache.h" #include "net/disk_cache/simple/simple_entry_format.h" #include "net/disk_cache/simple/simple_entry_operation.h" +#include "net/log/net_log.h" namespace base { class TaskRunner; diff --git a/chromium/net/disk_cache/simple/simple_entry_operation.h b/chromium/net/disk_cache/simple/simple_entry_operation.h index 08863124cfc..b2f5502a00b 100644 --- a/chromium/net/disk_cache/simple/simple_entry_operation.h +++ b/chromium/net/disk_cache/simple/simple_entry_operation.h @@ -7,7 +7,7 @@ #include "base/memory/ref_counted.h" #include "net/base/completion_callback.h" -#include "net/base/net_log.h" +#include "net/log/net_log.h" namespace net { class IOBuffer; diff --git a/chromium/net/disk_cache/simple/simple_index.cc b/chromium/net/disk_cache/simple/simple_index.cc index 66d9f425fb1..6eb60bd57ab 100644 --- a/chromium/net/disk_cache/simple/simple_index.cc +++ b/chromium/net/disk_cache/simple/simple_index.cc @@ -82,9 +82,9 @@ EntryMetadata::EntryMetadata() entry_size_(0) { } -EntryMetadata::EntryMetadata(base::Time 
last_used_time, int entry_size) +EntryMetadata::EntryMetadata(base::Time last_used_time, uint64 entry_size) : last_used_time_seconds_since_epoch_(0), - entry_size_(entry_size) { + entry_size_(base::checked_cast<int32>(entry_size)) { SetLastUsedTime(last_used_time); } @@ -111,6 +111,14 @@ void EntryMetadata::SetLastUsedTime(const base::Time& last_used_time) { last_used_time_seconds_since_epoch_ = 1; } +uint64 EntryMetadata::GetEntrySize() const { + return entry_size_; +} + +void EntryMetadata::SetEntrySize(uint64 entry_size) { + entry_size_ = base::checked_cast<int32>(entry_size); +} + void EntryMetadata::Serialize(Pickle* pickle) const { DCHECK(pickle); int64 internal_last_used_time = GetLastUsedTime().ToInternalValue(); @@ -122,10 +130,11 @@ bool EntryMetadata::Deserialize(PickleIterator* it) { DCHECK(it); int64 tmp_last_used_time; uint64 tmp_entry_size; - if (!it->ReadInt64(&tmp_last_used_time) || !it->ReadUInt64(&tmp_entry_size)) + if (!it->ReadInt64(&tmp_last_used_time) || !it->ReadUInt64(&tmp_entry_size) || + tmp_entry_size > static_cast<uint64>(std::numeric_limits<int32>::max())) return false; SetLastUsedTime(base::Time::FromInternalValue(tmp_last_used_time)); - entry_size_ = tmp_entry_size; + entry_size_ = static_cast<int32>(tmp_entry_size); return true; } @@ -179,18 +188,13 @@ void SimpleIndex::Initialize(base::Time cache_mtime) { index_file_->LoadIndexEntries(cache_mtime, reply, load_result); } -bool SimpleIndex::SetMaxSize(int max_bytes) { - if (max_bytes < 0) - return false; - +void SimpleIndex::SetMaxSize(uint64 max_bytes) { // Zero size means use the default. 
- if (!max_bytes) - return true; - - max_size_ = max_bytes; - high_watermark_ = max_size_ - max_size_ / kEvictionMarginDivisor; - low_watermark_ = max_size_ - 2 * (max_size_ / kEvictionMarginDivisor); - return true; + if (max_bytes) { + max_size_ = max_bytes; + high_watermark_ = max_size_ - max_size_ / kEvictionMarginDivisor; + low_watermark_ = max_size_ - 2 * (max_size_ / kEvictionMarginDivisor); + } } int SimpleIndex::ExecuteWhenReady(const net::CompletionCallback& task) { @@ -286,12 +290,12 @@ void SimpleIndex::StartEvictionIfNeeded() { // Take all live key hashes from the index and sort them by time. eviction_in_progress_ = true; eviction_start_time_ = base::TimeTicks::Now(); - SIMPLE_CACHE_UMA(MEMORY_KB, - "Eviction.CacheSizeOnStart2", cache_type_, - cache_size_ / kBytesInKb); - SIMPLE_CACHE_UMA(MEMORY_KB, - "Eviction.MaxCacheSizeOnStart2", cache_type_, - max_size_ / kBytesInKb); + SIMPLE_CACHE_UMA( + MEMORY_KB, "Eviction.CacheSizeOnStart2", cache_type_, + static_cast<base::HistogramBase::Sample>(cache_size_ / kBytesInKb)); + SIMPLE_CACHE_UMA( + MEMORY_KB, "Eviction.MaxCacheSizeOnStart2", cache_type_, + static_cast<base::HistogramBase::Sample>(max_size_ / kBytesInKb)); std::vector<uint64> entry_hashes; entry_hashes.reserve(entries_set_.size()); for (EntrySet::const_iterator it = entries_set_.begin(), @@ -308,8 +312,7 @@ void SimpleIndex::StartEvictionIfNeeded() { DCHECK(it != entry_hashes.end()); EntrySet::iterator found_meta = entries_set_.find(*it); DCHECK(found_meta != entries_set_.end()); - uint64 to_evict_size = found_meta->second.GetEntrySize(); - evicted_so_far_size += to_evict_size; + evicted_so_far_size += found_meta->second.GetEntrySize(); ++it; } @@ -320,15 +323,16 @@ void SimpleIndex::StartEvictionIfNeeded() { SIMPLE_CACHE_UMA(TIMES, "Eviction.TimeToSelectEntries", cache_type_, base::TimeTicks::Now() - eviction_start_time_); - SIMPLE_CACHE_UMA(MEMORY_KB, - "Eviction.SizeOfEvicted2", cache_type_, - evicted_so_far_size / kBytesInKb); + 
SIMPLE_CACHE_UMA( + MEMORY_KB, "Eviction.SizeOfEvicted2", cache_type_, + static_cast<base::HistogramBase::Sample>( + evicted_so_far_size / kBytesInKb)); delegate_->DoomEntries(&entry_hashes, base::Bind(&SimpleIndex::EvictionDone, AsWeakPtr())); } -bool SimpleIndex::UpdateEntrySize(uint64 entry_hash, int entry_size) { +bool SimpleIndex::UpdateEntrySize(uint64 entry_hash, int64 entry_size) { DCHECK(io_thread_checker_.CalledOnValidThread()); EntrySet::iterator it = entries_set_.find(entry_hash); if (it == entries_set_.end()) @@ -349,9 +353,9 @@ void SimpleIndex::EvictionDone(int result) { SIMPLE_CACHE_UMA(TIMES, "Eviction.TimeToDone", cache_type_, base::TimeTicks::Now() - eviction_start_time_); - SIMPLE_CACHE_UMA(MEMORY_KB, - "Eviction.SizeWhenDone2", cache_type_, - cache_size_ / kBytesInKb); + SIMPLE_CACHE_UMA( + MEMORY_KB, "Eviction.SizeWhenDone2", cache_type_, + static_cast<base::HistogramBase::Sample>(cache_size_ / kBytesInKb)); } // static @@ -374,10 +378,10 @@ void SimpleIndex::PostponeWritingToDisk() { } void SimpleIndex::UpdateEntryIteratorSize(EntrySet::iterator* it, - int entry_size) { + int64 entry_size) { // Update the total cache size with the new entry size. 
DCHECK(io_thread_checker_.CalledOnValidThread()); - DCHECK_GE(cache_size_, implicit_cast<uint64>((*it)->second.GetEntrySize())); + DCHECK_GE(cache_size_, (*it)->second.GetEntrySize()); cache_size_ -= (*it)->second.GetEntrySize(); cache_size_ += entry_size; (*it)->second.SetEntrySize(entry_size); diff --git a/chromium/net/disk_cache/simple/simple_index.h b/chromium/net/disk_cache/simple/simple_index.h index 2b7e1cd0bef..099d7fdb80c 100644 --- a/chromium/net/disk_cache/simple/simple_index.h +++ b/chromium/net/disk_cache/simple/simple_index.h @@ -40,13 +40,13 @@ struct SimpleIndexLoadResult; class NET_EXPORT_PRIVATE EntryMetadata { public: EntryMetadata(); - EntryMetadata(base::Time last_used_time, int entry_size); + EntryMetadata(base::Time last_used_time, uint64 entry_size); base::Time GetLastUsedTime() const; void SetLastUsedTime(const base::Time& last_used_time); - int GetEntrySize() const { return entry_size_; } - void SetEntrySize(int entry_size) { entry_size_ = entry_size; } + uint64 GetEntrySize() const; + void SetEntrySize(uint64 entry_size); // Serialize the data into the provided pickle. void Serialize(Pickle* pickle) const; @@ -69,7 +69,7 @@ class NET_EXPORT_PRIVATE EntryMetadata { uint32 last_used_time_seconds_since_epoch_; int32 entry_size_; // Storage size in bytes. }; -COMPILE_ASSERT(sizeof(EntryMetadata) == 8, metadata_size); +static_assert(sizeof(EntryMetadata) == 8, "incorrect metadata size"); // This class is not Thread-safe. class NET_EXPORT_PRIVATE SimpleIndex @@ -86,8 +86,8 @@ class NET_EXPORT_PRIVATE SimpleIndex void Initialize(base::Time cache_mtime); - bool SetMaxSize(int max_bytes); - int max_size() const { return max_size_; } + void SetMaxSize(uint64 max_bytes); + uint64 max_size() const { return max_size_; } void Insert(uint64 entry_hash); void Remove(uint64 entry_hash); @@ -104,7 +104,7 @@ class NET_EXPORT_PRIVATE SimpleIndex // Update the size (in bytes) of an entry, in the metadata stored in the // index. 
This should be the total disk-file size including all streams of the // entry. - bool UpdateEntrySize(uint64 entry_hash, int entry_size); + bool UpdateEntrySize(uint64 entry_hash, int64 entry_size); typedef base::hash_map<uint64, EntryMetadata> EntrySet; @@ -143,7 +143,7 @@ class NET_EXPORT_PRIVATE SimpleIndex void PostponeWritingToDisk(); - void UpdateEntryIteratorSize(EntrySet::iterator* it, int entry_size); + void UpdateEntryIteratorSize(EntrySet::iterator* it, int64 entry_size); // Must run on IO Thread. void MergeInitializingSet(scoped_ptr<SimpleIndexLoadResult> load_result); diff --git a/chromium/net/disk_cache/simple/simple_index_unittest.cc b/chromium/net/disk_cache/simple/simple_index_unittest.cc index 1096488448a..a74cb108d12 100644 --- a/chromium/net/disk_cache/simple/simple_index_unittest.cc +++ b/chromium/net/disk_cache/simple/simple_index_unittest.cc @@ -28,7 +28,7 @@ namespace { const base::Time kTestLastUsedTime = base::Time::UnixEpoch() + base::TimeDelta::FromDays(20); -const int kTestEntrySize = 789; +const uint64 kTestEntrySize = 789; } // namespace @@ -171,7 +171,7 @@ class SimpleIndexTest : public testing::Test, public SimpleIndexDelegate { TEST_F(EntryMetadataTest, Basics) { EntryMetadata entry_metadata; EXPECT_EQ(base::Time(), entry_metadata.GetLastUsedTime()); - EXPECT_EQ(0, entry_metadata.GetEntrySize()); + EXPECT_EQ(0U, entry_metadata.GetEntrySize()); entry_metadata = NewEntryMetadataWithValues(); CheckEntryMetadataValues(entry_metadata); @@ -231,7 +231,7 @@ TEST_F(SimpleIndexTest, BasicInsertRemove) { // Confirm blank state. EntryMetadata metadata; EXPECT_EQ(base::Time(), metadata.GetLastUsedTime()); - EXPECT_EQ(0, metadata.GetEntrySize()); + EXPECT_EQ(0U, metadata.GetEntrySize()); // Confirm state after insert. 
index()->Insert(hashes_.at<1>()); @@ -239,14 +239,14 @@ TEST_F(SimpleIndexTest, BasicInsertRemove) { base::Time now(base::Time::Now()); EXPECT_LT(now - base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime()); EXPECT_GT(now + base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime()); - EXPECT_EQ(0, metadata.GetEntrySize()); + EXPECT_EQ(0U, metadata.GetEntrySize()); // Confirm state after remove. metadata = EntryMetadata(); index()->Remove(hashes_.at<1>()); EXPECT_FALSE(GetEntryForTesting(hashes_.at<1>(), &metadata)); EXPECT_EQ(base::Time(), metadata.GetLastUsedTime()); - EXPECT_EQ(0, metadata.GetEntrySize()); + EXPECT_EQ(0U, metadata.GetEntrySize()); } TEST_F(SimpleIndexTest, Has) { @@ -330,11 +330,11 @@ TEST_F(SimpleIndexTest, UpdateEntrySize) { EXPECT_GT( now - base::TimeDelta::FromDays(2) + base::TimeDelta::FromSeconds(1), metadata.GetLastUsedTime()); - EXPECT_EQ(475, metadata.GetEntrySize()); + EXPECT_EQ(475U, metadata.GetEntrySize()); index()->UpdateEntrySize(kHash1, 600u); EXPECT_TRUE(GetEntryForTesting(kHash1, &metadata)); - EXPECT_EQ(600, metadata.GetEntrySize()); + EXPECT_EQ(600U, metadata.GetEntrySize()); EXPECT_EQ(1, index()->GetEntryCount()); } @@ -383,7 +383,7 @@ TEST_F(SimpleIndexTest, BasicInit) { EXPECT_GT( now - base::TimeDelta::FromDays(2) + base::TimeDelta::FromSeconds(1), metadata.GetLastUsedTime()); - EXPECT_EQ(10, metadata.GetEntrySize()); + EXPECT_EQ(10U, metadata.GetEntrySize()); EXPECT_TRUE(GetEntryForTesting(hashes_.at<2>(), &metadata)); EXPECT_LT( now - base::TimeDelta::FromDays(3) - base::TimeDelta::FromSeconds(1), @@ -391,7 +391,7 @@ TEST_F(SimpleIndexTest, BasicInit) { EXPECT_GT( now - base::TimeDelta::FromDays(3) + base::TimeDelta::FromSeconds(1), metadata.GetLastUsedTime()); - EXPECT_EQ(100, metadata.GetEntrySize()); + EXPECT_EQ(100U, metadata.GetEntrySize()); } // Remove something that's going to come in from the loaded index. 
@@ -423,7 +423,7 @@ TEST_F(SimpleIndexTest, InsertBeforeInit) { base::Time now(base::Time::Now()); EXPECT_LT(now - base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime()); EXPECT_GT(now + base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime()); - EXPECT_EQ(0, metadata.GetEntrySize()); + EXPECT_EQ(0U, metadata.GetEntrySize()); } // Insert and Remove something that's going to come in from the loaded index. @@ -456,7 +456,7 @@ TEST_F(SimpleIndexTest, RemoveInsertBeforeInit) { base::Time now(base::Time::Now()); EXPECT_LT(now - base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime()); EXPECT_GT(now + base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime()); - EXPECT_EQ(0, metadata.GetEntrySize()); + EXPECT_EQ(0U, metadata.GetEntrySize()); } // Do all above tests at once + a non-conflict to test for cross-key @@ -494,14 +494,14 @@ TEST_F(SimpleIndexTest, AllInitConflicts) { EXPECT_TRUE(GetEntryForTesting(hashes_.at<2>(), &metadata)); EXPECT_LT(now - base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime()); EXPECT_GT(now + base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime()); - EXPECT_EQ(0, metadata.GetEntrySize()); + EXPECT_EQ(0U, metadata.GetEntrySize()); EXPECT_FALSE(index()->Has(hashes_.at<3>())); EXPECT_TRUE(GetEntryForTesting(hashes_.at<4>(), &metadata)); EXPECT_LT(now - base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime()); EXPECT_GT(now + base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime()); - EXPECT_EQ(0, metadata.GetEntrySize()); + EXPECT_EQ(0U, metadata.GetEntrySize()); EXPECT_TRUE(GetEntryForTesting(hashes_.at<5>(), &metadata)); @@ -512,7 +512,7 @@ TEST_F(SimpleIndexTest, AllInitConflicts) { now - base::TimeDelta::FromDays(6) - base::TimeDelta::FromSeconds(1), metadata.GetLastUsedTime()); - EXPECT_EQ(100000, metadata.GetEntrySize()); + EXPECT_EQ(100000U, metadata.GetEntrySize()); } TEST_F(SimpleIndexTest, BasicEviction) { @@ -601,7 +601,7 @@ TEST_F(SimpleIndexTest, DiskWriteExecuted) { const EntryMetadata& 
entry1(entry_set.begin()->second); EXPECT_LT(now - base::TimeDelta::FromMinutes(1), entry1.GetLastUsedTime()); EXPECT_GT(now + base::TimeDelta::FromMinutes(1), entry1.GetLastUsedTime()); - EXPECT_EQ(20, entry1.GetEntrySize()); + EXPECT_EQ(20U, entry1.GetEntrySize()); } TEST_F(SimpleIndexTest, DiskWritePostponed) { diff --git a/chromium/net/disk_cache/simple/simple_net_log_parameters.cc b/chromium/net/disk_cache/simple/simple_net_log_parameters.cc index 4756c83b9fd..f3d9fe5b40e 100644 --- a/chromium/net/disk_cache/simple/simple_net_log_parameters.cc +++ b/chromium/net/disk_cache/simple/simple_net_log_parameters.cc @@ -17,7 +17,7 @@ namespace { base::Value* NetLogSimpleEntryConstructionCallback( const disk_cache::SimpleEntryImpl* entry, - net::NetLog::LogLevel log_level) { + net::NetLogCaptureMode capture_mode) { base::DictionaryValue* dict = new base::DictionaryValue(); dict->SetString("entry_hash", base::StringPrintf("%#016" PRIx64, entry->entry_hash())); @@ -27,7 +27,7 @@ base::Value* NetLogSimpleEntryConstructionCallback( base::Value* NetLogSimpleEntryCreationCallback( const disk_cache::SimpleEntryImpl* entry, int net_error, - net::NetLog::LogLevel /* log_level */) { + net::NetLogCaptureMode /* capture_mode */) { base::DictionaryValue* dict = new base::DictionaryValue(); dict->SetInteger("net_error", net_error); if (net_error == net::OK) diff --git a/chromium/net/disk_cache/simple/simple_net_log_parameters.h b/chromium/net/disk_cache/simple/simple_net_log_parameters.h index 41ea9a2af91..a8e46ecf244 100644 --- a/chromium/net/disk_cache/simple/simple_net_log_parameters.h +++ b/chromium/net/disk_cache/simple/simple_net_log_parameters.h @@ -5,7 +5,7 @@ #ifndef NET_DISK_CACHE_SIMPLE_NET_LOG_PARAMETERS_H_ #define NET_DISK_CACHE_SIMPLE_NET_LOG_PARAMETERS_H_ -#include "net/base/net_log.h" +#include "net/log/net_log.h" // This file augments the functions in net/disk_cache/net_log_parameters.h to // include ones that deal with specifics of the Simple Cache backend. 
diff --git a/chromium/net/disk_cache/simple/simple_synchronous_entry.cc b/chromium/net/disk_cache/simple/simple_synchronous_entry.cc index 52f8f08c35e..542cedef84c 100644 --- a/chromium/net/disk_cache/simple/simple_synchronous_entry.cc +++ b/chromium/net/disk_cache/simple/simple_synchronous_entry.cc @@ -14,9 +14,11 @@ #include "base/files/file_util.h" #include "base/hash.h" #include "base/location.h" +#include "base/metrics/histogram.h" #include "base/numerics/safe_conversions.h" #include "base/sha1.h" #include "base/strings/stringprintf.h" +#include "base/timer/elapsed_timer.h" #include "net/base/io_buffer.h" #include "net/base/net_errors.h" #include "net/disk_cache/simple/simple_backend_version.h" @@ -134,8 +136,8 @@ SimpleEntryStat::SimpleEntryStat(base::Time last_used, int SimpleEntryStat::GetOffsetInFile(const std::string& key, int offset, int stream_index) const { - const int64 headers_size = sizeof(SimpleFileHeader) + key.size(); - const int64 additional_offset = + const size_t headers_size = sizeof(SimpleFileHeader) + key.size(); + const size_t additional_offset = stream_index == 0 ? data_size_[1] + sizeof(SimpleFileEOF) : 0; return headers_size + offset + additional_offset; } @@ -154,8 +156,9 @@ int SimpleEntryStat::GetLastEOFOffsetInFile(const std::string& key, return GetOffsetInFile(key, eof_data_offset, stream_index); } -int SimpleEntryStat::GetFileSize(const std::string& key, int file_index) const { - const int total_data_size = +int64 SimpleEntryStat::GetFileSize(const std::string& key, + int file_index) const { + const int32 total_data_size = file_index == 0 ? 
data_size_[0] + data_size_[1] + sizeof(SimpleFileEOF) : data_size_[2]; return GetFileSizeFromKeyAndDataSize(key, total_data_size); @@ -215,6 +218,7 @@ void SimpleSynchronousEntry::OpenEntry( const uint64 entry_hash, bool had_index, SimpleEntryCreationResults *out_results) { + base::ElapsedTimer open_time; SimpleSynchronousEntry* sync_entry = new SimpleSynchronousEntry(cache_type, path, "", entry_hash); out_results->result = @@ -229,6 +233,7 @@ void SimpleSynchronousEntry::OpenEntry( out_results->stream_0_data = NULL; return; } + UMA_HISTOGRAM_TIMES("SimpleCache.DiskOpenLatency", open_time.Elapsed()); out_results->sync_entry = sync_entry; } @@ -451,7 +456,7 @@ void SimpleSynchronousEntry::ReadSparseData( void SimpleSynchronousEntry::WriteSparseData( const EntryOperationData& in_entry_op, net::IOBuffer* in_buf, - int64 max_sparse_data_size, + uint64 max_sparse_data_size, SimpleEntryStat* out_entry_stat, int* out_result) { DCHECK(initialized_); @@ -467,7 +472,7 @@ void SimpleSynchronousEntry::WriteSparseData( return; } - int64 sparse_data_size = out_entry_stat->sparse_data_size(); + uint64 sparse_data_size = out_entry_stat->sparse_data_size(); // This is a pessimistic estimate; it assumes the entire buffer is going to // be appended as a new range, not written over existing ranges. 
if (sparse_data_size + buf_len > max_sparse_data_size) { diff --git a/chromium/net/disk_cache/simple/simple_synchronous_entry.h b/chromium/net/disk_cache/simple/simple_synchronous_entry.h index 739a4889493..7f98b21ceaa 100644 --- a/chromium/net/disk_cache/simple/simple_synchronous_entry.h +++ b/chromium/net/disk_cache/simple/simple_synchronous_entry.h @@ -44,7 +44,7 @@ class NET_EXPORT_PRIVATE SimpleEntryStat { int stream_index) const; int GetEOFOffsetInFile(const std::string& key, int stream_index) const; int GetLastEOFOffsetInFile(const std::string& key, int file_index) const; - int GetFileSize(const std::string& key, int file_index) const; + int64 GetFileSize(const std::string& key, int file_index) const; base::Time last_used() const { return last_used_; } base::Time last_modified() const { return last_modified_; } @@ -158,7 +158,7 @@ class SimpleSynchronousEntry { int* out_result); void WriteSparseData(const EntryOperationData& in_entry_op, net::IOBuffer* in_buf, - int64 max_sparse_data_size, + uint64 max_sparse_data_size, SimpleEntryStat* out_entry_stat, int* out_result); void GetAvailableRange(const EntryOperationData& in_entry_op, diff --git a/chromium/net/disk_cache/simple/simple_test_util.h b/chromium/net/disk_cache/simple/simple_test_util.h index 82eebbec314..ec9b64434c0 100644 --- a/chromium/net/disk_cache/simple/simple_test_util.h +++ b/chromium/net/disk_cache/simple/simple_test_util.h @@ -30,7 +30,7 @@ class ImmutableArray { template <size_t Index> const T& at() const { - COMPILE_ASSERT(Index < size, array_out_of_bounds); + static_assert(Index < size, "array out of bounds"); return data_[Index]; } diff --git a/chromium/net/disk_cache/simple/simple_util_win.cc b/chromium/net/disk_cache/simple/simple_util_win.cc index d0d191dbacd..f2adcd04b89 100644 --- a/chromium/net/disk_cache/simple/simple_util_win.cc +++ b/chromium/net/disk_cache/simple/simple_util_win.cc @@ -24,7 +24,7 @@ bool SimpleCacheDeleteFile(const base::FilePath& path) { // Why a random name? 
Because if the name was derived from our original name, // then churn on a particular cache entry could cause flakey behaviour. - // TODO(gaivnp): Ensure these "todelete_" files are cleaned up on periodic + // TODO(gavinp): Ensure these "todelete_" files are cleaned up on periodic // directory sweeps. const base::FilePath rename_target = path.DirName().AppendASCII(base::StringPrintf("todelete_%016" PRIx64, |