author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2017-11-20 15:06:40 +0100
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2017-11-22 11:48:58 +0000
commit    daa093eea7c773db06799a13bd7e4e2e2a9f8f14 (patch)
tree      96cc5e7b9194c1b29eab927730bfa419e7111c25 /chromium/content/browser/cache_storage/cache_storage_cache.cc
parent    be59a35641616a4cf23c4a13fa0632624b021c1b (diff)
download  qtwebengine-chromium-daa093eea7c773db06799a13bd7e4e2e2a9f8f14.tar.gz
BASELINE: Update Chromium to 63.0.3239.58
Change-Id: Ia93b322a00ba4dd4004f3bcf1254063ba90e1605
Reviewed-by: Alexandru Croitor <alexandru.croitor@qt.io>
Diffstat (limited to 'chromium/content/browser/cache_storage/cache_storage_cache.cc')
-rw-r--r--  chromium/content/browser/cache_storage/cache_storage_cache.cc  451
1 file changed, 346 insertions(+), 105 deletions(-)
diff --git a/chromium/content/browser/cache_storage/cache_storage_cache.cc b/chromium/content/browser/cache_storage/cache_storage_cache.cc
index 2f1e0202508..fbcb30c940b 100644
--- a/chromium/content/browser/cache_storage/cache_storage_cache.cc
+++ b/chromium/content/browser/cache_storage/cache_storage_cache.cc
@@ -6,6 +6,7 @@
#include <stddef.h>
#include <algorithm>
+#include <functional>
#include <limits>
#include <memory>
#include <string>
@@ -29,6 +30,8 @@
#include "content/public/browser/browser_thread.h"
#include "content/public/common/content_features.h"
#include "content/public/common/referrer.h"
+#include "crypto/hmac.h"
+#include "crypto/symmetric_key.h"
#include "net/base/completion_callback.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
@@ -53,6 +56,19 @@ const size_t kMaxQueryCacheResultBytes =
const char kRecordBytesLabel[] = "DiskCache.CacheStorage";
+// The range of the padding added to response sizes for opaque resources.
+// Increment padding version if changed.
+const uint64_t kPaddingRange = 14431 * 1024;
+
+// If the way that a cache's padding is calculated changes, increment this
+// version.
+//
+// History:
+//
+// 1: Uniform random 400K.
+// 2: Uniform random 14,431K.
+const int32_t kCachePaddingAlgorithmVersion = 2;
+
// This class ensures that the cache and the entry have a lifetime as long as
// the blob that is created to contain them.
class CacheStorageCacheDataHandle
@@ -259,6 +275,68 @@ std::unique_ptr<ServiceWorkerResponse> CreateResponse(
metadata.response().cors_exposed_header_names().end()));
}
+// The sizes of opaque (non-cors) resource responses are padded in order
+// to obfuscate their actual sizes.
+bool ShouldPadResponseType(network::mojom::FetchResponseType response_type,
+ bool has_urls) {
+ switch (response_type) {
+ case network::mojom::FetchResponseType::kBasic:
+ case network::mojom::FetchResponseType::kCORS:
+ case network::mojom::FetchResponseType::kDefault:
+ case network::mojom::FetchResponseType::kError:
+ return false;
+ case network::mojom::FetchResponseType::kOpaque:
+ case network::mojom::FetchResponseType::kOpaqueRedirect:
+ return has_urls;
+ }
+ NOTREACHED();
+ return false;
+}
+
+bool ShouldPadResourceSize(const content::proto::CacheResponse* response) {
+ return ShouldPadResponseType(
+ ProtoResponseTypeToFetchResponseType(response->response_type()),
+ response->url_list_size());
+}
+
+bool ShouldPadResourceSize(const ServiceWorkerResponse* response) {
+ return ShouldPadResponseType(response->response_type,
+ !response->url_list.empty());
+}
+
+int64_t CalculateResponsePaddingInternal(
+ const std::string& response_url,
+ const crypto::SymmetricKey* padding_key,
+ int side_data_size) {
+ DCHECK(!response_url.empty());
+
+ crypto::HMAC hmac(crypto::HMAC::SHA256);
+ if (!hmac.Init(padding_key))
+ LOG(FATAL) << "Failed to init HMAC.";
+
+ std::vector<uint8_t> digest(hmac.DigestLength());
+ bool success;
+ if (side_data_size)
+ success = hmac.Sign(response_url + "METADATA", &digest[0], digest.size());
+ else
+ success = hmac.Sign(response_url, &digest[0], digest.size());
+ if (!success)
+ LOG(FATAL) << "Failed to sign URL.";
+
+ DCHECK_GE(digest.size(), sizeof(uint64_t));
+ uint64_t val = *(reinterpret_cast<uint64_t*>(&digest[0]));
+ return val % kPaddingRange;
+}
+
+int64_t CalculateResponsePaddingInternal(
+ const ::content::proto::CacheResponse* response,
+ const crypto::SymmetricKey* padding_key,
+ int side_data_size) {
+ DCHECK(ShouldPadResourceSize(response));
+ const std::string& url = response->url_list(response->url_list_size() - 1);
+ return CalculateResponsePaddingInternal(url, padding_key, side_data_size);
+}
+
} // namespace
// The state needed to pass between CacheStorageCache::Put callbacks.
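For reference, the padding applied to a padded response is a deterministic function of the response URL and the per-cache padding key: an HMAC-SHA256 is computed over the URL (with a "METADATA" suffix when side data is present), the first eight bytes of the digest are read as a uint64_t, and that value is reduced modulo kPaddingRange. A minimal standalone sketch of the reduction step, using placeholder digest bytes in place of Chromium's crypto::HMAC:

// Standalone sketch of the padding reduction. The digest bytes below are
// placeholders; the real code obtains them from crypto::HMAC keyed with the
// per-cache SymmetricKey and signed over the response URL.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

int main() {
  const uint64_t kPaddingRange = 14431 * 1024;  // same constant as in the hunk above

  // Pretend this is the 32-byte HMAC-SHA256 digest of the response URL.
  std::vector<uint8_t> hmac_digest(32, 0xAB);

  // Read the first 8 bytes of the digest as a uint64_t (memcpy here instead of
  // the reinterpret_cast used above), then reduce into [0, kPaddingRange).
  uint64_t val = 0;
  std::memcpy(&val, hmac_digest.data(), sizeof(val));
  const int64_t padding = static_cast<int64_t>(val % kPaddingRange);

  std::cout << "padding bytes: " << padding << "\n";
  return 0;
}
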
@@ -296,10 +374,12 @@ struct CacheStorageCache::QueryCacheResult {
struct CacheStorageCache::QueryCacheContext {
QueryCacheContext(std::unique_ptr<ServiceWorkerFetchRequest> request,
const CacheStorageCacheQueryParams& options,
- QueryCacheCallback callback)
+ QueryCacheCallback callback,
+ QueryTypes query_types)
: request(std::move(request)),
options(options),
callback(std::move(callback)),
+ query_types(query_types),
matches(base::MakeUnique<QueryCacheResults>()) {}
~QueryCacheContext() {
@@ -316,7 +396,7 @@ struct CacheStorageCache::QueryCacheContext {
std::unique_ptr<ServiceWorkerFetchRequest> request;
CacheStorageCacheQueryParams options;
QueryCacheCallback callback;
- QueryCacheType query_type;
+ QueryTypes query_types = 0;
size_t estimated_out_bytes = 0;
// Iteration state
@@ -337,11 +417,13 @@ std::unique_ptr<CacheStorageCache> CacheStorageCache::CreateMemoryCache(
CacheStorage* cache_storage,
scoped_refptr<net::URLRequestContextGetter> request_context_getter,
scoped_refptr<storage::QuotaManagerProxy> quota_manager_proxy,
- base::WeakPtr<storage::BlobStorageContext> blob_context) {
+ base::WeakPtr<storage::BlobStorageContext> blob_context,
+ std::unique_ptr<crypto::SymmetricKey> cache_padding_key) {
CacheStorageCache* cache = new CacheStorageCache(
origin, cache_name, base::FilePath(), cache_storage,
std::move(request_context_getter), std::move(quota_manager_proxy),
- blob_context, 0 /* cache_size */);
+ blob_context, 0 /* cache_size */, 0 /* cache_padding */,
+ std::move(cache_padding_key));
cache->SetObserver(cache_storage);
cache->InitBackend();
return base::WrapUnique(cache);
@@ -356,12 +438,15 @@ std::unique_ptr<CacheStorageCache> CacheStorageCache::CreatePersistentCache(
scoped_refptr<net::URLRequestContextGetter> request_context_getter,
scoped_refptr<storage::QuotaManagerProxy> quota_manager_proxy,
base::WeakPtr<storage::BlobStorageContext> blob_context,
- int64_t cache_size) {
+ int64_t cache_size,
+ int64_t cache_padding,
+ std::unique_ptr<crypto::SymmetricKey> cache_padding_key) {
CacheStorageCache* cache = new CacheStorageCache(
origin, cache_name, path, cache_storage,
std::move(request_context_getter), std::move(quota_manager_proxy),
- blob_context, cache_size);
- cache->SetObserver(cache_storage), cache->InitBackend();
+ blob_context, cache_size, cache_padding, std::move(cache_padding_key));
+ cache->SetObserver(cache_storage);
+ cache->InitBackend();
return base::WrapUnique(cache);
}
@@ -606,7 +691,9 @@ CacheStorageCache::CacheStorageCache(
scoped_refptr<net::URLRequestContextGetter> request_context_getter,
scoped_refptr<storage::QuotaManagerProxy> quota_manager_proxy,
base::WeakPtr<storage::BlobStorageContext> blob_context,
- int64_t cache_size)
+ int64_t cache_size,
+ int64_t cache_padding,
+ std::unique_ptr<crypto::SymmetricKey> cache_padding_key)
: origin_(origin),
cache_name_(cache_name),
path_(path),
@@ -617,12 +704,15 @@ CacheStorageCache::CacheStorageCache(
scheduler_(
new CacheStorageScheduler(CacheStorageSchedulerClient::CLIENT_CACHE)),
cache_size_(cache_size),
+ cache_padding_(cache_padding),
+ cache_padding_key_(std::move(cache_padding_key)),
max_query_size_bytes_(kMaxQueryCacheResultBytes),
cache_observer_(nullptr),
memory_only_(path.empty()),
weak_ptr_factory_(this) {
DCHECK(!origin_.is_empty());
DCHECK(quota_manager_proxy_.get());
+ DCHECK(cache_padding_key_.get());
quota_manager_proxy_->NotifyOriginInUse(origin_);
}
@@ -630,10 +720,12 @@ CacheStorageCache::CacheStorageCache(
void CacheStorageCache::QueryCache(
std::unique_ptr<ServiceWorkerFetchRequest> request,
const CacheStorageCacheQueryParams& options,
- QueryCacheType query_type,
+ QueryTypes query_types,
QueryCacheCallback callback) {
- DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
- if (backend_state_ != BACKEND_OPEN) {
+ DCHECK_NE(
+ QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_WITH_BODIES,
+ query_types & (QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_WITH_BODIES));
+ if (backend_state_ == BACKEND_CLOSED) {
std::move(callback).Run(CACHE_STORAGE_ERROR_STORAGE,
std::unique_ptr<QueryCacheResults>());
return;
@@ -647,9 +739,8 @@ void CacheStorageCache::QueryCache(
}
ServiceWorkerFetchRequest* request_ptr = request.get();
- std::unique_ptr<QueryCacheContext> query_cache_context(
- new QueryCacheContext(std::move(request), options, std::move(callback)));
- query_cache_context->query_type = query_type;
+ std::unique_ptr<QueryCacheContext> query_cache_context(new QueryCacheContext(
+ std::move(request), options, std::move(callback), query_types));
if (query_cache_context->request &&
!query_cache_context->request->url.is_empty() && !options.ignore_search) {
@@ -732,7 +823,7 @@ void CacheStorageCache::QueryCacheFilterEntry(
disk_cache::ScopedEntryPtr entry(query_cache_context->enumerated_entry);
query_cache_context->enumerated_entry = nullptr;
- if (backend_state_ != BACKEND_OPEN) {
+ if (backend_state_ == BACKEND_CLOSED) {
std::move(query_cache_context->callback)
.Run(CACHE_STORAGE_ERROR_NOT_FOUND,
std::move(query_cache_context->matches));
@@ -794,57 +885,50 @@ void CacheStorageCache::QueryCacheDidReadMetadata(
return;
}
- if (query_cache_context->query_type == QueryCacheType::CACHE_ENTRIES) {
- match->request.reset();
- match->response.reset();
+ if (query_cache_context->query_types & QUERY_CACHE_ENTRIES)
match->entry = std::move(entry);
- QueryCacheOpenNextEntry(std::move(query_cache_context));
- return;
- }
-
- query_cache_context->estimated_out_bytes +=
- match->request->EstimatedStructSize();
- if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
- std::move(query_cache_context->callback)
- .Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE,
- std::unique_ptr<QueryCacheResults>());
- return;
- }
- if (query_cache_context->query_type == QueryCacheType::REQUESTS) {
- match->response.reset();
- QueryCacheOpenNextEntry(std::move(query_cache_context));
- return;
+ if (query_cache_context->query_types & QUERY_CACHE_REQUESTS) {
+ query_cache_context->estimated_out_bytes +=
+ match->request->EstimatedStructSize();
+ if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
+ std::move(query_cache_context->callback)
+ .Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE,
+ std::unique_ptr<QueryCacheResults>());
+ return;
+ }
+ } else {
+ match->request.reset();
}
- DCHECK_EQ(QueryCacheType::REQUESTS_AND_RESPONSES,
- query_cache_context->query_type);
-
- query_cache_context->estimated_out_bytes +=
- match->response->EstimatedStructSize();
- if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
- std::move(query_cache_context->callback)
- .Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE,
- std::unique_ptr<QueryCacheResults>());
- return;
- }
+ if (query_cache_context->query_types & QUERY_CACHE_RESPONSES_WITH_BODIES) {
+ query_cache_context->estimated_out_bytes +=
+ match->response->EstimatedStructSize();
+ if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
+ std::move(query_cache_context->callback)
+ .Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE,
+ std::unique_ptr<QueryCacheResults>());
+ return;
+ }
+ if (entry->GetDataSize(INDEX_RESPONSE_BODY) == 0) {
+ QueryCacheOpenNextEntry(std::move(query_cache_context));
+ return;
+ }
- if (entry->GetDataSize(INDEX_RESPONSE_BODY) == 0) {
- QueryCacheOpenNextEntry(std::move(query_cache_context));
- return;
- }
+ if (!blob_storage_context_) {
+ std::move(query_cache_context->callback)
+ .Run(CACHE_STORAGE_ERROR_STORAGE,
+ std::unique_ptr<QueryCacheResults>());
+ return;
+ }
- if (!blob_storage_context_) {
- std::move(query_cache_context->callback)
- .Run(CACHE_STORAGE_ERROR_STORAGE,
- base::MakeUnique<QueryCacheResults>());
- return;
+ match->blob_handle =
+ PopulateResponseBody(std::move(entry), match->response.get());
+ } else if (!(query_cache_context->query_types &
+ QUERY_CACHE_RESPONSES_NO_BODIES)) {
+ match->response.reset();
}
- std::unique_ptr<storage::BlobDataHandle> blob_data_handle =
- PopulateResponseBody(std::move(entry), match->response.get());
- match->blob_handle = std::move(blob_data_handle);
-
QueryCacheOpenNextEntry(std::move(query_cache_context));
}
@@ -854,6 +938,22 @@ bool CacheStorageCache::QueryCacheResultCompare(const QueryCacheResult& lhs,
return lhs.entry_time < rhs.entry_time;
}
+// static
+int64_t CacheStorageCache::CalculateResponsePadding(
+ const ServiceWorkerResponse& response,
+ const crypto::SymmetricKey* padding_key,
+ int side_data_size) {
+ if (!ShouldPadResourceSize(&response))
+ return 0;
+ return CalculateResponsePaddingInternal(response.url_list.back().spec(),
+ padding_key, side_data_size);
+}
+
+// static
+int32_t CacheStorageCache::GetResponsePaddingVersion() {
+ return kCachePaddingAlgorithmVersion;
+}
+
void CacheStorageCache::MatchImpl(
std::unique_ptr<ServiceWorkerFetchRequest> request,
const CacheStorageCacheQueryParams& match_params,
@@ -903,7 +1003,8 @@ void CacheStorageCache::MatchAllImpl(
}
QueryCache(
- std::move(request), options, QueryCacheType::REQUESTS_AND_RESPONSES,
+ std::move(request), options,
+ QUERY_CACHE_REQUESTS | QUERY_CACHE_RESPONSES_WITH_BODIES,
base::BindOnce(&CacheStorageCache::MatchAllDidQueryCache,
weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
}
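The single-valued QueryCacheType enum is replaced by a QueryTypes bitmask so callers can ask independently for disk_cache entries, requests, and responses with or without bodies; MatchAllImpl above passes QUERY_CACHE_REQUESTS | QUERY_CACHE_RESPONSES_WITH_BODIES, while DeleteImpl further down passes QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_NO_BODIES. A standalone sketch of the flag combination; the numeric flag values here are hypothetical stand-ins for the constants declared in cache_storage_cache.h:

// Sketch of the QueryTypes bitmask idea with hypothetical flag values.
#include <cstdint>
#include <iostream>

using QueryTypes = int32_t;
constexpr QueryTypes QUERY_CACHE_ENTRIES = 1 << 0;
constexpr QueryTypes QUERY_CACHE_REQUESTS = 1 << 1;
constexpr QueryTypes QUERY_CACHE_RESPONSES_WITH_BODIES = 1 << 2;
constexpr QueryTypes QUERY_CACHE_RESPONSES_NO_BODIES = 1 << 3;

int main() {
  // MatchAll wants the requests plus full responses (with blob bodies).
  QueryTypes match_all = QUERY_CACHE_REQUESTS | QUERY_CACHE_RESPONSES_WITH_BODIES;
  // Delete wants the disk_cache entries plus bodiless responses, so the
  // per-entry padding can be subtracted before each entry is doomed.
  QueryTypes del = QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_NO_BODIES;

  std::cout << ((match_all & QUERY_CACHE_REQUESTS) != 0) << "\n";         // 1
  std::cout << ((del & QUERY_CACHE_RESPONSES_WITH_BODIES) != 0) << "\n";  // 0
  return 0;
}
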
@@ -1016,11 +1117,19 @@ void CacheStorageCache::WriteSideDataDidReadMetaData(
// Get a temporary copy of the entry pointer before passing it in base::Bind.
disk_cache::Entry* temp_entry_ptr = entry.get();
+ std::unique_ptr<content::proto::CacheResponse> response(
+ headers->release_response());
+
+ int side_data_size_before_write = 0;
+ if (ShouldPadResourceSize(response.get()))
+ side_data_size_before_write = entry->GetDataSize(INDEX_SIDE_DATA);
+
net::CompletionCallback write_side_data_callback =
base::AdaptCallbackForRepeating(
base::BindOnce(&CacheStorageCache::WriteSideDataDidWrite,
weak_ptr_factory_.GetWeakPtr(), std::move(callback),
- base::Passed(std::move(entry)), buf_len));
+ base::Passed(std::move(entry)), buf_len,
+ std::move(response), side_data_size_before_write));
int rv = temp_entry_ptr->WriteData(
INDEX_SIDE_DATA, 0 /* offset */, buffer.get(), buf_len,
@@ -1030,10 +1139,13 @@ void CacheStorageCache::WriteSideDataDidReadMetaData(
write_side_data_callback.Run(rv);
}
-void CacheStorageCache::WriteSideDataDidWrite(ErrorCallback callback,
- disk_cache::ScopedEntryPtr entry,
- int expected_bytes,
- int rv) {
+void CacheStorageCache::WriteSideDataDidWrite(
+ ErrorCallback callback,
+ disk_cache::ScopedEntryPtr entry,
+ int expected_bytes,
+ std::unique_ptr<::content::proto::CacheResponse> response,
+ int side_data_size_before_write,
+ int rv) {
if (rv != expected_bytes) {
entry->Doom();
UpdateCacheSize(
@@ -1044,6 +1156,14 @@ void CacheStorageCache::WriteSideDataDidWrite(ErrorCallback callback,
if (rv > 0)
storage::RecordBytesWritten(kRecordBytesLabel, rv);
+ if (ShouldPadResourceSize(response.get())) {
+ cache_padding_ -= CalculateResponsePaddingInternal(
+ response.get(), cache_padding_key_.get(), side_data_size_before_write);
+
+ cache_padding_ += CalculateResponsePaddingInternal(
+ response.get(), cache_padding_key_.get(), rv);
+ }
+
UpdateCacheSize(base::BindOnce(std::move(callback), CACHE_STORAGE_OK));
}
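Because a padded response's padding term depends on its side-data size, WriteSideDataDidWrite above subtracts the term computed with the pre-write side-data size and adds a fresh term computed with the bytes just written, keeping cache_padding_ consistent. A toy sketch of that bookkeeping, with PaddingFor() standing in (with made-up values) for CalculateResponsePaddingInternal():

// Toy illustration of the running cache_padding_ bookkeeping around a side
// data write; PaddingFor() is a hypothetical stand-in, not the real function.
#include <cstdint>
#include <iostream>

int64_t PaddingFor(int side_data_size) {
  // Made-up deterministic values purely for illustration.
  return side_data_size ? 9000 : 4000;
}

int main() {
  int64_t cache_padding = 4000;          // term recorded when the response was put
  int side_data_size_before_write = 0;   // the entry had no side data yet
  int bytes_written = 512;               // side data just written (rv)

  cache_padding -= PaddingFor(side_data_size_before_write);  // drop the old term
  cache_padding += PaddingFor(bytes_written);                // add the new term

  std::cout << cache_padding << "\n";  // 9000
  return 0;
}
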
@@ -1063,8 +1183,7 @@ void CacheStorageCache::Put(const CacheStorageBatchOperation& operation,
std::unique_ptr<storage::BlobDataHandle> blob_data_handle;
if (!response->blob_uuid.empty()) {
- DCHECK_EQ(response->blob != nullptr,
- base::FeatureList::IsEnabled(features::kMojoBlobs));
+ DCHECK_EQ(response->blob != nullptr, features::IsMojoBlobsEnabled());
if (!blob_storage_context_) {
std::move(callback).Run(CACHE_STORAGE_ERROR_STORAGE);
return;
@@ -1098,26 +1217,36 @@ void CacheStorageCache::PutImpl(std::unique_ptr<PutContext> put_context) {
return;
}
- std::string key = put_context->request->url.spec();
+ // Explicitly delete the incumbent resource (which may not exist). This is
+ // only done so that its padding will be decremented from the calculated
+ // cache padding.
+ // TODO(cmumford): Research alternatives to this explicit delete as it
+ // seriously impacts put performance.
+ auto delete_request = base::MakeUnique<ServiceWorkerFetchRequest>(
+ put_context->request->url, "", ServiceWorkerHeaderMap(), Referrer(),
+ false);
- net::CompletionCallback callback =
- base::AdaptCallbackForRepeating(base::BindOnce(
- &CacheStorageCache::PutDidDoomEntry, weak_ptr_factory_.GetWeakPtr(),
- base::Passed(std::move(put_context))));
-
- int rv = backend_->DoomEntry(key, callback);
- if (rv != net::ERR_IO_PENDING)
- callback.Run(rv);
+ CacheStorageCacheQueryParams query_options;
+ query_options.ignore_method = true;
+ query_options.ignore_vary = true;
+ DeleteImpl(std::move(delete_request), query_options,
+ base::BindOnce(&CacheStorageCache::PutDidDeleteEntry,
+ weak_ptr_factory_.GetWeakPtr(),
+ base::Passed(std::move(put_context))));
}
-void CacheStorageCache::PutDidDoomEntry(std::unique_ptr<PutContext> put_context,
- int rv) {
+void CacheStorageCache::PutDidDeleteEntry(
+ std::unique_ptr<PutContext> put_context,
+ CacheStorageError error) {
if (backend_state_ != BACKEND_OPEN) {
std::move(put_context->callback).Run(CACHE_STORAGE_ERROR_STORAGE);
return;
}
- // |rv| is ignored as doom entry can fail if the entry doesn't exist.
+ if (error != CACHE_STORAGE_OK && error != CACHE_STORAGE_ERROR_NOT_FOUND) {
+ std::move(put_context->callback).Run(error);
+ return;
+ }
std::unique_ptr<disk_cache::Entry*> scoped_entry_ptr(
new disk_cache::Entry*());
@@ -1131,11 +1260,11 @@ void CacheStorageCache::PutDidDoomEntry(std::unique_ptr<PutContext> put_context,
base::Passed(std::move(scoped_entry_ptr)),
base::Passed(std::move(put_context))));
- int create_rv = backend_ptr->CreateEntry(request_ptr->url.spec(), entry_ptr,
- create_entry_callback);
+ int rv = backend_ptr->CreateEntry(request_ptr->url.spec(), entry_ptr,
+ create_entry_callback);
- if (create_rv != net::ERR_IO_PENDING)
- create_entry_callback.Run(create_rv);
+ if (rv != net::ERR_IO_PENDING)
+ create_entry_callback.Run(rv);
}
void CacheStorageCache::PutDidCreateEntry(
@@ -1222,6 +1351,11 @@ void CacheStorageCache::PutDidWriteHeaders(
if (rv > 0)
storage::RecordBytesWritten(kRecordBytesLabel, rv);
+ if (ShouldPadResourceSize(put_context->response.get())) {
+ cache_padding_ += CalculateResponsePadding(*put_context->response,
+ cache_padding_key_.get(),
+ 0 /* side_data_size */);
+ }
// The metadata is written, now for the response content. The data is streamed
// from the blob into the cache entry.
@@ -1273,6 +1407,60 @@ void CacheStorageCache::PutDidWriteBlobToCache(
base::BindOnce(std::move(put_context->callback), CACHE_STORAGE_OK));
}
+void CacheStorageCache::CalculateCacheSizePadding(
+ SizePaddingCallback got_sizes_callback) {
+ net::CompletionCallback got_size_callback =
+ base::AdaptCallbackForRepeating(base::BindOnce(
+ &CacheStorageCache::CalculateCacheSizePaddingGotSize,
+ weak_ptr_factory_.GetWeakPtr(), std::move(got_sizes_callback)));
+
+ int rv = backend_->CalculateSizeOfAllEntries(got_size_callback);
+ if (rv != net::ERR_IO_PENDING)
+ got_size_callback.Run(rv);
+}
+
+void CacheStorageCache::CalculateCacheSizePaddingGotSize(
+ SizePaddingCallback callback,
+ int cache_size) {
+ // Enumerating entries is only done during cache initialization and only if
+ // necessary.
+ DCHECK_EQ(backend_state_, BACKEND_UNINITIALIZED);
+ std::unique_ptr<ServiceWorkerFetchRequest> request;
+ CacheStorageCacheQueryParams options;
+ options.ignore_search = true;
+ QueryCache(std::move(request), options, QUERY_CACHE_RESPONSES_NO_BODIES,
+ base::BindOnce(&CacheStorageCache::PaddingDidQueryCache,
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback),
+ cache_size));
+}
+
+void CacheStorageCache::PaddingDidQueryCache(
+ SizePaddingCallback callback,
+ int cache_size,
+ CacheStorageError error,
+ std::unique_ptr<QueryCacheResults> query_cache_results) {
+ int64_t cache_padding = 0;
+ if (error == CACHE_STORAGE_OK) {
+ for (const auto& result : *query_cache_results) {
+ if (ShouldPadResourceSize(result.response.get())) {
+ int32_t side_data_size =
+ result.entry ? result.entry->GetDataSize(INDEX_SIDE_DATA) : 0;
+ cache_padding += CalculateResponsePadding(
+ *result.response, cache_padding_key_.get(), side_data_size);
+ }
+ }
+ }
+
+ std::move(callback).Run(cache_size, cache_padding);
+}
+
+void CacheStorageCache::CalculateCacheSize(
+ const net::CompletionCallback& callback) {
+ int rv = backend_->CalculateSizeOfAllEntries(callback);
+ if (rv != net::ERR_IO_PENDING)
+ callback.Run(rv);
+}
+
void CacheStorageCache::UpdateCacheSize(base::OnceClosure callback) {
if (backend_state_ != BACKEND_OPEN)
return;
@@ -1280,14 +1468,10 @@ void CacheStorageCache::UpdateCacheSize(base::OnceClosure callback) {
// Note that the callback holds a cache handle to keep the cache alive during
// the operation since this UpdateCacheSize is often run after an operation
// completes and runs its callback.
- net::CompletionCallback size_callback = base::AdaptCallbackForRepeating(
+ CalculateCacheSize(base::AdaptCallbackForRepeating(
base::BindOnce(&CacheStorageCache::UpdateCacheSizeGotSize,
weak_ptr_factory_.GetWeakPtr(),
- base::Passed(CreateCacheHandle()), std::move(callback)));
-
- int rv = backend_->CalculateSizeOfAllEntries(size_callback);
- if (rv != net::ERR_IO_PENDING)
- std::move(size_callback).Run(rv);
+ base::Passed(CreateCacheHandle()), std::move(callback))));
}
void CacheStorageCache::UpdateCacheSizeGotSize(
@@ -1295,10 +1479,9 @@ void CacheStorageCache::UpdateCacheSizeGotSize(
base::OnceClosure callback,
int current_cache_size) {
DCHECK_NE(current_cache_size, CacheStorage::kSizeUnknown);
- int64_t old_cache_size = cache_size_;
cache_size_ = current_cache_size;
-
- int64_t size_delta = current_cache_size - old_cache_size;
+ int64_t size_delta = PaddedCacheSize() - last_reported_size_;
+ last_reported_size_ = PaddedCacheSize();
quota_manager_proxy_->NotifyStorageModified(
storage::QuotaClient::kServiceWorkerCache, origin_,
@@ -1308,7 +1491,7 @@ void CacheStorageCache::UpdateCacheSizeGotSize(
cache_storage_->NotifyCacheContentChanged(cache_name_);
if (cache_observer_)
- cache_observer_->CacheSizeUpdated(this, current_cache_size);
+ cache_observer_->CacheSizeUpdated(this);
std::move(callback).Run();
}
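With padding in place, the quota manager is notified of the change in padded size (real size plus padding) rather than the change in raw size, so last_reported_size_ records the last value that was reported. A small numeric sketch of the delta computation, using made-up sizes:

// Sketch of delta reporting against the padded size, with made-up numbers.
#include <cstdint>
#include <iostream>

int main() {
  int64_t cache_size = 100 * 1024;         // real bytes in the backend
  int64_t cache_padding = 9000;            // accumulated opaque-response padding
  int64_t last_reported_size = 80 * 1024;  // what quota was last told

  int64_t padded = cache_size + cache_padding;       // PaddedCacheSize()
  int64_t size_delta = padded - last_reported_size;  // passed to NotifyStorageModified()
  last_reported_size = padded;

  std::cout << size_delta << "\n";  // 29480
  return 0;
}
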
@@ -1342,7 +1525,8 @@ void CacheStorageCache::DeleteImpl(
}
QueryCache(
- std::move(request), match_params, QueryCacheType::CACHE_ENTRIES,
+ std::move(request), match_params,
+ QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_NO_BODIES,
base::BindOnce(&CacheStorageCache::DeleteDidQueryCache,
weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
}
@@ -1363,6 +1547,11 @@ void CacheStorageCache::DeleteDidQueryCache(
for (auto& result : *query_cache_results) {
disk_cache::ScopedEntryPtr entry = std::move(result.entry);
+ if (ShouldPadResourceSize(result.response.get())) {
+ cache_padding_ -=
+ CalculateResponsePadding(*result.response, cache_padding_key_.get(),
+ entry->GetDataSize(INDEX_SIDE_DATA));
+ }
entry->Doom();
}
@@ -1381,7 +1570,7 @@ void CacheStorageCache::KeysImpl(
}
QueryCache(
- std::move(request), options, QueryCacheType::REQUESTS,
+ std::move(request), options, QUERY_CACHE_REQUESTS,
base::BindOnce(&CacheStorageCache::KeysDidQueryCache,
weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
}
@@ -1421,7 +1610,14 @@ void CacheStorageCache::DeleteBackendCompletedIO() {
void CacheStorageCache::SizeImpl(SizeCallback callback) {
DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
- int64_t size = backend_state_ == BACKEND_OPEN ? cache_size_ : 0;
+ // TODO(cmumford): Can CacheStorage::kSizeUnknown be returned instead of zero?
+ if (backend_state_ != BACKEND_OPEN) {
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::BindOnce(std::move(callback), 0));
+ return;
+ }
+
+ int64_t size = backend_state_ == BACKEND_OPEN ? PaddedCacheSize() : 0;
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::BindOnce(std::move(callback), size));
}
@@ -1494,19 +1690,24 @@ void CacheStorageCache::InitDidCreateBackend(
return;
}
- net::CompletionCallback size_callback =
- base::AdaptCallbackForRepeating(base::BindOnce(
- &CacheStorageCache::InitGotCacheSize, weak_ptr_factory_.GetWeakPtr(),
- std::move(callback), cache_create_error));
+ auto calculate_size_callback =
+ base::AdaptCallbackForRepeating(std::move(callback));
+ int rv = backend_->CalculateSizeOfAllEntries(base::Bind(
+ &CacheStorageCache::InitGotCacheSize, weak_ptr_factory_.GetWeakPtr(),
+ calculate_size_callback, cache_create_error));
- int rv = backend_->CalculateSizeOfAllEntries(size_callback);
if (rv != net::ERR_IO_PENDING)
- std::move(size_callback).Run(rv);
+ InitGotCacheSize(calculate_size_callback, cache_create_error, rv);
}
void CacheStorageCache::InitGotCacheSize(base::OnceClosure callback,
CacheStorageError cache_create_error,
int cache_size) {
+ if (cache_create_error != CACHE_STORAGE_OK) {
+ InitGotCacheSizeAndPadding(std::move(callback), cache_create_error, 0, 0);
+ return;
+ }
+
// Now that we know the cache size either 1) the cache size should be unknown
// (which is why the size was calculated), or 2) it must match the current
// size. If the sizes aren't equal then there is a bug in how the cache size
@@ -1517,10 +1718,41 @@ void CacheStorageCache::InitGotCacheSize(base::OnceClosure callback,
<< " does not match size from index: " << cache_size_;
UMA_HISTOGRAM_COUNTS_10M("ServiceWorkerCache.IndexSizeDifference",
std::abs(cache_size_ - cache_size));
- // Disabled for crbug.com/681900.
- // DCHECK_EQ(cache_size_, cache_size);
+ if (cache_size_ != cache_size) {
+ // We assume that if the sizes match then the cached padding is still
+ // correct. If not then we recalculate the padding.
+ CalculateCacheSizePaddingGotSize(
+ base::BindOnce(&CacheStorageCache::InitGotCacheSizeAndPadding,
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback),
+ cache_create_error),
+ cache_size);
+ return;
+ }
+ }
+
+ if (cache_padding_ == CacheStorage::kSizeUnknown || cache_padding_ < 0) {
+ CalculateCacheSizePaddingGotSize(
+ base::BindOnce(&CacheStorageCache::InitGotCacheSizeAndPadding,
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback),
+ cache_create_error),
+ cache_size);
+ return;
}
+
+ // If cached size matches actual size then assume cached padding is still
+ // correct.
+ InitGotCacheSizeAndPadding(std::move(callback), cache_create_error,
+ cache_size, cache_padding_);
+}
+
+void CacheStorageCache::InitGotCacheSizeAndPadding(
+ base::OnceClosure callback,
+ CacheStorageError cache_create_error,
+ int64_t cache_size,
+ int64_t cache_padding) {
cache_size_ = cache_size;
+ cache_padding_ = cache_padding;
+
initializing_ = false;
backend_state_ = (cache_create_error == CACHE_STORAGE_OK && backend_ &&
backend_state_ == BACKEND_UNINITIALIZED)
@@ -1531,7 +1763,7 @@ void CacheStorageCache::InitGotCacheSize(base::OnceClosure callback,
cache_create_error, CACHE_STORAGE_ERROR_LAST + 1);
if (cache_observer_)
- cache_observer_->CacheSizeUpdated(this, cache_size_);
+ cache_observer_->CacheSizeUpdated(this);
std::move(callback).Run();
}
@@ -1552,7 +1784,7 @@ CacheStorageCache::PopulateResponseBody(disk_cache::ScopedEntryPtr entry,
temp_entry, INDEX_RESPONSE_BODY, INDEX_SIDE_DATA);
auto result = blob_storage_context_->AddFinishedBlob(&blob_data);
- if (base::FeatureList::IsEnabled(features::kMojoBlobs)) {
+ if (features::IsMojoBlobsEnabled()) {
storage::mojom::BlobPtr blob_ptr;
storage::BlobImpl::Create(
base::MakeUnique<storage::BlobDataHandle>(*result),
@@ -1569,4 +1801,13 @@ CacheStorageCache::CreateCacheHandle() {
return cache_storage_->CreateCacheHandle(this);
}
+int64_t CacheStorageCache::PaddedCacheSize() const {
+ DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
+ if (cache_size_ == CacheStorage::kSizeUnknown ||
+ cache_padding_ == CacheStorage::kSizeUnknown) {
+ return CacheStorage::kSizeUnknown;
+ }
+ return cache_size_ + cache_padding_;
+}
+
} // namespace content