author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2019-08-30 10:22:43 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2019-08-30 12:36:28 +0000
commit    271a6c3487a14599023a9106329505597638d793 (patch)
tree      e040d58ffc86c1480b79ca8528020ca9ec919bf8 /chromium/net/reporting
parent    7b2ffa587235a47d4094787d72f38102089f402a (diff)
download  qtwebengine-chromium-271a6c3487a14599023a9106329505597638d793.tar.gz
BASELINE: Update Chromium to 77.0.3865.59
Change-Id: I1e89a5f3b009a9519a6705102ad65c92fe736f21
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/net/reporting')
-rw-r--r--  chromium/net/reporting/OWNERS | 1
-rw-r--r--  chromium/net/reporting/mock_persistent_reporting_store.cc | 11
-rw-r--r--  chromium/net/reporting/mock_persistent_reporting_store.h | 6
-rw-r--r--  chromium/net/reporting/mock_persistent_reporting_store_unittest.cc | 77
-rw-r--r--  chromium/net/reporting/reporting_cache.cc | 5
-rw-r--r--  chromium/net/reporting/reporting_cache.h | 14
-rw-r--r--  chromium/net/reporting/reporting_cache_impl.cc | 217
-rw-r--r--  chromium/net/reporting/reporting_cache_impl.h | 24
-rw-r--r--  chromium/net/reporting/reporting_cache_unittest.cc | 391
-rw-r--r--  chromium/net/reporting/reporting_context.cc | 13
-rw-r--r--  chromium/net/reporting/reporting_context.h | 15
-rw-r--r--  chromium/net/reporting/reporting_delivery_agent.cc | 14
-rw-r--r--  chromium/net/reporting/reporting_endpoint.cc | 9
-rw-r--r--  chromium/net/reporting/reporting_endpoint.h | 13
-rw-r--r--  chromium/net/reporting/reporting_endpoint_manager.cc | 10
-rw-r--r--  chromium/net/reporting/reporting_garbage_collector.cc | 2
-rw-r--r--  chromium/net/reporting/reporting_header_parser.cc | 10
-rw-r--r--  chromium/net/reporting/reporting_header_parser.h | 1
-rw-r--r--  chromium/net/reporting/reporting_header_parser_unittest.cc | 734
-rw-r--r--  chromium/net/reporting/reporting_service.cc | 2
20 files changed, 1435 insertions, 134 deletions
diff --git a/chromium/net/reporting/OWNERS b/chromium/net/reporting/OWNERS
new file mode 100644
index 00000000000..7117a80a72b
--- /dev/null
+++ b/chromium/net/reporting/OWNERS
@@ -0,0 +1 @@
+chlily@chromium.org
diff --git a/chromium/net/reporting/mock_persistent_reporting_store.cc b/chromium/net/reporting/mock_persistent_reporting_store.cc
index df95febe9af..b56c80f0a02 100644
--- a/chromium/net/reporting/mock_persistent_reporting_store.cc
+++ b/chromium/net/reporting/mock_persistent_reporting_store.cc
@@ -4,6 +4,8 @@
#include "net/reporting/mock_persistent_reporting_store.h"
+#include <algorithm>
+
namespace net {
MockPersistentReportingStore::Command::Command(
@@ -187,6 +189,15 @@ bool MockPersistentReportingStore::VerifyCommands(
return command_list_ == expected_commands;
}
+int MockPersistentReportingStore::CountCommands(Command::Type t) {
+ int c = 0;
+ for (const auto& cmd : command_list_) {
+ if (cmd.type == t)
+ ++c;
+ }
+ return c;
+}
+
MockPersistentReportingStore::CommandList
MockPersistentReportingStore::GetAllCommands() const {
return command_list_;
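
For illustration: the newly added <algorithm> include makes std::count_if available, so CountCommands() could equivalently be written as the following sketch (not part of the patch, assuming the same Command and command_list_ members):

int MockPersistentReportingStore::CountCommands(Command::Type t) {
  // Count recorded commands whose type matches |t|.
  return static_cast<int>(
      std::count_if(command_list_.begin(), command_list_.end(),
                    [t](const Command& cmd) { return cmd.type == t; }));
}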
diff --git a/chromium/net/reporting/mock_persistent_reporting_store.h b/chromium/net/reporting/mock_persistent_reporting_store.h
index ea05f8e3a3f..cbfe43763c2 100644
--- a/chromium/net/reporting/mock_persistent_reporting_store.h
+++ b/chromium/net/reporting/mock_persistent_reporting_store.h
@@ -20,6 +20,8 @@ namespace net {
// received commands in order in a vector, to be checked by tests. Simulates
// loading pre-existing stored endpoints and endpoint groups, which can be
// provided using SetPrestoredClients().
+//
+// TODO(sburnett): Replace this with a fake store to reduce awkwardness.
class MockPersistentReportingStore
: public ReportingCache::PersistentReportingStore {
public:
@@ -103,8 +105,12 @@ class MockPersistentReportingStore
void FinishLoading(bool load_success);
// Verify that |command_list_| matches |expected_commands|.
+ // TODO(sburnett): Replace this with a set of gmock matchers.
bool VerifyCommands(const CommandList& expected_commands) const;
+ // Count the number of commands with type |t|.
+ int CountCommands(Command::Type t);
+
CommandList GetAllCommands() const;
// Gets the number of stored endpoints/groups, simulating the actual number
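
For illustration, a condensed test-side sketch of the flow described in the class comment above; |endpoints|, |groups| and |loaded_callback| are assumed to be set up by the caller, as the unit tests below do:

MockPersistentReportingStore store;
store.SetPrestoredClients(endpoints, groups);  // Simulate data already on disk.
store.LoadReportingClients(std::move(loaded_callback));  // Records LOAD_REPORTING_CLIENTS.
store.FinishLoading(true /* load_success */);  // Runs |loaded_callback| with the prestored data.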
diff --git a/chromium/net/reporting/mock_persistent_reporting_store_unittest.cc b/chromium/net/reporting/mock_persistent_reporting_store_unittest.cc
index b2006ae6e25..0c4c6660c9e 100644
--- a/chromium/net/reporting/mock_persistent_reporting_store_unittest.cc
+++ b/chromium/net/reporting/mock_persistent_reporting_store_unittest.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "net/reporting/mock_persistent_reporting_store.h"
+
#include <vector>
#include "base/location.h"
#include "base/test/bind_test_util.h"
#include "base/time/time.h"
-#include "net/reporting/mock_persistent_reporting_store.h"
#include "net/reporting/reporting_endpoint.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "url/gurl.h"
@@ -17,6 +18,8 @@ namespace net {
namespace {
+using CommandType = MockPersistentReportingStore::Command::Type;
+
const url::Origin kOrigin = url::Origin::Create(GURL("https://example.test/"));
const char kGroupName[] = "groupname";
const GURL kUrl = GURL("https://endpoint.test/reports");
@@ -59,8 +62,7 @@ TEST(MockPersistentReportingStoreTest, FinishLoading) {
store.LoadReportingClients(MakeExpectedRunReportingClientsLoadedCallback(
&loaded_endpoints, &loaded_groups));
- expected_commands.emplace_back(
- MockPersistentReportingStore::Command::Type::LOAD_REPORTING_CLIENTS);
+ expected_commands.emplace_back(CommandType::LOAD_REPORTING_CLIENTS);
store.FinishLoading(true /* load_success */);
EXPECT_EQ(0u, loaded_endpoints.size());
@@ -82,8 +84,7 @@ TEST(MockPersistentReportingStoreTest, PreStoredClients) {
store.LoadReportingClients(MakeExpectedRunReportingClientsLoadedCallback(
&loaded_endpoints, &loaded_groups));
- expected_commands.emplace_back(
- MockPersistentReportingStore::Command::Type::LOAD_REPORTING_CLIENTS);
+ expected_commands.emplace_back(CommandType::LOAD_REPORTING_CLIENTS);
store.FinishLoading(true /* load_success */);
EXPECT_EQ(1u, loaded_endpoints.size());
@@ -105,8 +106,7 @@ TEST(MockPersistentReportingStoreTest, FailedLoad) {
store.LoadReportingClients(MakeExpectedRunReportingClientsLoadedCallback(
&loaded_endpoints, &loaded_groups));
- expected_commands.emplace_back(
- MockPersistentReportingStore::Command::Type::LOAD_REPORTING_CLIENTS);
+ expected_commands.emplace_back(CommandType::LOAD_REPORTING_CLIENTS);
store.FinishLoading(false /* load_success */);
EXPECT_EQ(0u, loaded_endpoints.size());
@@ -123,8 +123,7 @@ TEST(MockPersistentReportingStoreTest, AddFlushDeleteFlush) {
store.LoadReportingClients(MakeExpectedRunReportingClientsLoadedCallback(
&loaded_endpoints, &loaded_groups));
- expected_commands.emplace_back(
- MockPersistentReportingStore::Command::Type::LOAD_REPORTING_CLIENTS);
+ expected_commands.emplace_back(CommandType::LOAD_REPORTING_CLIENTS);
EXPECT_EQ(1u, store.GetAllCommands().size());
store.FinishLoading(true /* load_success */);
@@ -134,44 +133,74 @@ TEST(MockPersistentReportingStoreTest, AddFlushDeleteFlush) {
EXPECT_EQ(0, store.StoredEndpointGroupsCount());
store.AddReportingEndpoint(kEndpoint);
- expected_commands.emplace_back(
- MockPersistentReportingStore::Command::Type::ADD_REPORTING_ENDPOINT,
- kEndpoint);
+ expected_commands.emplace_back(CommandType::ADD_REPORTING_ENDPOINT,
+ kEndpoint);
EXPECT_EQ(2u, store.GetAllCommands().size());
store.AddReportingEndpointGroup(kGroup);
- expected_commands.emplace_back(
- MockPersistentReportingStore::Command::Type::ADD_REPORTING_ENDPOINT_GROUP,
- kGroup);
+ expected_commands.emplace_back(CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ kGroup);
EXPECT_EQ(3u, store.GetAllCommands().size());
store.Flush();
- expected_commands.emplace_back(
- MockPersistentReportingStore::Command::Type::FLUSH);
+ expected_commands.emplace_back(CommandType::FLUSH);
EXPECT_EQ(4u, store.GetAllCommands().size());
EXPECT_EQ(1, store.StoredEndpointsCount());
EXPECT_EQ(1, store.StoredEndpointGroupsCount());
store.DeleteReportingEndpoint(kEndpoint);
- expected_commands.emplace_back(
- MockPersistentReportingStore::Command::Type::DELETE_REPORTING_ENDPOINT,
- kEndpoint);
+ expected_commands.emplace_back(CommandType::DELETE_REPORTING_ENDPOINT,
+ kEndpoint);
EXPECT_EQ(5u, store.GetAllCommands().size());
store.DeleteReportingEndpointGroup(kGroup);
- expected_commands.emplace_back(MockPersistentReportingStore::Command::Type::
- DELETE_REPORTING_ENDPOINT_GROUP,
+ expected_commands.emplace_back(CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
kGroup);
EXPECT_EQ(6u, store.GetAllCommands().size());
store.Flush();
- expected_commands.emplace_back(
- MockPersistentReportingStore::Command::Type::FLUSH);
+ expected_commands.emplace_back(CommandType::FLUSH);
EXPECT_EQ(7u, store.GetAllCommands().size());
EXPECT_EQ(0, store.StoredEndpointsCount());
EXPECT_EQ(0, store.StoredEndpointGroupsCount());
EXPECT_TRUE(store.VerifyCommands(expected_commands));
+
+ EXPECT_EQ(1, store.CountCommands(CommandType::LOAD_REPORTING_CLIENTS));
+ EXPECT_EQ(
+ 0, store.CountCommands(CommandType::UPDATE_REPORTING_ENDPOINT_DETAILS));
+}
+
+TEST(MockPersistentReportingStoreTest, CountCommands) {
+ MockPersistentReportingStore store;
+
+ std::vector<ReportingEndpoint> loaded_endpoints;
+ std::vector<CachedReportingEndpointGroup> loaded_groups;
+ store.LoadReportingClients(MakeExpectedRunReportingClientsLoadedCallback(
+ &loaded_endpoints, &loaded_groups));
+ store.FinishLoading(true /* load_success */);
+
+ store.AddReportingEndpoint(kEndpoint);
+ store.AddReportingEndpointGroup(kGroup);
+ store.Flush();
+
+ store.DeleteReportingEndpoint(kEndpoint);
+ store.DeleteReportingEndpointGroup(kGroup);
+ store.Flush();
+
+ EXPECT_EQ(1, store.CountCommands(CommandType::LOAD_REPORTING_CLIENTS));
+ EXPECT_EQ(1, store.CountCommands(CommandType::ADD_REPORTING_ENDPOINT));
+ EXPECT_EQ(1, store.CountCommands(CommandType::ADD_REPORTING_ENDPOINT_GROUP));
+ EXPECT_EQ(0, store.CountCommands(
+ CommandType::UPDATE_REPORTING_ENDPOINT_GROUP_ACCESS_TIME));
+ EXPECT_EQ(
+ 0, store.CountCommands(CommandType::UPDATE_REPORTING_ENDPOINT_DETAILS));
+ EXPECT_EQ(0, store.CountCommands(
+ CommandType::UPDATE_REPORTING_ENDPOINT_GROUP_DETAILS));
+ EXPECT_EQ(1, store.CountCommands(CommandType::DELETE_REPORTING_ENDPOINT));
+ EXPECT_EQ(1,
+ store.CountCommands(CommandType::DELETE_REPORTING_ENDPOINT_GROUP));
+ EXPECT_EQ(2, store.CountCommands(CommandType::FLUSH));
}
} // namespace
diff --git a/chromium/net/reporting/reporting_cache.cc b/chromium/net/reporting/reporting_cache.cc
index ec5ef10150d..8773122dab3 100644
--- a/chromium/net/reporting/reporting_cache.cc
+++ b/chromium/net/reporting/reporting_cache.cc
@@ -11,9 +11,8 @@ namespace net {
// static
std::unique_ptr<ReportingCache> ReportingCache::Create(
- ReportingContext* context,
- PersistentReportingStore* store) {
- return std::make_unique<ReportingCacheImpl>(context, store);
+ ReportingContext* context) {
+ return std::make_unique<ReportingCacheImpl>(context);
}
ReportingCache::~ReportingCache() = default;
diff --git a/chromium/net/reporting/reporting_cache.h b/chromium/net/reporting/reporting_cache.h
index 6b441fa8268..a9f6ad76b50 100644
--- a/chromium/net/reporting/reporting_cache.h
+++ b/chromium/net/reporting/reporting_cache.h
@@ -48,10 +48,7 @@ class NET_EXPORT ReportingCache {
public:
class PersistentReportingStore;
- // |store| should outlive the ReportingCache.
- static std::unique_ptr<ReportingCache> Create(
- ReportingContext* context,
- PersistentReportingStore* store);
+ static std::unique_ptr<ReportingCache> Create(ReportingContext* context);
virtual ~ReportingCache();
@@ -162,6 +159,15 @@ class NET_EXPORT ReportingCache {
// they become empty.
virtual void RemoveEndpointsForUrl(const GURL& url) = 0;
+ // Insert endpoints and endpoint groups that have been loaded from the store.
+ //
+ // You must only call this method if context.store() was non-null when you
+ // constructed the cache and persist_clients_across_restarts in your
+ // ReportingPolicy is true.
+ virtual void AddClientsLoadedFromStore(
+ std::vector<ReportingEndpoint> loaded_endpoints,
+ std::vector<CachedReportingEndpointGroup> loaded_endpoint_groups) = 0;
+
// Gets endpoints that apply to a delivery for |origin| and |group|.
//
// First checks for |group| in a client exactly matching |origin|.
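
A wiring sketch for the new AddClientsLoadedFromStore() entry point, mirroring the LoadReportingClients() test helper added later in this patch; it assumes |store| and |cache| remain alive until loading finishes and that client persistence is enabled in the ReportingPolicy:

// Ask the persistent store for the stored clients and feed the result
// straight into the cache once loading completes.
store->LoadReportingClients(
    base::BindOnce(&ReportingCache::AddClientsLoadedFromStore,
                   base::Unretained(cache)));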
diff --git a/chromium/net/reporting/reporting_cache_impl.cc b/chromium/net/reporting/reporting_cache_impl.cc
index 01f343d1fdd..52cf80e9305 100644
--- a/chromium/net/reporting/reporting_cache_impl.cc
+++ b/chromium/net/reporting/reporting_cache_impl.cc
@@ -40,20 +40,19 @@ std::string GetSuperdomain(const std::string& domain) {
} // namespace
-ReportingCacheImpl::ReportingCacheImpl(ReportingContext* context,
- PersistentReportingStore* store)
- : context_(context), store_(store) {
+ReportingCacheImpl::ReportingCacheImpl(ReportingContext* context)
+ : context_(context) {
DCHECK(context_);
}
ReportingCacheImpl::~ReportingCacheImpl() {
- base::TimeTicks now = tick_clock()->NowTicks();
+ base::TimeTicks now = tick_clock().NowTicks();
// Mark all undoomed reports as erased at shutdown, and record outcomes of
// all remaining reports (doomed or not).
for (auto it = reports_.begin(); it != reports_.end(); ++it) {
ReportingReport* report = it->second.get();
- if (!base::ContainsKey(doomed_reports_, report))
+ if (!base::Contains(doomed_reports_, report))
report->outcome = ReportingReport::Outcome::ERASED_REPORTING_SHUT_DOWN;
report->RecordOutcome(now);
}
@@ -84,7 +83,7 @@ void ReportingCacheImpl::AddReport(const GURL& url,
DCHECK_NE(nullptr, to_evict);
// The newly-added report isn't pending, so even if all other reports are
// pending, the cache should have a report to evict.
- DCHECK(!base::ContainsKey(pending_reports_, to_evict));
+ DCHECK(!base::Contains(pending_reports_, to_evict));
reports_[to_evict]->outcome = ReportingReport::Outcome::ERASED_EVICTED;
RemoveReportInternal(to_evict);
}
@@ -96,7 +95,7 @@ void ReportingCacheImpl::GetReports(
std::vector<const ReportingReport*>* reports_out) const {
reports_out->clear();
for (const auto& it : reports_) {
- if (!base::ContainsKey(doomed_reports_, it.first))
+ if (!base::Contains(doomed_reports_, it.first))
reports_out->push_back(it.second.get());
}
}
@@ -131,9 +130,9 @@ base::Value ReportingCacheImpl::GetReportsAsValue() const {
if (report->body) {
report_dict.SetKey("body", report->body->Clone());
}
- if (base::ContainsKey(doomed_reports_, report)) {
+ if (base::Contains(doomed_reports_, report)) {
report_dict.SetKey("status", base::Value("doomed"));
- } else if (base::ContainsKey(pending_reports_, report)) {
+ } else if (base::Contains(pending_reports_, report)) {
report_dict.SetKey("status", base::Value("pending"));
} else {
report_dict.SetKey("status", base::Value("queued"));
@@ -147,8 +146,8 @@ void ReportingCacheImpl::GetNonpendingReports(
std::vector<const ReportingReport*>* reports_out) const {
reports_out->clear();
for (const auto& it : reports_) {
- if (!base::ContainsKey(pending_reports_, it.first) &&
- !base::ContainsKey(doomed_reports_, it.first)) {
+ if (!base::Contains(pending_reports_, it.first) &&
+ !base::Contains(doomed_reports_, it.first)) {
reports_out->push_back(it.second.get());
}
}
@@ -169,7 +168,7 @@ void ReportingCacheImpl::ClearReportsPending(
for (const ReportingReport* report : reports) {
size_t erased = pending_reports_.erase(report);
DCHECK_EQ(1u, erased);
- if (base::ContainsKey(doomed_reports_, report)) {
+ if (base::Contains(doomed_reports_, report)) {
reports_to_remove.push_back(report);
doomed_reports_.erase(report);
}
@@ -182,7 +181,7 @@ void ReportingCacheImpl::ClearReportsPending(
void ReportingCacheImpl::IncrementReportsAttempts(
const std::vector<const ReportingReport*>& reports) {
for (const ReportingReport* report : reports) {
- DCHECK(base::ContainsKey(reports_, report));
+ DCHECK(base::Contains(reports_, report));
reports_[report]->attempts++;
}
@@ -216,10 +215,10 @@ void ReportingCacheImpl::RemoveReports(
ReportingReport::Outcome outcome) {
for (const ReportingReport* report : reports) {
reports_[report]->outcome = outcome;
- if (base::ContainsKey(pending_reports_, report)) {
+ if (base::Contains(pending_reports_, report)) {
doomed_reports_.insert(report);
} else {
- DCHECK(!base::ContainsKey(doomed_reports_, report));
+ DCHECK(!base::Contains(doomed_reports_, report));
RemoveReportInternal(report);
}
}
@@ -232,7 +231,7 @@ void ReportingCacheImpl::RemoveAllReports(ReportingReport::Outcome outcome) {
for (auto it = reports_.begin(); it != reports_.end(); ++it) {
ReportingReport* report = it->second.get();
report->outcome = outcome;
- if (!base::ContainsKey(pending_reports_, report))
+ if (!base::Contains(pending_reports_, report))
reports_to_remove.push_back(report);
else
doomed_reports_.insert(report);
@@ -250,12 +249,12 @@ size_t ReportingCacheImpl::GetFullReportCountForTesting() const {
bool ReportingCacheImpl::IsReportPendingForTesting(
const ReportingReport* report) const {
- return base::ContainsKey(pending_reports_, report);
+ return base::Contains(pending_reports_, report);
}
bool ReportingCacheImpl::IsReportDoomedForTesting(
const ReportingReport* report) const {
- return base::ContainsKey(doomed_reports_, report);
+ return base::Contains(doomed_reports_, report);
}
void ReportingCacheImpl::OnParsedHeader(
@@ -264,12 +263,13 @@ void ReportingCacheImpl::OnParsedHeader(
SanityCheckClients();
OriginClient new_client(origin);
- base::Time now = clock()->Now();
+ base::Time now = clock().Now();
new_client.last_used = now;
+ std::map<ReportingEndpointGroupKey, std::set<GURL>> endpoints_per_group;
+
for (const auto& parsed_endpoint_group : parsed_header) {
new_client.endpoint_group_names.insert(parsed_endpoint_group.name);
- new_client.endpoint_count += parsed_endpoint_group.endpoints.size();
// Creates an endpoint group and sets its |last_used| to |now|.
CachedReportingEndpointGroup new_group(new_client.origin,
@@ -278,6 +278,7 @@ void ReportingCacheImpl::OnParsedHeader(
std::set<GURL> new_endpoints;
for (const auto& parsed_endpoint_info : parsed_endpoint_group.endpoints) {
new_endpoints.insert(parsed_endpoint_info.url);
+ endpoints_per_group[new_group.group_key].insert(parsed_endpoint_info.url);
ReportingEndpoint new_endpoint(origin, parsed_endpoint_group.name,
std::move(parsed_endpoint_info));
AddOrUpdateEndpoint(std::move(new_endpoint));
@@ -290,6 +291,14 @@ void ReportingCacheImpl::OnParsedHeader(
AddOrUpdateEndpointGroup(std::move(new_group));
}
+ // Compute the total endpoint count for this origin. We can't just count the
+ // number of endpoints per group because there may be duplicate endpoint URLs,
+ // which we ignore. See http://crbug.com/983000 for discussion.
+ // TODO(crbug.com/983000): Allow duplicate endpoint URLs.
+ for (const auto& group_key_and_endpoint_set : endpoints_per_group) {
+ new_client.endpoint_count += group_key_and_endpoint_set.second.size();
+ }
+
// Remove endpoint groups that may have been configured for an existing client
// for |origin|, but which are not specified in the current header.
RemoveEndpointGroupsForOriginOtherThan(origin,
@@ -324,10 +333,17 @@ void ReportingCacheImpl::RemoveClient(const url::Origin& origin) {
void ReportingCacheImpl::RemoveAllClients() {
SanityCheckClients();
- origin_clients_.clear();
- endpoint_groups_.clear();
- endpoints_.clear();
- endpoint_its_by_url_.clear();
+
+ auto remove_it = origin_clients_.begin();
+ while (remove_it != origin_clients_.end()) {
+ remove_it = RemoveClientInternal(remove_it);
+ }
+
+ DCHECK(origin_clients_.empty());
+ DCHECK(endpoint_groups_.empty());
+ DCHECK(endpoints_.empty());
+ DCHECK(endpoint_its_by_url_.empty());
+
SanityCheckClients();
context_->NotifyCachedClientsUpdated();
}
@@ -382,11 +398,103 @@ void ReportingCacheImpl::RemoveEndpointsForUrl(const GURL& url) {
context_->NotifyCachedClientsUpdated();
}
+// Reconstruct an OriginClient from the loaded endpoint groups, and add the
+// loaded endpoints and endpoint groups into the cache.
+void ReportingCacheImpl::AddClientsLoadedFromStore(
+ std::vector<ReportingEndpoint> loaded_endpoints,
+ std::vector<CachedReportingEndpointGroup> loaded_endpoint_groups) {
+ DCHECK(context_->IsClientDataPersisted());
+
+ std::sort(loaded_endpoints.begin(), loaded_endpoints.end(),
+ [](const ReportingEndpoint& a, const ReportingEndpoint& b) -> bool {
+ return a.group_key < b.group_key;
+ });
+ std::sort(loaded_endpoint_groups.begin(), loaded_endpoint_groups.end(),
+ [](const CachedReportingEndpointGroup& a,
+ const CachedReportingEndpointGroup& b) -> bool {
+ return a.group_key < b.group_key;
+ });
+
+ // If using a persistent store, cache should be empty before loading finishes.
+ DCHECK(origin_clients_.empty());
+ DCHECK(endpoint_groups_.empty());
+ DCHECK(endpoints_.empty());
+ DCHECK(endpoint_its_by_url_.empty());
+
+ // |loaded_endpoints| and |loaded_endpoint_groups| should both be sorted by
+ // origin and group name.
+ auto endpoints_it = loaded_endpoints.begin();
+ auto endpoint_groups_it = loaded_endpoint_groups.begin();
+
+ base::Optional<OriginClient> origin_client;
+
+ while (endpoint_groups_it != loaded_endpoint_groups.end() &&
+ endpoints_it != loaded_endpoints.end()) {
+ const CachedReportingEndpointGroup& group = *endpoint_groups_it;
+ const ReportingEndpointGroupKey& group_key = group.group_key;
+
+ if (group_key < endpoints_it->group_key) {
+ // This endpoint group has no associated endpoints, so move on to the next
+ // endpoint group.
+ ++endpoint_groups_it;
+ continue;
+ } else if (group_key > endpoints_it->group_key) {
+ // This endpoint has no associated endpoint group, so move on to the next
+ // endpoint.
+ ++endpoints_it;
+ continue;
+ }
+
+ DCHECK(group_key == endpoints_it->group_key);
+
+ size_t cur_group_endpoints_count = 0;
+
+ // Insert the endpoints corresponding to this group.
+ while (endpoints_it != loaded_endpoints.end() &&
+ endpoints_it->group_key == group_key) {
+ EndpointMap::iterator inserted = endpoints_.insert(
+ std::make_pair(group_key, std::move(*endpoints_it)));
+ endpoint_its_by_url_.insert(
+ std::make_pair(inserted->second.info.url, inserted));
+ ++cur_group_endpoints_count;
+ ++endpoints_it;
+ }
+
+ if (!origin_client || origin_client->origin != group_key.origin) {
+ // Store the old origin_client and start a new one.
+ if (origin_client) {
+ OriginClientMap::iterator client_it =
+ origin_clients_.insert(std::make_pair(origin_client->origin.host(),
+ std::move(*origin_client)));
+ EnforcePerOriginAndGlobalEndpointLimits(client_it->second.origin);
+ }
+ origin_client.emplace(group_key.origin);
+ }
+ DCHECK(origin_client.has_value());
+ origin_client->endpoint_group_names.insert(group_key.group_name);
+ origin_client->endpoint_count += cur_group_endpoints_count;
+ origin_client->last_used =
+ std::max(origin_client->last_used, group.last_used);
+
+ endpoint_groups_.insert(std::make_pair(group_key, std::move(group)));
+
+ ++endpoint_groups_it;
+ }
+
+ if (origin_client) {
+ OriginClientMap::iterator client_it = origin_clients_.insert(std::make_pair(
+ origin_client->origin.host(), std::move(*origin_client)));
+ EnforcePerOriginAndGlobalEndpointLimits(client_it->second.origin);
+ }
+
+ SanityCheckClients();
+}
+
std::vector<ReportingEndpoint>
ReportingCacheImpl::GetCandidateEndpointsForDelivery(
const url::Origin& origin,
const std::string& group_name) {
- base::Time now = clock()->Now();
+ base::Time now = clock().Now();
SanityCheckClients();
// Look for an exact origin match for |origin| and |group|.
@@ -411,7 +519,7 @@ ReportingCacheImpl::GetCandidateEndpointsForDelivery(
// Client for a superdomain of |origin|
const OriginClient& client = client_it->second;
// Check if |client| has a group with the requested name.
- if (!base::ContainsKey(client.endpoint_group_names, group_name))
+ if (!base::Contains(client.endpoint_group_names, group_name))
continue;
ReportingEndpointGroupKey group_key(client.origin, group_name);
@@ -503,7 +611,7 @@ void ReportingCacheImpl::SetEndpointForTesting(
origin_clients_.insert(std::make_pair(domain, std::move(new_client)));
}
- base::Time now = clock()->Now();
+ base::Time now = clock().Now();
ReportingEndpointGroupKey group_key(origin, group_name);
EndpointGroupMap::iterator group_it = FindEndpointGroupIt(group_key);
@@ -557,16 +665,8 @@ ReportingCacheImpl::OriginClient::OriginClient(OriginClient&& other) = default;
ReportingCacheImpl::OriginClient::~OriginClient() = default;
-bool ReportingCacheImpl::IsReportDataPersisted() const {
- return store_ && context_->policy().persist_reports_across_restarts;
-}
-
-bool ReportingCacheImpl::IsClientDataPersisted() const {
- return store_ && context_->policy().persist_clients_across_restarts;
-}
-
void ReportingCacheImpl::RemoveReportInternal(const ReportingReport* report) {
- reports_[report]->RecordOutcome(tick_clock()->NowTicks());
+ reports_[report]->RecordOutcome(tick_clock().NowTicks());
size_t erased = reports_.erase(report);
DCHECK_EQ(1u, erased);
}
@@ -576,7 +676,7 @@ const ReportingReport* ReportingCacheImpl::FindReportToEvict() const {
for (const auto& it : reports_) {
const ReportingReport* report = it.first;
- if (base::ContainsKey(pending_reports_, report))
+ if (base::Contains(pending_reports_, report))
continue;
if (!earliest_queued || report->queued < earliest_queued->queued) {
earliest_queued = report;
@@ -601,7 +701,7 @@ void ReportingCacheImpl::SanityCheckClients() const {
total_endpoint_group_count += SanityCheckOriginClient(domain, client);
// We have not seen a duplicate client with the same origin.
- DCHECK(!base::ContainsKey(origins_in_cache, client.origin));
+ DCHECK(!base::Contains(origins_in_cache, client.origin));
origins_in_cache.insert(client.origin);
}
@@ -634,6 +734,7 @@ size_t ReportingCacheImpl::SanityCheckOriginClient(
for (const std::string& group_name : client.endpoint_group_names) {
++endpoint_group_count_in_client;
ReportingEndpointGroupKey group_key(client.origin, group_name);
+ DCHECK(endpoint_groups_.find(group_key) != endpoint_groups_.end());
const CachedReportingEndpointGroup& group = endpoint_groups_.at(group_key);
endpoint_count_in_client += SanityCheckEndpointGroup(group_key, group);
}
@@ -672,7 +773,7 @@ size_t ReportingCacheImpl::SanityCheckEndpointGroup(
// We have not seen a duplicate endpoint with the same URL in this
// group.
- DCHECK(!base::ContainsKey(endpoint_urls_in_group, endpoint.info.url));
+ DCHECK(!base::Contains(endpoint_urls_in_group, endpoint.info.url));
endpoint_urls_in_group.insert(endpoint.info.url);
++endpoint_count_in_group;
@@ -693,14 +794,14 @@ void ReportingCacheImpl::SanityCheckEndpoint(
DCHECK_LE(0, endpoint.info.weight);
// The endpoint is in the |endpoint_its_by_url_| index.
- DCHECK(base::ContainsKey(endpoint_its_by_url_, endpoint.info.url));
+ DCHECK(base::Contains(endpoint_its_by_url_, endpoint.info.url));
auto url_range = endpoint_its_by_url_.equal_range(endpoint.info.url);
std::vector<EndpointMap::iterator> endpoint_its_for_url;
for (auto index_it = url_range.first; index_it != url_range.second;
++index_it) {
endpoint_its_for_url.push_back(index_it->second);
}
- DCHECK(base::ContainsValue(endpoint_its_for_url, endpoint_it));
+ DCHECK(base::Contains(endpoint_its_for_url, endpoint_it));
#endif // DCHECK_IS_ON()
}
@@ -762,6 +863,9 @@ void ReportingCacheImpl::AddOrUpdateEndpointGroup(
// Add a new endpoint group for this origin and group name.
if (group_it == endpoint_groups_.end()) {
+ if (context_->IsClientDataPersisted())
+ store()->AddReportingEndpointGroup(new_group);
+
endpoint_groups_.insert(
std::make_pair(new_group.group_key, std::move(new_group)));
return;
@@ -773,6 +877,9 @@ void ReportingCacheImpl::AddOrUpdateEndpointGroup(
old_group.expires = new_group.expires;
old_group.last_used = new_group.last_used;
+ if (context_->IsClientDataPersisted())
+ store()->UpdateReportingEndpointGroupDetails(new_group);
+
// Note: SanityCheckClients() may fail here because we have not yet
// added/updated the OriginClient for |origin| yet.
}
@@ -783,6 +890,9 @@ void ReportingCacheImpl::AddOrUpdateEndpoint(ReportingEndpoint new_endpoint) {
// Add a new endpoint for this origin, group, and url.
if (endpoint_it == endpoints_.end()) {
+ if (context_->IsClientDataPersisted())
+ store()->AddReportingEndpoint(new_endpoint);
+
url::Origin origin = new_endpoint.group_key.origin;
EndpointMap::iterator endpoint_it = endpoints_.insert(
std::make_pair(new_endpoint.group_key, std::move(new_endpoint)));
@@ -801,6 +911,9 @@ void ReportingCacheImpl::AddOrUpdateEndpoint(ReportingEndpoint new_endpoint) {
old_endpoint.info.weight = new_endpoint.info.weight;
// |old_endpoint.stats| stays the same.
+ if (context_->IsClientDataPersisted())
+ store()->UpdateReportingEndpointDetails(new_endpoint);
+
// Note: SanityCheckClients() may fail here because we have not yet
// added/updated the OriginClient for |origin| yet.
}
@@ -822,7 +935,7 @@ void ReportingCacheImpl::RemoveEndpointsInGroupOtherThan(
const auto group_range = endpoints_.equal_range(group_key);
for (auto it = group_range.first; it != group_range.second;) {
- if (base::ContainsKey(endpoints_to_keep_urls, it->second.info.url)) {
+ if (base::Contains(endpoints_to_keep_urls, it->second.info.url)) {
++it;
continue;
}
@@ -878,6 +991,8 @@ void ReportingCacheImpl::MarkEndpointGroupAndClientUsed(
base::Time now) {
group_it->second.last_used = now;
client_it->second.last_used = now;
+ if (context_->IsClientDataPersisted())
+ store()->UpdateReportingEndpointGroupAccessTime(group_it->second);
}
base::Optional<ReportingCacheImpl::EndpointMap::iterator>
@@ -902,6 +1017,8 @@ ReportingCacheImpl::RemoveEndpointInternal(OriginClientMap::iterator client_it,
DCHECK_GT(client_it->second.endpoint_count, 1u);
RemoveEndpointItFromIndex(endpoint_it);
--client_it->second.endpoint_count;
+ if (context_->IsClientDataPersisted())
+ store()->DeleteReportingEndpoint(endpoint_it->second);
return endpoints_.erase(endpoint_it);
}
@@ -922,6 +1039,9 @@ ReportingCacheImpl::RemoveEndpointGroupInternal(
if (num_endpoints_removed)
*num_endpoints_removed += endpoints_removed;
for (auto it = group_range.first; it != group_range.second; ++it) {
+ if (context_->IsClientDataPersisted())
+ store()->DeleteReportingEndpoint(it->second);
+
RemoveEndpointItFromIndex(it);
}
endpoints_.erase(group_range.first, group_range.second);
@@ -935,6 +1055,9 @@ ReportingCacheImpl::RemoveEndpointGroupInternal(
client.endpoint_group_names.erase(group_key.group_name);
DCHECK_EQ(1u, erased_from_client);
+ if (context_->IsClientDataPersisted())
+ store()->DeleteReportingEndpointGroup(group_it->second);
+
base::Optional<EndpointGroupMap::iterator> rv =
endpoint_groups_.erase(group_it);
@@ -955,10 +1078,16 @@ ReportingCacheImpl::RemoveClientInternal(OriginClientMap::iterator client_it) {
// Erase all groups in this client, and all endpoints in those groups.
for (const std::string& group_name : client.endpoint_group_names) {
ReportingEndpointGroupKey group_key(client.origin, group_name);
- endpoint_groups_.erase(group_key);
+ EndpointGroupMap::iterator group_it = FindEndpointGroupIt(group_key);
+ if (context_->IsClientDataPersisted())
+ store()->DeleteReportingEndpointGroup(group_it->second);
+ endpoint_groups_.erase(group_it);
const auto group_range = endpoints_.equal_range(group_key);
for (auto it = group_range.first; it != group_range.second; ++it) {
+ if (context_->IsClientDataPersisted())
+ store()->DeleteReportingEndpoint(it->second);
+
RemoveEndpointItFromIndex(it);
}
endpoints_.erase(group_range.first, group_range.second);
@@ -1078,7 +1207,7 @@ void ReportingCacheImpl::EvictEndpointFromGroup(
bool ReportingCacheImpl::RemoveExpiredOrStaleGroups(
OriginClientMap::iterator client_it,
size_t* num_endpoints_removed) {
- base::Time now = clock()->Now();
+ base::Time now = clock().Now();
// Make a copy of this because |client_it| may be invalidated.
std::set<std::string> groups_in_client_names(
client_it->second.endpoint_group_names);
diff --git a/chromium/net/reporting/reporting_cache_impl.h b/chromium/net/reporting/reporting_cache_impl.h
index 908a14b7ec3..bccd0b168d9 100644
--- a/chromium/net/reporting/reporting_cache_impl.h
+++ b/chromium/net/reporting/reporting_cache_impl.h
@@ -31,8 +31,7 @@ namespace net {
class ReportingCacheImpl : public ReportingCache {
public:
- ReportingCacheImpl(ReportingContext* context,
- PersistentReportingStore* store);
+ ReportingCacheImpl(ReportingContext* context);
~ReportingCacheImpl() override;
@@ -76,6 +75,10 @@ class ReportingCacheImpl : public ReportingCache {
void RemoveEndpointGroup(const url::Origin& origin,
const std::string& name) override;
void RemoveEndpointsForUrl(const GURL& url) override;
+ void AddClientsLoadedFromStore(
+ std::vector<ReportingEndpoint> loaded_endpoints,
+ std::vector<CachedReportingEndpointGroup> loaded_endpoint_groups)
+ override;
std::vector<ReportingEndpoint> GetCandidateEndpointsForDelivery(
const url::Origin& origin,
const std::string& group_name) override;
@@ -129,11 +132,6 @@ class ReportingCacheImpl : public ReportingCache {
using EndpointMap =
std::multimap<ReportingEndpointGroupKey, ReportingEndpoint>;
- // Returns whether the cached data is persisted across restarts in the
- // PersistentReportingStore.
- bool IsReportDataPersisted() const;
- bool IsClientDataPersisted() const;
-
void RemoveReportInternal(const ReportingReport* report);
const ReportingReport* FindReportToEvict() const;
@@ -286,17 +284,13 @@ class ReportingCacheImpl : public ReportingCache {
const CachedReportingEndpointGroup& group) const;
base::Value GetEndpointAsValue(const ReportingEndpoint& endpoint) const;
- base::Clock* clock() const { return context_->clock(); }
-
- const base::TickClock* tick_clock() const { return context_->tick_clock(); }
+ // Convenience methods for fetching things from the context_.
+ const base::Clock& clock() const { return context_->clock(); }
+ const base::TickClock& tick_clock() const { return context_->tick_clock(); }
+ PersistentReportingStore* store() { return context_->store(); }
ReportingContext* context_;
- // Stores cached data persistently, if not null. If |store_| is null, then the
- // ReportingCache will store data in memory only.
- // TODO(chlily): Implement.
- PersistentReportingStore* const store_;
-
// Owns all reports, keyed by const raw pointer for easier lookup.
std::unordered_map<const ReportingReport*, std::unique_ptr<ReportingReport>>
reports_;
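
With the store_ member and the cache-side IsClientDataPersisted()/IsReportDataPersisted() helpers removed, the persistence check now lives on ReportingContext. Its implementation is outside this diff, but judging from the helper removed above it presumably reduces to a sketch like:

// Sketch, mirroring the helper removed from ReportingCacheImpl: client data
// is persisted only when a store is configured and the policy opts in.
bool ReportingContext::IsClientDataPersisted() const {
  return store() != nullptr && policy().persist_clients_across_restarts;
}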
diff --git a/chromium/net/reporting/reporting_cache_unittest.cc b/chromium/net/reporting/reporting_cache_unittest.cc
index 163ca17bc0b..11c8716e5e3 100644
--- a/chromium/net/reporting/reporting_cache_unittest.cc
+++ b/chromium/net/reporting/reporting_cache_unittest.cc
@@ -8,6 +8,7 @@
#include <string>
#include <utility>
+#include "base/bind.h"
#include "base/strings/string_number_conversions.h"
#include "base/test/simple_test_tick_clock.h"
#include "base/test/values_test_util.h"
@@ -19,6 +20,7 @@
#include "net/reporting/reporting_endpoint.h"
#include "net/reporting/reporting_report.h"
#include "net/reporting/reporting_test_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "url/gurl.h"
#include "url/origin.h"
@@ -26,6 +28,8 @@
namespace net {
namespace {
+using CommandType = MockPersistentReportingStore::Command::Type;
+
class TestReportingCacheObserver : public ReportingCacheObserver {
public:
TestReportingCacheObserver()
@@ -71,6 +75,16 @@ class ReportingCacheTest : public ReportingTestBase,
~ReportingCacheTest() override { context()->RemoveCacheObserver(&observer_); }
+ void LoadReportingClients() {
+ // All ReportingCache methods assume that the store has been initialized.
+ if (store()) {
+ store()->LoadReportingClients(
+ base::BindOnce(&ReportingCache::AddClientsLoadedFromStore,
+ base::Unretained(cache())));
+ store()->FinishLoading(true);
+ }
+ }
+
TestReportingCacheObserver* observer() { return &observer_; }
size_t report_count() {
@@ -132,6 +146,8 @@ class ReportingCacheTest : public ReportingTestBase,
const url::Origin kOrigin2_ = url::Origin::Create(GURL("https://origin2/"));
const GURL kEndpoint1_ = GURL("https://endpoint1/");
const GURL kEndpoint2_ = GURL("https://endpoint2/");
+ const GURL kEndpoint3_ = GURL("https://endpoint3/");
+ const GURL kEndpoint4_ = GURL("https://endpoint4/");
const std::string kUserAgent_ = "Mozilla/1.0";
const std::string kGroup1_ = "group1";
const std::string kGroup2_ = "group2";
@@ -152,6 +168,8 @@ class ReportingCacheTest : public ReportingTestBase,
// header parser.
TEST_P(ReportingCacheTest, Reports) {
+ LoadReportingClients();
+
std::vector<const ReportingReport*> reports;
cache()->GetReports(&reports);
EXPECT_TRUE(reports.empty());
@@ -192,6 +210,8 @@ TEST_P(ReportingCacheTest, Reports) {
}
TEST_P(ReportingCacheTest, RemoveAllReports) {
+ LoadReportingClients();
+
cache()->AddReport(kUrl1_, kUserAgent_, kGroup1_, kType_,
std::make_unique<base::DictionaryValue>(), 0, kNowTicks_,
0);
@@ -212,6 +232,8 @@ TEST_P(ReportingCacheTest, RemoveAllReports) {
}
TEST_P(ReportingCacheTest, RemovePendingReports) {
+ LoadReportingClients();
+
cache()->AddReport(kUrl1_, kUserAgent_, kGroup1_, kType_,
std::make_unique<base::DictionaryValue>(), 0, kNowTicks_,
0);
@@ -244,6 +266,8 @@ TEST_P(ReportingCacheTest, RemovePendingReports) {
}
TEST_P(ReportingCacheTest, RemoveAllPendingReports) {
+ LoadReportingClients();
+
cache()->AddReport(kUrl1_, kUserAgent_, kGroup1_, kType_,
std::make_unique<base::DictionaryValue>(), 0, kNowTicks_,
0);
@@ -276,6 +300,8 @@ TEST_P(ReportingCacheTest, RemoveAllPendingReports) {
}
TEST_P(ReportingCacheTest, GetReportsAsValue) {
+ LoadReportingClients();
+
// We need a reproducible expiry timestamp for this test case.
const base::TimeTicks now = base::TimeTicks();
const ReportingReport* report1 =
@@ -346,6 +372,8 @@ TEST_P(ReportingCacheTest, GetReportsAsValue) {
}
TEST_P(ReportingCacheTest, Endpoints) {
+ LoadReportingClients();
+
EXPECT_EQ(0u, cache()->GetEndpointCount());
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint1_, kExpires1_));
EXPECT_EQ(1u, cache()->GetEndpointCount());
@@ -402,6 +430,8 @@ TEST_P(ReportingCacheTest, Endpoints) {
}
TEST_P(ReportingCacheTest, RemoveClient) {
+ LoadReportingClients();
+
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint1_, kExpires1_));
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint2_, kExpires1_));
ASSERT_TRUE(SetEndpointInCache(kOrigin2_, kGroup1_, kEndpoint1_, kExpires1_));
@@ -415,9 +445,41 @@ TEST_P(ReportingCacheTest, RemoveClient) {
EXPECT_EQ(2u, cache()->GetEndpointCount());
EXPECT_FALSE(OriginClientExistsInCache(kOrigin1_));
EXPECT_TRUE(OriginClientExistsInCache(kOrigin2_));
+
+ if (store()) {
+ store()->Flush();
+ // SetEndpointInCache doesn't update store counts, which is why they go
+ // negative here.
+ // TODO(crbug.com/895821): Populate the cache via the store so we don't need
+ // negative counts.
+ EXPECT_EQ(-2, store()->StoredEndpointsCount());
+ EXPECT_EQ(-1, store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ EXPECT_EQ(2,
+ store()->CountCommands(CommandType::DELETE_REPORTING_ENDPOINT));
+ EXPECT_EQ(1, store()->CountCommands(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin1_, kGroup1_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin1_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin1_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_}));
+ EXPECT_THAT(store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
TEST_P(ReportingCacheTest, RemoveAllClients) {
+ LoadReportingClients();
+
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint1_, kExpires1_));
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint2_, kExpires1_));
ASSERT_TRUE(SetEndpointInCache(kOrigin2_, kGroup1_, kEndpoint1_, kExpires1_));
@@ -431,9 +493,59 @@ TEST_P(ReportingCacheTest, RemoveAllClients) {
EXPECT_EQ(0u, cache()->GetEndpointCount());
EXPECT_FALSE(OriginClientExistsInCache(kOrigin1_));
EXPECT_FALSE(OriginClientExistsInCache(kOrigin2_));
+
+ if (store()) {
+ store()->Flush();
+ // SetEndpointInCache doesn't update store counts, which is why they go
+ // negative here.
+ // TODO(crbug.com/895821): Populate the cache via the store so we don't need
+ // negative counts.
+ EXPECT_EQ(-4, store()->StoredEndpointsCount());
+ EXPECT_EQ(-3, store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ EXPECT_EQ(4,
+ store()->CountCommands(CommandType::DELETE_REPORTING_ENDPOINT));
+ EXPECT_EQ(3, store()->CountCommands(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin1_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_}));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin1_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin2_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_}));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin2_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin1_, kGroup1_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin2_, kGroup1_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin2_, kGroup2_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
TEST_P(ReportingCacheTest, RemoveEndpointGroup) {
+ LoadReportingClients();
+
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint1_, kExpires1_));
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint2_, kExpires1_));
ASSERT_TRUE(SetEndpointInCache(kOrigin2_, kGroup1_, kEndpoint1_, kExpires1_));
@@ -465,9 +577,46 @@ TEST_P(ReportingCacheTest, RemoveEndpointGroup) {
EXPECT_TRUE(OriginClientExistsInCache(kOrigin1_));
EXPECT_TRUE(EndpointGroupExistsInCache(
kOrigin1_, kGroup1_, OriginSubdomains::DEFAULT, kExpires1_));
+
+ if (store()) {
+ store()->Flush();
+ // SetEndpointInCache doesn't update store counts, which is why they go
+ // negative here.
+ // TODO(crbug.com/895821): Populate the cache via the store so we don't need
+ // negative counts.
+ EXPECT_EQ(-2, store()->StoredEndpointsCount());
+ EXPECT_EQ(-2, store()->StoredEndpointGroupsCount());
+ EXPECT_EQ(2,
+ store()->CountCommands(CommandType::DELETE_REPORTING_ENDPOINT));
+ EXPECT_EQ(2, store()->CountCommands(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP));
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin2_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_}));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin2_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin2_, kGroup1_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin2_, kGroup2_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
TEST_P(ReportingCacheTest, RemoveEndpointsForUrl) {
+ LoadReportingClients();
+
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint1_, kExpires1_));
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint2_, kExpires1_));
ASSERT_TRUE(SetEndpointInCache(kOrigin2_, kGroup1_, kEndpoint1_, kExpires1_));
@@ -497,9 +646,41 @@ TEST_P(ReportingCacheTest, RemoveEndpointsForUrl) {
EXPECT_TRUE(FindEndpointInCache(kOrigin1_, kGroup1_, kEndpoint2_));
EXPECT_FALSE(FindEndpointInCache(kOrigin2_, kGroup1_, kEndpoint1_));
EXPECT_TRUE(FindEndpointInCache(kOrigin2_, kGroup2_, kEndpoint2_));
+
+ if (store()) {
+ store()->Flush();
+ // SetEndpointInCache doesn't update store counts, which is why they go
+ // negative here.
+ // TODO(crbug.com/895821): Populate the cache via the store so we don't need
+ // negative counts.
+ EXPECT_EQ(-2, store()->StoredEndpointsCount());
+ EXPECT_EQ(-1, store()->StoredEndpointGroupsCount());
+ EXPECT_EQ(2,
+ store()->CountCommands(CommandType::DELETE_REPORTING_ENDPOINT));
+ EXPECT_EQ(1, store()->CountCommands(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP));
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin1_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_}));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin2_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_}));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin2_, kGroup1_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
TEST_P(ReportingCacheTest, GetClientsAsValue) {
+ LoadReportingClients();
+
// These times are bogus but we need a reproducible expiry timestamp for this
// test case.
const base::TimeTicks expires_ticks =
@@ -562,6 +743,8 @@ TEST_P(ReportingCacheTest, GetClientsAsValue) {
}
TEST_P(ReportingCacheTest, GetCandidateEndpointsForDelivery) {
+ LoadReportingClients();
+
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint1_, kExpires1_));
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint2_, kExpires1_));
ASSERT_TRUE(SetEndpointInCache(kOrigin2_, kGroup1_, kEndpoint1_, kExpires1_));
@@ -582,6 +765,8 @@ TEST_P(ReportingCacheTest, GetCandidateEndpointsForDelivery) {
}
TEST_P(ReportingCacheTest, GetCandidateEndpointsExcludesExpired) {
+ LoadReportingClients();
+
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint1_, kExpires1_));
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, kEndpoint2_, kExpires1_));
ASSERT_TRUE(SetEndpointInCache(kOrigin2_, kGroup1_, kEndpoint1_, kExpires1_));
@@ -606,6 +791,8 @@ TEST_P(ReportingCacheTest, GetCandidateEndpointsExcludesExpired) {
}
TEST_P(ReportingCacheTest, ExcludeSubdomainsDifferentPort) {
+ LoadReportingClients();
+
const url::Origin kOrigin = url::Origin::Create(GURL("https://example/"));
const url::Origin kDifferentPortOrigin =
url::Origin::Create(GURL("https://example:444/"));
@@ -619,6 +806,8 @@ TEST_P(ReportingCacheTest, ExcludeSubdomainsDifferentPort) {
}
TEST_P(ReportingCacheTest, ExcludeSubdomainsSuperdomain) {
+ LoadReportingClients();
+
const url::Origin kOrigin = url::Origin::Create(GURL("https://foo.example/"));
const url::Origin kSuperOrigin =
url::Origin::Create(GURL("https://example/"));
@@ -632,6 +821,8 @@ TEST_P(ReportingCacheTest, ExcludeSubdomainsSuperdomain) {
}
TEST_P(ReportingCacheTest, IncludeSubdomainsDifferentPort) {
+ LoadReportingClients();
+
const url::Origin kOrigin = url::Origin::Create(GURL("https://example/"));
const url::Origin kDifferentPortOrigin =
url::Origin::Create(GURL("https://example:444/"));
@@ -646,6 +837,8 @@ TEST_P(ReportingCacheTest, IncludeSubdomainsDifferentPort) {
}
TEST_P(ReportingCacheTest, IncludeSubdomainsSuperdomain) {
+ LoadReportingClients();
+
const url::Origin kOrigin = url::Origin::Create(GURL("https://foo.example/"));
const url::Origin kSuperOrigin =
url::Origin::Create(GURL("https://example/"));
@@ -660,6 +853,8 @@ TEST_P(ReportingCacheTest, IncludeSubdomainsSuperdomain) {
}
TEST_P(ReportingCacheTest, IncludeSubdomainsPreferOriginToDifferentPort) {
+ LoadReportingClients();
+
const url::Origin kOrigin = url::Origin::Create(GURL("https://foo.example/"));
const url::Origin kDifferentPortOrigin =
url::Origin::Create(GURL("https://example:444/"));
@@ -676,6 +871,8 @@ TEST_P(ReportingCacheTest, IncludeSubdomainsPreferOriginToDifferentPort) {
}
TEST_P(ReportingCacheTest, IncludeSubdomainsPreferOriginToSuperdomain) {
+ LoadReportingClients();
+
const url::Origin kOrigin = url::Origin::Create(GURL("https://foo.example/"));
const url::Origin kSuperOrigin =
url::Origin::Create(GURL("https://example/"));
@@ -692,6 +889,8 @@ TEST_P(ReportingCacheTest, IncludeSubdomainsPreferOriginToSuperdomain) {
}
TEST_P(ReportingCacheTest, IncludeSubdomainsPreferMoreSpecificSuperdomain) {
+ LoadReportingClients();
+
const url::Origin kOrigin =
url::Origin::Create(GURL("https://foo.bar.example/"));
const url::Origin kSuperOrigin =
@@ -711,6 +910,8 @@ TEST_P(ReportingCacheTest, IncludeSubdomainsPreferMoreSpecificSuperdomain) {
}
TEST_P(ReportingCacheTest, EvictOldestReport) {
+ LoadReportingClients();
+
size_t max_report_count = policy().max_report_count;
ASSERT_LT(0u, max_report_count);
@@ -742,6 +943,8 @@ TEST_P(ReportingCacheTest, EvictOldestReport) {
}
TEST_P(ReportingCacheTest, DontEvictPendingReports) {
+ LoadReportingClients();
+
size_t max_report_count = policy().max_report_count;
ASSERT_LT(0u, max_report_count);
@@ -777,6 +980,8 @@ TEST_P(ReportingCacheTest, DontEvictPendingReports) {
}
TEST_P(ReportingCacheTest, EvictEndpointsOverPerOriginLimit) {
+ LoadReportingClients();
+
for (size_t i = 0; i < policy().max_endpoints_per_origin; ++i) {
ASSERT_TRUE(
SetEndpointInCache(kOrigin1_, kGroup1_, MakeURL(i), kExpires1_));
@@ -789,6 +994,8 @@ TEST_P(ReportingCacheTest, EvictEndpointsOverPerOriginLimit) {
}
TEST_P(ReportingCacheTest, EvictExpiredGroups) {
+ LoadReportingClients();
+
for (size_t i = 0; i < policy().max_endpoints_per_origin; ++i) {
ASSERT_TRUE(
SetEndpointInCache(kOrigin1_, kGroup1_, MakeURL(i), kExpires1_));
@@ -813,6 +1020,8 @@ TEST_P(ReportingCacheTest, EvictExpiredGroups) {
}
TEST_P(ReportingCacheTest, EvictStaleGroups) {
+ LoadReportingClients();
+
for (size_t i = 0; i < policy().max_endpoints_per_origin; ++i) {
ASSERT_TRUE(
SetEndpointInCache(kOrigin1_, kGroup1_, MakeURL(i), kExpires1_));
@@ -836,6 +1045,8 @@ TEST_P(ReportingCacheTest, EvictStaleGroups) {
}
TEST_P(ReportingCacheTest, EvictFromStalestGroup) {
+ LoadReportingClients();
+
for (size_t i = 0; i < policy().max_endpoints_per_origin; ++i) {
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, base::NumberToString(i),
MakeURL(i), kExpires1_));
@@ -866,6 +1077,8 @@ TEST_P(ReportingCacheTest, EvictFromStalestGroup) {
}
TEST_P(ReportingCacheTest, EvictFromLargestGroup) {
+ LoadReportingClients();
+
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, MakeURL(0), kExpires1_));
// This group should be evicted from because it has 2 endpoints.
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup2_, MakeURL(1), kExpires1_));
@@ -890,6 +1103,8 @@ TEST_P(ReportingCacheTest, EvictFromLargestGroup) {
}
TEST_P(ReportingCacheTest, EvictLeastImportantEndpoint) {
+ LoadReportingClients();
+
ASSERT_TRUE(SetEndpointInCache(kOrigin1_, kGroup1_, MakeURL(0), kExpires1_,
OriginSubdomains::DEFAULT, 1 /* priority*/,
1 /* weight */));
@@ -917,6 +1132,8 @@ TEST_P(ReportingCacheTest, EvictLeastImportantEndpoint) {
}
TEST_P(ReportingCacheTest, EvictEndpointsOverGlobalLimitFromStalestClient) {
+ LoadReportingClients();
+
// Set enough endpoints to reach the global endpoint limit.
for (size_t i = 0; i < policy().max_endpoint_count; ++i) {
ASSERT_TRUE(SetEndpointInCache(url::Origin::Create(MakeURL(i)), kGroup1_,
@@ -938,6 +1155,180 @@ TEST_P(ReportingCacheTest, EvictEndpointsOverGlobalLimitFromStalestClient) {
EXPECT_TRUE(OriginClientExistsInCache(kOrigin1_));
}
+TEST_P(ReportingCacheTest, AddClientsLoadedFromStore) {
+ if (!store())
+ return;
+
+ base::Time now = clock()->Now();
+
+ std::vector<ReportingEndpoint> endpoints;
+ endpoints.emplace_back(kOrigin1_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_});
+ endpoints.emplace_back(kOrigin2_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_});
+ endpoints.emplace_back(kOrigin1_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_});
+ endpoints.emplace_back(kOrigin2_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_});
+ std::vector<CachedReportingEndpointGroup> groups;
+ groups.emplace_back(kOrigin2_, kGroup1_, OriginSubdomains::DEFAULT,
+ now + base::TimeDelta::FromMinutes(2) /* expires */,
+ now /* last_used */);
+ groups.emplace_back(kOrigin1_, kGroup1_, OriginSubdomains::DEFAULT,
+ now + base::TimeDelta::FromMinutes(1) /* expires */,
+ now /* last_used */);
+ groups.emplace_back(kOrigin2_, kGroup2_, OriginSubdomains::DEFAULT,
+ now + base::TimeDelta::FromMinutes(3) /* expires */,
+ now /* last_used */);
+ store()->SetPrestoredClients(endpoints, groups);
+
+ LoadReportingClients();
+
+ EXPECT_EQ(4u, cache()->GetEndpointCount());
+ EXPECT_EQ(3u, cache()->GetEndpointGroupCountForTesting());
+ EXPECT_TRUE(EndpointExistsInCache(kOrigin1_, kGroup1_, kEndpoint1_));
+ EXPECT_TRUE(EndpointExistsInCache(kOrigin1_, kGroup1_, kEndpoint2_));
+ EXPECT_TRUE(EndpointExistsInCache(kOrigin2_, kGroup1_, kEndpoint1_));
+ EXPECT_TRUE(EndpointExistsInCache(kOrigin2_, kGroup2_, kEndpoint2_));
+ EXPECT_TRUE(
+ EndpointGroupExistsInCache(kOrigin1_, kGroup1_, OriginSubdomains::DEFAULT,
+ now + base::TimeDelta::FromMinutes(1)));
+ EXPECT_TRUE(
+ EndpointGroupExistsInCache(kOrigin2_, kGroup1_, OriginSubdomains::DEFAULT,
+ now + base::TimeDelta::FromMinutes(2)));
+ EXPECT_TRUE(
+ EndpointGroupExistsInCache(kOrigin2_, kGroup2_, OriginSubdomains::DEFAULT,
+ now + base::TimeDelta::FromMinutes(3)));
+ EXPECT_TRUE(OriginClientExistsInCache(kOrigin1_));
+ EXPECT_TRUE(OriginClientExistsInCache(kOrigin2_));
+}
+
+TEST_P(ReportingCacheTest, DoNotStoreMoreThanLimits) {
+ if (!store())
+ return;
+
+ base::Time now = clock()->Now();
+
+ // We hardcode the number of endpoints in this test, so we need to manually
+ // update the test when |max_endpoint_count| changes. You'll need to
+ // add/remove elements to |endpoints| when that happens.
+ EXPECT_EQ(5u, policy().max_endpoint_count) << "You need to update this test "
+ << "to reflect a change in "
+ << "max_endpoint_count";
+
+ std::vector<ReportingEndpoint> endpoints;
+ endpoints.emplace_back(kOrigin1_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_});
+ endpoints.emplace_back(kOrigin1_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_});
+ endpoints.emplace_back(kOrigin1_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint3_});
+ endpoints.emplace_back(kOrigin1_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint4_});
+ endpoints.emplace_back(kOrigin2_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_});
+ endpoints.emplace_back(kOrigin2_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_});
+ endpoints.emplace_back(kOrigin2_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint3_});
+ endpoints.emplace_back(kOrigin2_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint4_});
+ std::vector<CachedReportingEndpointGroup> groups;
+ groups.emplace_back(kOrigin1_, kGroup1_, OriginSubdomains::DEFAULT,
+ now /* expires */, now /* last_used */);
+ groups.emplace_back(kOrigin2_, kGroup2_, OriginSubdomains::DEFAULT,
+ now /* expires */, now /* last_used */);
+ store()->SetPrestoredClients(endpoints, groups);
+
+ LoadReportingClients();
+
+ EXPECT_GE(5u, cache()->GetEndpointCount());
+ EXPECT_GE(2u, cache()->GetEndpointGroupCountForTesting());
+}
+
+TEST_P(ReportingCacheTest, DoNotLoadMismatchedGroupsAndEndpoints) {
+ if (!store())
+ return;
+
+ base::Time now = clock()->Now();
+
+ std::vector<ReportingEndpoint> endpoints;
+ // This endpoint has no corresponding endpoint group
+ endpoints.emplace_back(kOrigin1_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_});
+ endpoints.emplace_back(kOrigin2_, kGroup1_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_});
+ // This endpoint has no corresponding endpoint group
+ endpoints.emplace_back(kOrigin2_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint1_});
+ std::vector<CachedReportingEndpointGroup> groups;
+ // This endpoint group has no corresponding endpoint
+ groups.emplace_back(kOrigin1_, kGroup2_, OriginSubdomains::DEFAULT,
+ now /* expires */, now /* last_used */);
+ groups.emplace_back(kOrigin2_, kGroup1_, OriginSubdomains::DEFAULT,
+ now /* expires */, now /* last_used */);
+ // This endpoint group has no corresponding endpoint
+ groups.emplace_back(kOrigin2_, "last_group", OriginSubdomains::DEFAULT,
+ now /* expires */, now /* last_used */);
+ store()->SetPrestoredClients(endpoints, groups);
+
+ LoadReportingClients();
+
+ EXPECT_GE(1u, cache()->GetEndpointCount());
+ EXPECT_GE(1u, cache()->GetEndpointGroupCountForTesting());
+ EXPECT_TRUE(EndpointExistsInCache(kOrigin2_, kGroup1_, kEndpoint1_));
+}
+
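The test above only pins down the observable result: endpoints without a stored group, and groups without any stored endpoint, are dropped when the prestored data is loaded. A minimal sketch of that orphan-filtering idea, using hypothetical local names (the real check presumably happens when the cache ingests the loaded clients via ReportingCache::AddClientsLoadedFromStore):

  // Illustrative only; not the actual ReportingCacheImpl logic.
  std::set<ReportingEndpointGroupKey> stored_group_keys;
  for (const auto& group : loaded_groups)  // |loaded_groups| is hypothetical.
    stored_group_keys.insert(group.group_key);

  std::vector<ReportingEndpoint> kept_endpoints;
  for (const auto& endpoint : loaded_endpoints) {  // |loaded_endpoints| too.
    // Keep an endpoint only if its (origin, group) pair has a stored group.
    if (stored_group_keys.count(endpoint.group_key))
      kept_endpoints.push_back(endpoint);
  }
  // Groups whose endpoints are all missing would be discarded analogously,
  // which is why only kOrigin2_/kGroup1_ survives in the test above.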
+// This test verifies that we preserve the last_used field when storing clients
+// loaded from disk. We don't have direct access to individual cache elements,
+// so we test this indirectly by triggering a cache eviction and verifying that
+// a stale element (i.e., one older than a week, by default) is selected for
+// eviction. If last_used weren't populated then presumably that element
+// wouldn't be evicted. (Or rather, it would only have a 25% chance of being
+// evicted and this test would then be flaky.)
+TEST_P(ReportingCacheTest, StoreLastUsedProperly) {
+ if (!store())
+ return;
+
+ base::Time now = clock()->Now();
+
+ // We hardcode the number of endpoints in this test, so we need to manually
+ // update the test when |max_endpoints_per_origin| changes. You'll need to
+  // add/remove elements to |endpoints| and |groups| when that happens.
+ EXPECT_EQ(3u, policy().max_endpoints_per_origin)
+ << "You need to update this test to reflect a change in "
+ "max_endpoints_per_origin";
+
+ // We need more than three endpoints to trigger eviction.
+ std::vector<ReportingEndpoint> endpoints;
+ endpoints.emplace_back(kOrigin1_, "1",
+ ReportingEndpoint::EndpointInfo{kEndpoint1_});
+ endpoints.emplace_back(kOrigin1_, "2",
+ ReportingEndpoint::EndpointInfo{kEndpoint1_});
+ endpoints.emplace_back(kOrigin1_, "3",
+ ReportingEndpoint::EndpointInfo{kEndpoint1_});
+ endpoints.emplace_back(kOrigin1_, "4",
+ ReportingEndpoint::EndpointInfo{kEndpoint1_});
+ std::vector<CachedReportingEndpointGroup> groups;
+ groups.emplace_back(kOrigin1_, "1", OriginSubdomains::DEFAULT,
+ now /* expires */, now /* last_used */);
+ groups.emplace_back(kOrigin1_, "2", OriginSubdomains::DEFAULT,
+ now /* expires */, now /* last_used */);
+ // Stale last_used on group "3" should cause us to select it for eviction
+ groups.emplace_back(kOrigin1_, "3", OriginSubdomains::DEFAULT,
+ now /* expires */, base::Time() /* last_used */);
+ groups.emplace_back(kOrigin1_, "4", OriginSubdomains::DEFAULT,
+ now /* expires */, now /* last_used */);
+ store()->SetPrestoredClients(endpoints, groups);
+
+ LoadReportingClients();
+
+ EXPECT_TRUE(EndpointExistsInCache(kOrigin1_, "1", kEndpoint1_));
+ EXPECT_TRUE(EndpointExistsInCache(kOrigin1_, "2", kEndpoint1_));
+ EXPECT_FALSE(EndpointExistsInCache(kOrigin1_, "3", kEndpoint1_));
+ EXPECT_TRUE(EndpointExistsInCache(kOrigin1_, "4", kEndpoint1_));
+}
+
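For context on why group "3" is the one that disappears: once the origin exceeds max_endpoints_per_origin, eviction presumably prefers the group that was used least recently, which is exactly what a preserved (stale) last_used value signals. A rough sketch of that selection, assuming the cached group exposes its last_used time (illustrative, not the actual eviction code):

  // Hypothetical helper: pick the group whose last_used is oldest.
  const CachedReportingEndpointGroup* OldestGroup(
      const std::vector<CachedReportingEndpointGroup>& groups) {
    const CachedReportingEndpointGroup* oldest = nullptr;
    for (const auto& group : groups) {
      if (!oldest || group.last_used < oldest->last_used)
        oldest = &group;
    }
    return oldest;  // With last_used == base::Time(), group "3" is chosen.
  }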
INSTANTIATE_TEST_SUITE_P(ReportingCacheStoreTest,
ReportingCacheTest,
testing::Bool());
diff --git a/chromium/net/reporting/reporting_context.cc b/chromium/net/reporting/reporting_context.cc
index 64cbb887f6d..1c38d6f8b2c 100644
--- a/chromium/net/reporting/reporting_context.cc
+++ b/chromium/net/reporting/reporting_context.cc
@@ -77,8 +77,18 @@ void ReportingContext::NotifyCachedClientsUpdated() {
observer.OnClientsUpdated();
}
+bool ReportingContext::IsReportDataPersisted() const {
+ return store_ && policy_.persist_reports_across_restarts;
+}
+
+bool ReportingContext::IsClientDataPersisted() const {
+ return store_ && policy_.persist_clients_across_restarts;
+}
+
void ReportingContext::OnShutdown() {
uploader_->OnShutdown();
+ if (store_)
+ store_->Flush();
}
ReportingContext::ReportingContext(
@@ -94,7 +104,8 @@ ReportingContext::ReportingContext(
tick_clock_(tick_clock),
uploader_(std::move(uploader)),
delegate_(std::move(delegate)),
- cache_(ReportingCache::Create(this, store)),
+ cache_(ReportingCache::Create(this)),
+ store_(store),
endpoint_manager_(ReportingEndpointManager::Create(this, rand_callback)),
delivery_agent_(ReportingDeliveryAgent::Create(this)),
garbage_collector_(ReportingGarbageCollector::Create(this)),
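ReportingContext now keeps a raw pointer to the PersistentReportingStore, exposes IsReportDataPersisted()/IsClientDataPersisted() so callers can tell whether a given kind of data actually survives restarts (a store must exist and the corresponding policy bit must be set), and flushes the store on shutdown so queued writes are not lost. A hedged sketch of how a caller might use these guards (illustrative only, not code from this patch):

  // Flushing only makes sense when something is actually being persisted.
  void MaybeFlushStore(ReportingContext* context) {
    if (context->IsReportDataPersisted() || context->IsClientDataPersisted())
      context->store()->Flush();
  }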
diff --git a/chromium/net/reporting/reporting_context.h b/chromium/net/reporting/reporting_context.h
index 233dbc73938..b418868f7e3 100644
--- a/chromium/net/reporting/reporting_context.h
+++ b/chromium/net/reporting/reporting_context.h
@@ -43,14 +43,14 @@ class NET_EXPORT ReportingContext {
~ReportingContext();
- const ReportingPolicy& policy() { return policy_; }
+ const ReportingPolicy& policy() const { return policy_; }
- base::Clock* clock() { return clock_; }
- const base::TickClock* tick_clock() { return tick_clock_; }
+ const base::Clock& clock() const { return *clock_; }
+ const base::TickClock& tick_clock() const { return *tick_clock_; }
ReportingUploader* uploader() { return uploader_.get(); }
-
ReportingDelegate* delegate() { return delegate_.get(); }
ReportingCache* cache() { return cache_.get(); }
+ ReportingCache::PersistentReportingStore* store() { return store_; }
ReportingEndpointManager* endpoint_manager() {
return endpoint_manager_.get();
}
@@ -65,6 +65,11 @@ class NET_EXPORT ReportingContext {
void NotifyCachedReportsUpdated();
void NotifyCachedClientsUpdated();
+ // Returns whether the data in the cache is persisted across restarts in the
+ // PersistentReportingStore.
+ bool IsReportDataPersisted() const;
+ bool IsClientDataPersisted() const;
+
void OnShutdown();
protected:
@@ -90,6 +95,8 @@ class NET_EXPORT ReportingContext {
std::unique_ptr<ReportingCache> cache_;
+ ReportingCache::PersistentReportingStore* const store_;
+
// |endpoint_manager_| must come after |tick_clock_| and |cache_|.
std::unique_ptr<ReportingEndpointManager> endpoint_manager_;
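clock() and tick_clock() now return const references rather than raw pointers, so call sites throughout this patch switch from -> to . and take an explicit address where an API still wants a pointer. For example:

  // Before: base::TimeTicks now = context->tick_clock()->NowTicks();
  // After:
  base::TimeTicks NowFrom(ReportingContext* context) {
    return context->tick_clock().NowTicks();
  }
  // Where a pointer is still required (e.g. the BackoffEntry constructor in
  // reporting_endpoint_manager.cc), pass the address of the returned
  // reference: &context->tick_clock().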
diff --git a/chromium/net/reporting/reporting_delivery_agent.cc b/chromium/net/reporting/reporting_delivery_agent.cc
index 2745cafffc4..7d143363a23 100644
--- a/chromium/net/reporting/reporting_delivery_agent.cc
+++ b/chromium/net/reporting/reporting_delivery_agent.cc
@@ -55,9 +55,7 @@ class ReportingDeliveryAgentImpl : public ReportingDeliveryAgent,
public ReportingCacheObserver {
public:
ReportingDeliveryAgentImpl(ReportingContext* context)
- : context_(context),
- timer_(std::make_unique<base::OneShotTimer>()),
- weak_factory_(this) {
+ : context_(context), timer_(std::make_unique<base::OneShotTimer>()) {
context_->AddCacheObserver(this);
}
@@ -170,7 +168,7 @@ class ReportingDeliveryAgentImpl : public ReportingDeliveryAgent,
const url::Origin& report_origin = origin_group.first;
const std::string& group = origin_group.second;
- if (base::ContainsKey(pending_origin_groups_, origin_group))
+ if (base::Contains(pending_origin_groups_, origin_group))
continue;
const ReportingEndpoint endpoint =
@@ -209,7 +207,7 @@ class ReportingDeliveryAgentImpl : public ReportingDeliveryAgent,
std::unique_ptr<Delivery>& delivery = it.second;
std::string json;
- SerializeReports(delivery->reports, tick_clock()->NowTicks(), &json);
+ SerializeReports(delivery->reports, tick_clock().NowTicks(), &json);
int max_depth = 0;
for (const ReportingReport* report : delivery->reports) {
@@ -261,8 +259,8 @@ class ReportingDeliveryAgentImpl : public ReportingDeliveryAgent,
cache()->ClearReportsPending(delivery->reports);
}
- const ReportingPolicy& policy() { return context_->policy(); }
- const base::TickClock* tick_clock() { return context_->tick_clock(); }
+ const ReportingPolicy& policy() const { return context_->policy(); }
+ const base::TickClock& tick_clock() const { return context_->tick_clock(); }
ReportingDelegate* delegate() { return context_->delegate(); }
ReportingCache* cache() { return context_->cache(); }
ReportingUploader* uploader() { return context_->uploader(); }
@@ -278,7 +276,7 @@ class ReportingDeliveryAgentImpl : public ReportingDeliveryAgent,
// (Would be an unordered_set, but there's no hash on pair.)
std::set<OriginGroup> pending_origin_groups_;
- base::WeakPtrFactory<ReportingDeliveryAgentImpl> weak_factory_;
+ base::WeakPtrFactory<ReportingDeliveryAgentImpl> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(ReportingDeliveryAgentImpl);
};
diff --git a/chromium/net/reporting/reporting_endpoint.cc b/chromium/net/reporting/reporting_endpoint.cc
index 506a860fc35..3b5b790842d 100644
--- a/chromium/net/reporting/reporting_endpoint.cc
+++ b/chromium/net/reporting/reporting_endpoint.cc
@@ -32,6 +32,11 @@ bool operator<(const ReportingEndpointGroupKey& lhs,
return std::tie(lhs.origin, lhs.group_name) <
std::tie(rhs.origin, rhs.group_name);
}
+bool operator>(const ReportingEndpointGroupKey& lhs,
+ const ReportingEndpointGroupKey& rhs) {
+ return std::tie(lhs.origin, lhs.group_name) >
+ std::tie(rhs.origin, rhs.group_name);
+}
const int ReportingEndpoint::EndpointInfo::kDefaultPriority = 1;
const int ReportingEndpoint::EndpointInfo::kDefaultWeight = 1;
@@ -51,6 +56,10 @@ ReportingEndpoint::ReportingEndpoint(url::Origin origin,
ReportingEndpoint::ReportingEndpoint(const ReportingEndpoint& other) = default;
ReportingEndpoint::ReportingEndpoint(ReportingEndpoint&& other) = default;
+ReportingEndpoint& ReportingEndpoint::operator=(const ReportingEndpoint&) =
+ default;
+ReportingEndpoint& ReportingEndpoint::operator=(ReportingEndpoint&&) = default;
+
ReportingEndpoint::~ReportingEndpoint() = default;
bool ReportingEndpoint::is_valid() const {
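Making the group key's fields assignable and adding operator> (alongside the existing operator<), plus the new copy/move assignment operators on ReportingEndpoint, let these types be stored, re-sorted, and overwritten in standard containers. A brief illustrative fragment (not from this patch; |origin1| and |origin2| stand for arbitrary url::Origin values):

  // Keys order lexicographically by (origin, group_name).
  std::map<ReportingEndpointGroupKey, std::vector<ReportingEndpoint>> by_group;
  ReportingEndpointGroupKey key1(origin1, "group");
  ReportingEndpointGroupKey key2(origin2, "group");
  bool consistent = (key1 < key2) == (key2 > key1);  // Always true.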
diff --git a/chromium/net/reporting/reporting_endpoint.h b/chromium/net/reporting/reporting_endpoint.h
index 9447fb0e8c6..a6801e2ea2a 100644
--- a/chromium/net/reporting/reporting_endpoint.h
+++ b/chromium/net/reporting/reporting_endpoint.h
@@ -21,10 +21,10 @@ struct NET_EXPORT ReportingEndpointGroupKey {
ReportingEndpointGroupKey(url::Origin origin, std::string group_name);
// Origin that configured this endpoint group.
- const url::Origin origin;
+ url::Origin origin;
// Name of the endpoint group (defaults to "default" during header parsing).
- const std::string group_name;
+ std::string group_name;
};
NET_EXPORT bool operator==(const ReportingEndpointGroupKey& lhs,
@@ -33,6 +33,8 @@ NET_EXPORT bool operator!=(const ReportingEndpointGroupKey& lhs,
const ReportingEndpointGroupKey& rhs);
NET_EXPORT bool operator<(const ReportingEndpointGroupKey& lhs,
const ReportingEndpointGroupKey& rhs);
+NET_EXPORT bool operator>(const ReportingEndpointGroupKey& lhs,
+ const ReportingEndpointGroupKey& rhs);
// The configuration by an origin to use an endpoint for report delivery.
// TODO(crbug.com/921049): Rename to ReportingEndpoint because that's what it
@@ -81,13 +83,16 @@ struct NET_EXPORT ReportingEndpoint {
ReportingEndpoint(const ReportingEndpoint& other);
ReportingEndpoint(ReportingEndpoint&& other);
+ ReportingEndpoint& operator=(const ReportingEndpoint&);
+ ReportingEndpoint& operator=(ReportingEndpoint&&);
+
~ReportingEndpoint();
bool is_valid() const;
explicit operator bool() const { return is_valid(); }
// Identifies the endpoint group to which this endpoint belongs.
- const ReportingEndpointGroupKey group_key;
+ ReportingEndpointGroupKey group_key;
// URL, priority, and weight of the endpoint.
EndpointInfo info;
@@ -138,7 +143,7 @@ struct NET_EXPORT CachedReportingEndpointGroup {
base::Time now);
// Origin and group name.
- const ReportingEndpointGroupKey group_key;
+ ReportingEndpointGroupKey group_key;
// Whether this group applies to subdomains of |group_key.origin|.
OriginSubdomains include_subdomains = OriginSubdomains::DEFAULT;
diff --git a/chromium/net/reporting/reporting_endpoint_manager.cc b/chromium/net/reporting/reporting_endpoint_manager.cc
index ec6f3863e09..8ed18fa9b8c 100644
--- a/chromium/net/reporting/reporting_endpoint_manager.cc
+++ b/chromium/net/reporting/reporting_endpoint_manager.cc
@@ -50,7 +50,7 @@ class ReportingEndpointManagerImpl : public ReportingEndpointManager {
int total_weight = 0;
for (const ReportingEndpoint endpoint : endpoints) {
- if (base::ContainsKey(endpoint_backoff_, endpoint.info.url) &&
+ if (base::Contains(endpoint_backoff_, endpoint.info.url) &&
endpoint_backoff_[endpoint.info.url]->ShouldRejectRequest()) {
continue;
}
@@ -102,16 +102,16 @@ class ReportingEndpointManagerImpl : public ReportingEndpointManager {
}
void InformOfEndpointRequest(const GURL& endpoint, bool succeeded) override {
- if (!base::ContainsKey(endpoint_backoff_, endpoint)) {
+ if (!base::Contains(endpoint_backoff_, endpoint)) {
endpoint_backoff_[endpoint] = std::make_unique<BackoffEntry>(
- &policy().endpoint_backoff_policy, tick_clock());
+ &policy().endpoint_backoff_policy, &tick_clock());
}
endpoint_backoff_[endpoint]->InformOfRequest(succeeded);
}
private:
- const ReportingPolicy& policy() { return context_->policy(); }
- const base::TickClock* tick_clock() { return context_->tick_clock(); }
+ const ReportingPolicy& policy() const { return context_->policy(); }
+ const base::TickClock& tick_clock() const { return context_->tick_clock(); }
ReportingDelegate* delegate() { return context_->delegate(); }
ReportingCache* cache() { return context_->cache(); }
diff --git a/chromium/net/reporting/reporting_garbage_collector.cc b/chromium/net/reporting/reporting_garbage_collector.cc
index 451f762e749..f524bfd0639 100644
--- a/chromium/net/reporting/reporting_garbage_collector.cc
+++ b/chromium/net/reporting/reporting_garbage_collector.cc
@@ -54,7 +54,7 @@ class ReportingGarbageCollectorImpl : public ReportingGarbageCollector,
// TODO(crbug.com/912622): Garbage collect clients, reports with no matching
// endpoints.
void CollectGarbage() {
- base::TimeTicks now = context_->tick_clock()->NowTicks();
+ base::TimeTicks now = context_->tick_clock().NowTicks();
const ReportingPolicy& policy = context_->policy();
std::vector<const ReportingReport*> all_reports;
diff --git a/chromium/net/reporting/reporting_header_parser.cc b/chromium/net/reporting/reporting_header_parser.cc
index 222a92e7288..19a407ac41a 100644
--- a/chromium/net/reporting/reporting_header_parser.cc
+++ b/chromium/net/reporting/reporting_header_parser.cc
@@ -14,6 +14,7 @@
#include "base/metrics/histogram_macros.h"
#include "base/time/time.h"
#include "base/values.h"
+#include "net/base/registry_controlled_domains/registry_controlled_domain.h"
#include "net/reporting/reporting_cache.h"
#include "net/reporting/reporting_context.h"
#include "net/reporting/reporting_delegate.h"
@@ -143,6 +144,15 @@ HeaderEndpointGroupOutcome ProcessEndpointGroup(
if (dict->HasKey(kIncludeSubdomainsKey) &&
dict->GetBoolean(kIncludeSubdomainsKey, &subdomains_bool) &&
subdomains_bool == true) {
+ // Disallow eTLDs from setting include_subdomains endpoint groups.
+ if (registry_controlled_domains::GetRegistryLength(
+ origin.GetURL(),
+ registry_controlled_domains::INCLUDE_UNKNOWN_REGISTRIES,
+ registry_controlled_domains::INCLUDE_PRIVATE_REGISTRIES) == 0) {
+ return HeaderEndpointGroupOutcome::
+ DISCARDED_INCLUDE_SUBDOMAINS_NOT_ALLOWED;
+ }
+
parsed_endpoint_group_out->include_subdomains = OriginSubdomains::INCLUDE;
}
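The new check leans on GetRegistryLength() returning 0 when the host is itself a registry/public suffix (e.g. "co.uk"), which is how an eTLD origin is detected and refused the include_subdomains option. A hedged standalone version of that predicate:

  // Sketch: true if |origin|'s host is itself a registry (an eTLD), in which
  // case include_subdomains should not be honored.
  bool IsEtldOrigin(const url::Origin& origin) {
    return registry_controlled_domains::GetRegistryLength(
               origin.GetURL(),
               registry_controlled_domains::INCLUDE_UNKNOWN_REGISTRIES,
               registry_controlled_domains::INCLUDE_PRIVATE_REGISTRIES) == 0;
  }
  // e.g. true for https://co.uk/ but false for https://origin1.test/ (with
  // unknown registries included, "test" counts as its registry).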
diff --git a/chromium/net/reporting/reporting_header_parser.h b/chromium/net/reporting/reporting_header_parser.h
index 8de7d2fa644..5888426547c 100644
--- a/chromium/net/reporting/reporting_header_parser.h
+++ b/chromium/net/reporting/reporting_header_parser.h
@@ -51,6 +51,7 @@ class NET_EXPORT ReportingHeaderParser {
PARSED = 7,
REMOVED_TTL_ZERO = 8,
REMOVED_EMPTY = 9,
+ DISCARDED_INCLUDE_SUBDOMAINS_NOT_ALLOWED = 10,
MAX
};
diff --git a/chromium/net/reporting/reporting_header_parser_unittest.cc b/chromium/net/reporting/reporting_header_parser_unittest.cc
index 08f24e489bb..48bda761e22 100644
--- a/chromium/net/reporting/reporting_header_parser_unittest.cc
+++ b/chromium/net/reporting/reporting_header_parser_unittest.cc
@@ -8,12 +8,14 @@
#include <string>
#include <vector>
+#include "base/bind.h"
#include "base/json/json_reader.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/test/simple_test_tick_clock.h"
#include "base/time/time.h"
#include "base/values.h"
+#include "net/reporting/mock_persistent_reporting_store.h"
#include "net/reporting/reporting_cache.h"
#include "net/reporting/reporting_endpoint.h"
#include "net/reporting/reporting_test_util.h"
@@ -24,17 +26,40 @@
namespace net {
namespace {
-class ReportingHeaderParserTest : public ReportingTestBase {
+using CommandType = MockPersistentReportingStore::Command::Type;
+
+// This test is parametrized on a boolean that represents whether to use a
+// MockPersistentReportingStore.
+class ReportingHeaderParserTest : public ReportingTestBase,
+ public ::testing::WithParamInterface<bool> {
protected:
ReportingHeaderParserTest() : ReportingTestBase() {
ReportingPolicy policy;
policy.max_endpoints_per_origin = 10;
policy.max_endpoint_count = 20;
UsePolicy(policy);
+
+ if (GetParam())
+ store_ = std::make_unique<MockPersistentReportingStore>();
+ else
+ store_ = nullptr;
+ UseStore(store_.get());
}
~ReportingHeaderParserTest() override = default;
+ void SetUp() override {
+ // All ReportingCache methods assume that the store has been initialized.
+ if (mock_store()) {
+ mock_store()->LoadReportingClients(
+ base::BindOnce(&ReportingCache::AddClientsLoadedFromStore,
+ base::Unretained(cache())));
+ mock_store()->FinishLoading(true);
+ }
+ }
+
+ MockPersistentReportingStore* mock_store() { return store_.get(); }
+
ReportingEndpointGroup MakeEndpointGroup(
std::string name,
std::vector<ReportingEndpoint::EndpointInfo> endpoints,
@@ -112,18 +137,24 @@ class ReportingHeaderParserTest : public ReportingTestBase {
const GURL kUrl2_ = GURL("https://origin2.test/path");
const url::Origin kOrigin2_ =
url::Origin::Create(GURL("https://origin2.test/"));
+ const GURL kUrlEtld_ = GURL("https://co.uk/foo.html/");
+ const url::Origin kOriginEtld_ = url::Origin::Create(kUrlEtld_);
const GURL kEndpoint_ = GURL("https://endpoint.test/");
const GURL kEndpoint2_ = GURL("https://endpoint2.test/");
+ const GURL kEndpoint3_ = GURL("https://endpoint3.test/");
const std::string kGroup_ = "group";
const std::string kGroup2_ = "group2";
const std::string kType_ = "type";
+
+ private:
+ std::unique_ptr<MockPersistentReportingStore> store_;
};
// TODO(juliatuttle): Ideally these tests should be expecting that JSON parsing
// (and therefore header parsing) may happen asynchronously, but the entire
// pipeline is also tested by NetworkErrorLoggingEndToEndTest.
-TEST_F(ReportingHeaderParserTest, Invalid) {
+TEST_P(ReportingHeaderParserTest, Invalid) {
static const struct {
const char* header_value;
const char* description;
@@ -172,10 +203,16 @@ TEST_F(ReportingHeaderParserTest, Invalid) {
EXPECT_EQ(0u, cache()->GetEndpointCount())
<< "Invalid Report-To header (" << test_case.description << ": \""
<< test_case.header_value << "\") parsed as valid.";
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(0, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(0, mock_store()->StoredEndpointGroupsCount());
+ }
}
}
-TEST_F(ReportingHeaderParserTest, Basic) {
+TEST_P(ReportingHeaderParserTest, Basic) {
std::vector<ReportingEndpoint::EndpointInfo> endpoints = {{kEndpoint_}};
std::string header =
@@ -197,9 +234,27 @@ TEST_F(ReportingHeaderParserTest, Basic) {
endpoint.info.priority);
EXPECT_EQ(ReportingEndpoint::EndpointInfo::kDefaultWeight,
endpoint.info.weight);
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(1, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(1, mock_store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
-TEST_F(ReportingHeaderParserTest, OmittedGroupName) {
+TEST_P(ReportingHeaderParserTest, OmittedGroupName) {
std::vector<ReportingEndpoint::EndpointInfo> endpoints = {{kEndpoint_}};
std::string header =
ConstructHeaderGroupString(MakeEndpointGroup(std::string(), endpoints));
@@ -220,9 +275,27 @@ TEST_F(ReportingHeaderParserTest, OmittedGroupName) {
endpoint.info.priority);
EXPECT_EQ(ReportingEndpoint::EndpointInfo::kDefaultWeight,
endpoint.info.weight);
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(1, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(1, mock_store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "default",
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, "default", OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
-TEST_F(ReportingHeaderParserTest, IncludeSubdomainsTrue) {
+TEST_P(ReportingHeaderParserTest, IncludeSubdomainsTrue) {
std::vector<ReportingEndpoint::EndpointInfo> endpoints = {{kEndpoint_}};
std::string header = ConstructHeaderGroupString(
@@ -234,9 +307,27 @@ TEST_F(ReportingHeaderParserTest, IncludeSubdomainsTrue) {
EndpointGroupExistsInCache(kOrigin_, kGroup_, OriginSubdomains::INCLUDE));
EXPECT_EQ(1u, cache()->GetEndpointCount());
EXPECT_TRUE(EndpointExistsInCache(kOrigin_, kGroup_, kEndpoint_));
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(1, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(1, mock_store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
-TEST_F(ReportingHeaderParserTest, IncludeSubdomainsFalse) {
+TEST_P(ReportingHeaderParserTest, IncludeSubdomainsFalse) {
std::vector<ReportingEndpoint::EndpointInfo> endpoints = {{kEndpoint_}};
std::string header = ConstructHeaderGroupString(
@@ -249,9 +340,55 @@ TEST_F(ReportingHeaderParserTest, IncludeSubdomainsFalse) {
EndpointGroupExistsInCache(kOrigin_, kGroup_, OriginSubdomains::EXCLUDE));
EXPECT_EQ(1u, cache()->GetEndpointCount());
EXPECT_TRUE(EndpointExistsInCache(kOrigin_, kGroup_, kEndpoint_));
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(1, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(1, mock_store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
+}
+
+TEST_P(ReportingHeaderParserTest, IncludeSubdomainsEtldRejected) {
+ std::vector<ReportingEndpoint::EndpointInfo> endpoints = {{kEndpoint_}};
+
+ std::string header = ConstructHeaderGroupString(
+ MakeEndpointGroup(kGroup_, endpoints, OriginSubdomains::INCLUDE));
+ ParseHeader(kUrlEtld_, header);
+
+ EXPECT_EQ(0u, cache()->GetEndpointGroupCountForTesting());
+ EXPECT_FALSE(EndpointGroupExistsInCache(kOriginEtld_, kGroup_,
+ OriginSubdomains::INCLUDE));
+ EXPECT_EQ(0u, cache()->GetEndpointCount());
+ EXPECT_FALSE(EndpointExistsInCache(kOriginEtld_, kGroup_, kEndpoint_));
}
-TEST_F(ReportingHeaderParserTest, IncludeSubdomainsNotBoolean) {
+TEST_P(ReportingHeaderParserTest, NonIncludeSubdomainsEtldAccepted) {
+ std::vector<ReportingEndpoint::EndpointInfo> endpoints = {{kEndpoint_}};
+
+ std::string header = ConstructHeaderGroupString(
+ MakeEndpointGroup(kGroup_, endpoints, OriginSubdomains::EXCLUDE));
+ ParseHeader(kUrlEtld_, header);
+
+ EXPECT_EQ(1u, cache()->GetEndpointGroupCountForTesting());
+ EXPECT_TRUE(EndpointGroupExistsInCache(kOriginEtld_, kGroup_,
+ OriginSubdomains::EXCLUDE));
+ EXPECT_EQ(1u, cache()->GetEndpointCount());
+ EXPECT_TRUE(EndpointExistsInCache(kOriginEtld_, kGroup_, kEndpoint_));
+}
+
+TEST_P(ReportingHeaderParserTest, IncludeSubdomainsNotBoolean) {
std::string header =
"{\"group\": \"" + kGroup_ +
"\", "
@@ -265,9 +402,27 @@ TEST_F(ReportingHeaderParserTest, IncludeSubdomainsNotBoolean) {
EndpointGroupExistsInCache(kOrigin_, kGroup_, OriginSubdomains::DEFAULT));
EXPECT_EQ(1u, cache()->GetEndpointCount());
EXPECT_TRUE(EndpointExistsInCache(kOrigin_, kGroup_, kEndpoint_));
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(1, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(1, mock_store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
-TEST_F(ReportingHeaderParserTest, NonDefaultPriority) {
+TEST_P(ReportingHeaderParserTest, NonDefaultPriority) {
const int kNonDefaultPriority = 10;
std::vector<ReportingEndpoint::EndpointInfo> endpoints = {
{kEndpoint_, kNonDefaultPriority}};
@@ -286,9 +441,27 @@ TEST_F(ReportingHeaderParserTest, NonDefaultPriority) {
EXPECT_EQ(kNonDefaultPriority, endpoint.info.priority);
EXPECT_EQ(ReportingEndpoint::EndpointInfo::kDefaultWeight,
endpoint.info.weight);
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(1, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(1, mock_store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
-TEST_F(ReportingHeaderParserTest, NonDefaultWeight) {
+TEST_P(ReportingHeaderParserTest, NonDefaultWeight) {
const int kNonDefaultWeight = 10;
std::vector<ReportingEndpoint::EndpointInfo> endpoints = {
{kEndpoint_, ReportingEndpoint::EndpointInfo::kDefaultPriority,
@@ -308,9 +481,27 @@ TEST_F(ReportingHeaderParserTest, NonDefaultWeight) {
EXPECT_EQ(ReportingEndpoint::EndpointInfo::kDefaultPriority,
endpoint.info.priority);
EXPECT_EQ(kNonDefaultWeight, endpoint.info.weight);
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(1, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(1, mock_store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
-TEST_F(ReportingHeaderParserTest, MaxAge) {
+TEST_P(ReportingHeaderParserTest, MaxAge) {
const int kMaxAgeSecs = 100;
base::TimeDelta ttl = base::TimeDelta::FromSeconds(kMaxAgeSecs);
base::Time expires = clock()->Now() + ttl;
@@ -324,9 +515,27 @@ TEST_F(ReportingHeaderParserTest, MaxAge) {
EXPECT_EQ(1u, cache()->GetEndpointGroupCountForTesting());
EXPECT_TRUE(EndpointGroupExistsInCache(kOrigin_, kGroup_,
OriginSubdomains::DEFAULT, expires));
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(1, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(1, mock_store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
-TEST_F(ReportingHeaderParserTest, MultipleEndpointsSameGroup) {
+TEST_P(ReportingHeaderParserTest, MultipleEndpointsSameGroup) {
std::vector<ReportingEndpoint::EndpointInfo> endpoints = {{kEndpoint_},
{kEndpoint2_}};
std::string header =
@@ -359,9 +568,31 @@ TEST_F(ReportingHeaderParserTest, MultipleEndpointsSameGroup) {
endpoint2.info.priority);
EXPECT_EQ(ReportingEndpoint::EndpointInfo::kDefaultWeight,
endpoint2.info.weight);
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(2, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(1, mock_store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
-TEST_F(ReportingHeaderParserTest, MultipleEndpointsDifferentGroups) {
+TEST_P(ReportingHeaderParserTest, MultipleEndpointsDifferentGroups) {
std::vector<ReportingEndpoint::EndpointInfo> endpoints1 = {{kEndpoint_}};
std::vector<ReportingEndpoint::EndpointInfo> endpoints2 = {{kEndpoint_}};
std::string header =
@@ -397,9 +628,36 @@ TEST_F(ReportingHeaderParserTest, MultipleEndpointsDifferentGroups) {
endpoint2.info.priority);
EXPECT_EQ(ReportingEndpoint::EndpointInfo::kDefaultWeight,
endpoint2.info.weight);
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(2, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(2, mock_store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup2_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
-TEST_F(ReportingHeaderParserTest, MultipleHeadersFromDifferentOrigins) {
+TEST_P(ReportingHeaderParserTest, MultipleHeadersFromDifferentOrigins) {
// First origin sets a header with two endpoints in the same group.
std::vector<ReportingEndpoint::EndpointInfo> endpoints1 = {{kEndpoint_},
{kEndpoint2_}};
@@ -432,9 +690,49 @@ TEST_F(ReportingHeaderParserTest, MultipleHeadersFromDifferentOrigins) {
EXPECT_TRUE(FindEndpointInCache(kOrigin_, kGroup_, kEndpoint2_));
EXPECT_TRUE(FindEndpointInCache(kOrigin2_, kGroup_, kEndpoint_));
EXPECT_TRUE(FindEndpointInCache(kOrigin2_, kGroup2_, kEndpoint2_));
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(4, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(3, mock_store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin2_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin2_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin2_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin2_, kGroup2_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
-TEST_F(ReportingHeaderParserTest,
+TEST_P(ReportingHeaderParserTest,
HeaderErroneouslyContainsMultipleGroupsOfSameName) {
std::vector<ReportingEndpoint::EndpointInfo> endpoints1 = {{kEndpoint_}};
std::vector<ReportingEndpoint::EndpointInfo> endpoints2 = {{kEndpoint2_}};
@@ -470,9 +768,95 @@ TEST_F(ReportingHeaderParserTest,
endpoint2.info.priority);
EXPECT_EQ(ReportingEndpoint::EndpointInfo::kDefaultWeight,
endpoint2.info.weight);
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(2, mock_store()->StoredEndpointsCount());
+ EXPECT_EQ(1, mock_store()->StoredEndpointGroupsCount());
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
+}
+
+TEST_P(ReportingHeaderParserTest,
+ HeaderErroneouslyContainsGroupsWithRedundantEndpoints) {
+ std::vector<ReportingEndpoint::EndpointInfo> endpoints = {{kEndpoint_},
+ {kEndpoint_}};
+ std::string header =
+ ConstructHeaderGroupString(MakeEndpointGroup(kGroup_, endpoints));
+ ParseHeader(kUrl_, header);
+
+ // We should dedupe the identical endpoint URLs.
+ EXPECT_EQ(1u, cache()->GetEndpointCount());
+ ASSERT_TRUE(FindEndpointInCache(kOrigin_, kGroup_, kEndpoint_));
+
+ EXPECT_TRUE(
+ EndpointGroupExistsInCache(kOrigin_, kGroup_, OriginSubdomains::DEFAULT));
+ EXPECT_EQ(1u, cache()->GetEndpointGroupCountForTesting());
+
+ EXPECT_TRUE(OriginClientExistsInCache(kOrigin_));
+}
+
+TEST_P(ReportingHeaderParserTest,
+ HeaderErroneouslyContainsMultipleGroupsOfSameNameAndEndpoints) {
+ std::vector<ReportingEndpoint::EndpointInfo> endpoints = {{kEndpoint_}};
+ std::string header =
+ ConstructHeaderGroupString(MakeEndpointGroup(kGroup_, endpoints)) + ", " +
+ ConstructHeaderGroupString(MakeEndpointGroup(kGroup_, endpoints));
+ ParseHeader(kUrl_, header);
+
+ // We should dedupe the identical endpoint URLs, even when they're in
+ // different headers.
+ EXPECT_EQ(1u, cache()->GetEndpointCount());
+ ASSERT_TRUE(FindEndpointInCache(kOrigin_, kGroup_, kEndpoint_));
+
+ EXPECT_TRUE(
+ EndpointGroupExistsInCache(kOrigin_, kGroup_, OriginSubdomains::DEFAULT));
+ EXPECT_EQ(1u, cache()->GetEndpointGroupCountForTesting());
+
+ EXPECT_TRUE(OriginClientExistsInCache(kOrigin_));
}
-TEST_F(ReportingHeaderParserTest, OverwriteOldHeader) {
+TEST_P(ReportingHeaderParserTest,
+ HeaderErroneouslyContainsGroupsOfSameNameAndOverlappingEndpoints) {
+ std::vector<ReportingEndpoint::EndpointInfo> endpoints1 = {{kEndpoint_},
+ {kEndpoint2_}};
+ std::vector<ReportingEndpoint::EndpointInfo> endpoints2 = {{kEndpoint_},
+ {kEndpoint3_}};
+ std::string header =
+ ConstructHeaderGroupString(MakeEndpointGroup(kGroup_, endpoints1)) +
+ ", " + ConstructHeaderGroupString(MakeEndpointGroup(kGroup_, endpoints2));
+ ParseHeader(kUrl_, header);
+
+ // We should dedupe the identical endpoint URLs, even when they're in
+ // different headers.
+ EXPECT_EQ(3u, cache()->GetEndpointCount());
+ ASSERT_TRUE(FindEndpointInCache(kOrigin_, kGroup_, kEndpoint_));
+ ASSERT_TRUE(FindEndpointInCache(kOrigin_, kGroup_, kEndpoint2_));
+ ASSERT_TRUE(FindEndpointInCache(kOrigin_, kGroup_, kEndpoint3_));
+
+ EXPECT_TRUE(
+ EndpointGroupExistsInCache(kOrigin_, kGroup_, OriginSubdomains::DEFAULT));
+ EXPECT_EQ(1u, cache()->GetEndpointGroupCountForTesting());
+
+ EXPECT_TRUE(OriginClientExistsInCache(kOrigin_));
+}
+
+TEST_P(ReportingHeaderParserTest, OverwriteOldHeader) {
// First, the origin sets a header with two endpoints in the same group.
std::vector<ReportingEndpoint::EndpointInfo> endpoints1 = {
{kEndpoint_, 10 /* priority */}, {kEndpoint2_}};
@@ -487,6 +871,29 @@ TEST_F(ReportingHeaderParserTest, OverwriteOldHeader) {
EXPECT_EQ(2u, cache()->GetEndpointCount());
EXPECT_TRUE(FindEndpointInCache(kOrigin_, kGroup_, kEndpoint_));
EXPECT_TRUE(FindEndpointInCache(kOrigin_, kGroup_, kEndpoint2_));
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(2,
+ mock_store()->CountCommands(CommandType::ADD_REPORTING_ENDPOINT));
+ EXPECT_EQ(1, mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP));
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
// Second header from the same origin should overwrite the previous one.
std::vector<ReportingEndpoint::EndpointInfo> endpoints2 = {
@@ -514,9 +921,34 @@ TEST_F(ReportingHeaderParserTest, OverwriteOldHeader) {
FindEndpointInCache(kOrigin_, kGroup_, kEndpoint_).info.priority);
EXPECT_FALSE(FindEndpointInCache(kOrigin_, kGroup_, kEndpoint2_));
EXPECT_TRUE(FindEndpointInCache(kOrigin_, kGroup2_, kEndpoint2_));
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(2 + 1,
+ mock_store()->CountCommands(CommandType::ADD_REPORTING_ENDPOINT));
+ EXPECT_EQ(1 + 1, mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP));
+ EXPECT_EQ(
+ 1, mock_store()->CountCommands(CommandType::DELETE_REPORTING_ENDPOINT));
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup2_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
-TEST_F(ReportingHeaderParserTest, OverwriteOldHeaderWithCompletelyNew) {
+TEST_P(ReportingHeaderParserTest, OverwriteOldHeaderWithCompletelyNew) {
std::vector<ReportingEndpoint::EndpointInfo> endpoints1_1 = {{MakeURL(10)},
{MakeURL(11)}};
std::vector<ReportingEndpoint::EndpointInfo> endpoints2_1 = {{MakeURL(20)},
@@ -537,6 +969,49 @@ TEST_F(ReportingHeaderParserTest, OverwriteOldHeaderWithCompletelyNew) {
EXPECT_TRUE(
EndpointGroupExistsInCache(kOrigin_, "3", OriginSubdomains::DEFAULT));
EXPECT_EQ(6u, cache()->GetEndpointCount());
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(6,
+ mock_store()->CountCommands(CommandType::ADD_REPORTING_ENDPOINT));
+ EXPECT_EQ(3, mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP));
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "1", endpoints1_1[0]));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "1", endpoints1_1[1]));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "2", endpoints2_1[0]));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "2", endpoints2_1[1]));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "3", endpoints3_1[0]));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "3", endpoints3_1[1]));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, "1", OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, "2", OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, "3", OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
// Replace endpoints in each group with completely new endpoints.
std::vector<ReportingEndpoint::EndpointInfo> endpoints1_2 = {{MakeURL(12)}};
@@ -565,6 +1040,47 @@ TEST_F(ReportingHeaderParserTest, OverwriteOldHeaderWithCompletelyNew) {
EXPECT_TRUE(FindEndpointInCache(kOrigin_, "3", MakeURL(32)));
EXPECT_FALSE(FindEndpointInCache(kOrigin_, "3", MakeURL(30)));
EXPECT_FALSE(FindEndpointInCache(kOrigin_, "3", MakeURL(31)));
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(6 + 3,
+ mock_store()->CountCommands(CommandType::ADD_REPORTING_ENDPOINT));
+ EXPECT_EQ(3, mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP));
+ EXPECT_EQ(
+ 6, mock_store()->CountCommands(CommandType::DELETE_REPORTING_ENDPOINT));
+ EXPECT_EQ(0, mock_store()->CountCommands(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP));
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "1", endpoints1_2[0]));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "2", endpoints2_2[0]));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "3", endpoints3_2[0]));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "1", endpoints1_1[0]));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "1", endpoints1_1[1]));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "2", endpoints2_1[0]));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "2", endpoints2_1[1]));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "3", endpoints3_1[0]));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "3", endpoints3_1[1]));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
// Replace all the groups with completely new groups.
std::vector<ReportingEndpoint::EndpointInfo> endpoints4_3 = {{MakeURL(40)}};
@@ -586,14 +1102,75 @@ TEST_F(ReportingHeaderParserTest, OverwriteOldHeaderWithCompletelyNew) {
EXPECT_FALSE(
EndpointGroupExistsInCache(kOrigin_, "3", OriginSubdomains::DEFAULT));
EXPECT_EQ(2u, cache()->GetEndpointCount());
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(6 + 3 + 2,
+ mock_store()->CountCommands(CommandType::ADD_REPORTING_ENDPOINT));
+ EXPECT_EQ(3 + 2, mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP));
+ EXPECT_EQ(6 + 3, mock_store()->CountCommands(
+ CommandType::DELETE_REPORTING_ENDPOINT));
+ EXPECT_EQ(3, mock_store()->CountCommands(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP));
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "4", endpoints4_3[0]));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "5", endpoints5_3[0]));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, "4", OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, "5", OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "1", endpoints1_2[0]));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "2", endpoints2_2[0]));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, "3", endpoints3_2[0]));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, "1", OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, "2", OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, "3", OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
-TEST_F(ReportingHeaderParserTest, ZeroMaxAgeRemovesEndpointGroup) {
+TEST_P(ReportingHeaderParserTest, ZeroMaxAgeRemovesEndpointGroup) {
// Without a pre-existing client, max_age: 0 should do nothing.
ASSERT_EQ(0u, cache()->GetEndpointCount());
ParseHeader(kUrl_, "{\"endpoints\":[{\"url\":\"" + kEndpoint_.spec() +
"\"}],\"max_age\":0}");
EXPECT_EQ(0u, cache()->GetEndpointCount());
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(0,
+ mock_store()->CountCommands(CommandType::ADD_REPORTING_ENDPOINT));
+ EXPECT_EQ(0, mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP));
+ }
// Set a header with two endpoint groups.
std::vector<ReportingEndpoint::EndpointInfo> endpoints1 = {{kEndpoint_}};
@@ -611,6 +1188,34 @@ TEST_F(ReportingHeaderParserTest, ZeroMaxAgeRemovesEndpointGroup) {
EXPECT_TRUE(EndpointGroupExistsInCache(kOrigin_, kGroup2_,
OriginSubdomains::DEFAULT));
EXPECT_EQ(2u, cache()->GetEndpointCount());
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(2,
+ mock_store()->CountCommands(CommandType::ADD_REPORTING_ENDPOINT));
+ EXPECT_EQ(2, mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP));
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ expected_commands.emplace_back(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup2_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
// Set another header with max_age: 0 to delete one of the groups.
std::string header2 = ConstructHeaderGroupString(MakeEndpointGroup(
@@ -631,6 +1236,29 @@ TEST_F(ReportingHeaderParserTest, ZeroMaxAgeRemovesEndpointGroup) {
EXPECT_TRUE(EndpointGroupExistsInCache(kOrigin_, kGroup2_,
OriginSubdomains::DEFAULT));
EXPECT_EQ(1u, cache()->GetEndpointCount());
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(2,
+ mock_store()->CountCommands(CommandType::ADD_REPORTING_ENDPOINT));
+ EXPECT_EQ(2, mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP));
+ EXPECT_EQ(
+ 1, mock_store()->CountCommands(CommandType::DELETE_REPORTING_ENDPOINT));
+ EXPECT_EQ(1, mock_store()->CountCommands(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP));
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup_,
+ ReportingEndpoint::EndpointInfo{kEndpoint_}));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
// Set another header with max_age: 0 to delete the other group. (Should work
// even if the endpoints field is an empty list.)
@@ -644,9 +1272,32 @@ TEST_F(ReportingHeaderParserTest, ZeroMaxAgeRemovesEndpointGroup) {
EXPECT_FALSE(OriginClientExistsInCache(kOrigin_));
EXPECT_EQ(0u, cache()->GetEndpointGroupCountForTesting());
EXPECT_EQ(0u, cache()->GetEndpointCount());
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(2,
+ mock_store()->CountCommands(CommandType::ADD_REPORTING_ENDPOINT));
+ EXPECT_EQ(2, mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP));
+ EXPECT_EQ(1 + 1, mock_store()->CountCommands(
+ CommandType::DELETE_REPORTING_ENDPOINT));
+ EXPECT_EQ(1 + 1, mock_store()->CountCommands(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP));
+ MockPersistentReportingStore::CommandList expected_commands;
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT,
+ ReportingEndpoint(kOrigin_, kGroup2_,
+ ReportingEndpoint::EndpointInfo{kEndpoint2_}));
+ expected_commands.emplace_back(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP,
+ CachedReportingEndpointGroup(
+ kOrigin_, kGroup2_, OriginSubdomains::DEFAULT /* irrelevant */,
+ base::Time() /* irrelevant */, base::Time() /* irrelevant */));
+ EXPECT_THAT(mock_store()->GetAllCommands(),
+ testing::IsSupersetOf(expected_commands));
+ }
}
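For reference, removal is driven purely by the header value: a group entry whose max_age is 0 deletes any previously configured group of that name for the origin, even when its endpoints list is empty. A hypothetical Report-To value of that shape:

  // Hypothetical header value that removes the previously set "group".
  const char kRemovalHeader[] =
      "{\"group\": \"group\", \"max_age\": 0, \"endpoints\": []}";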
-TEST_F(ReportingHeaderParserTest, EvictEndpointsOverPerOriginLimit1) {
+TEST_P(ReportingHeaderParserTest, EvictEndpointsOverPerOriginLimit1) {
// Set a header with too many endpoints, all in the same group.
std::vector<ReportingEndpoint::EndpointInfo> endpoints;
for (size_t i = 0; i < policy().max_endpoints_per_origin + 1; ++i) {
@@ -658,9 +1309,20 @@ TEST_F(ReportingHeaderParserTest, EvictEndpointsOverPerOriginLimit1) {
// Endpoint count should be at most the limit.
EXPECT_GE(policy().max_endpoints_per_origin, cache()->GetEndpointCount());
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(policy().max_endpoints_per_origin + 1,
+ static_cast<unsigned long>(mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT)));
+ EXPECT_EQ(1, mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP));
+ EXPECT_EQ(
+ 1, mock_store()->CountCommands(CommandType::DELETE_REPORTING_ENDPOINT));
+ }
}
-TEST_F(ReportingHeaderParserTest, EvictEndpointsOverPerOriginLimit2) {
+TEST_P(ReportingHeaderParserTest, EvictEndpointsOverPerOriginLimit2) {
// Set a header with too many endpoints, in different groups.
std::string header;
for (size_t i = 0; i < policy().max_endpoints_per_origin + 1; ++i) {
@@ -674,9 +1336,23 @@ TEST_F(ReportingHeaderParserTest, EvictEndpointsOverPerOriginLimit2) {
// Endpoint count should be at most the limit.
EXPECT_GE(policy().max_endpoints_per_origin, cache()->GetEndpointCount());
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(policy().max_endpoints_per_origin + 1,
+ static_cast<unsigned long>(mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT)));
+ EXPECT_EQ(policy().max_endpoints_per_origin + 1,
+ static_cast<unsigned long>(mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP)));
+ EXPECT_EQ(
+ 1, mock_store()->CountCommands(CommandType::DELETE_REPORTING_ENDPOINT));
+ EXPECT_EQ(1, mock_store()->CountCommands(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP));
+ }
}
-TEST_F(ReportingHeaderParserTest, EvictEndpointsOverGlobalLimit) {
+TEST_P(ReportingHeaderParserTest, EvictEndpointsOverGlobalLimit) {
// Set headers from different origins up to the global limit.
for (size_t i = 0; i < policy().max_endpoint_count; ++i) {
std::vector<ReportingEndpoint::EndpointInfo> endpoints = {{MakeURL(i)}};
@@ -692,7 +1368,25 @@ TEST_F(ReportingHeaderParserTest, EvictEndpointsOverGlobalLimit) {
// Endpoint count should be at most the limit.
EXPECT_GE(policy().max_endpoint_count, cache()->GetEndpointCount());
+
+ if (mock_store()) {
+ mock_store()->Flush();
+ EXPECT_EQ(policy().max_endpoint_count + 1,
+ static_cast<unsigned long>(mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT)));
+ EXPECT_EQ(policy().max_endpoint_count + 1,
+ static_cast<unsigned long>(mock_store()->CountCommands(
+ CommandType::ADD_REPORTING_ENDPOINT_GROUP)));
+ EXPECT_EQ(
+ 1, mock_store()->CountCommands(CommandType::DELETE_REPORTING_ENDPOINT));
+ EXPECT_EQ(1, mock_store()->CountCommands(
+ CommandType::DELETE_REPORTING_ENDPOINT_GROUP));
+ }
}
+INSTANTIATE_TEST_SUITE_P(ReportingHeaderParserStoreTest,
+ ReportingHeaderParserTest,
+ testing::Bool());
+
} // namespace
} // namespace net
diff --git a/chromium/net/reporting/reporting_service.cc b/chromium/net/reporting/reporting_service.cc
index 9fe09f261e3..313a825d111 100644
--- a/chromium/net/reporting/reporting_service.cc
+++ b/chromium/net/reporting/reporting_service.cc
@@ -59,7 +59,7 @@ class ReportingServiceImpl : public ReportingService {
context_->cache()->AddReport(sanitized_url, user_agent, group, type,
std::move(body), depth,
- context_->tick_clock()->NowTicks(), 0);
+ context_->tick_clock().NowTicks(), 0);
}
void ProcessHeader(const GURL& url,