diff options
author | Allan Sandfeld Jensen <allan.jensen@qt.io> | 2018-05-15 10:20:33 +0200 |
---|---|---|
committer | Allan Sandfeld Jensen <allan.jensen@qt.io> | 2018-05-15 10:28:57 +0000 |
commit | d17ea114e5ef69ad5d5d7413280a13e6428098aa (patch) | |
tree | 2c01a75df69f30d27b1432467cfe7c1467a498da /chromium/components/download/internal | |
parent | 8c5c43c7b138c9b4b0bf56d946e61d3bbc111bec (diff) | |
download | qtwebengine-chromium-d17ea114e5ef69ad5d5d7413280a13e6428098aa.tar.gz |
BASELINE: Update Chromium to 67.0.3396.47
Change-Id: Idcb1341782e417561a2473eeecc82642dafda5b7
Reviewed-by: Michal Klocek <michal.klocek@qt.io>
Diffstat (limited to 'chromium/components/download/internal')
69 files changed, 13009 insertions, 408 deletions
diff --git a/chromium/components/download/internal/background_service/BUILD.gn b/chromium/components/download/internal/background_service/BUILD.gn index 3f0a725ab6a..9b20067d64f 100644 --- a/chromium/components/download/internal/background_service/BUILD.gn +++ b/chromium/components/download/internal/background_service/BUILD.gn @@ -27,6 +27,8 @@ static_library("internal") { "controller_impl.h", "debugging_client.cc", "debugging_client.h", + "download_blockage_status.cc", + "download_blockage_status.h", "download_driver.h", "download_service_impl.cc", "download_service_impl.h", @@ -87,6 +89,7 @@ static_library("internal") { "//components/download/public/background_service:public", "//components/leveldb_proto", "//net", + "//services/network/public/cpp", "//storage/browser", ] @@ -152,7 +155,7 @@ source_set("unit_tests") { "//components/download/internal/background_service/test:test_support", "//components/download/public/background_service/test:test_support", "//components/leveldb_proto:test_support", - "//net:test_support", + "//services/network:test_support", "//storage/browser", "//testing/gmock", "//testing/gtest", diff --git a/chromium/components/download/internal/background_service/DEPS b/chromium/components/download/internal/background_service/DEPS index ed5bc8cbaac..9500a4e33eb 100644 --- a/chromium/components/download/internal/background_service/DEPS +++ b/chromium/components/download/internal/background_service/DEPS @@ -5,6 +5,8 @@ include_rules = [ "+base", "+jni", "+net", + "+services/network/public", + "+services/network/test", "+storage/browser", "+storage/common", ] diff --git a/chromium/components/download/internal/background_service/config.cc b/chromium/components/download/internal/background_service/config.cc index 81aaaaa7ece..800185c98af 100644 --- a/chromium/components/download/internal/background_service/config.cc +++ b/chromium/components/download/internal/background_service/config.cc @@ -6,7 +6,6 @@ #include <string> -#include 
"base/memory/ptr_util.h" #include "base/metrics/field_trial_params.h" #include "base/strings/string_number_conversions.h" #include "components/download/public/background_service/features.h" @@ -76,6 +75,10 @@ const base::TimeDelta kDefaultNavigationCompletionDelay = const base::TimeDelta kDefaultNavigationTimeoutDelay = base::TimeDelta::FromSeconds(300); +// The default timeout for a pending upload. +const base::TimeDelta kDefaultPendingUploadTimeoutDelay = + base::TimeDelta::FromSeconds(30); + // The default value of download retry delay when the download is failed. const base::TimeDelta kDefaultDownloadRetryDelay = base::TimeDelta::FromSeconds(20); @@ -146,6 +149,11 @@ std::unique_ptr<Configuration> Configuration::CreateFromFinch() { base::TimeDelta::FromSeconds(base::saturated_cast<int>( GetFinchConfigUInt(kNavigationTimeoutDelaySecondsConfig, kDefaultNavigationTimeoutDelay.InSeconds()))); + config->pending_upload_timeout_delay = + base::TimeDelta::FromSeconds(base::saturated_cast<int>( + GetFinchConfigUInt(kPendingUploadTimeoutDelaySecondsConfig, + kDefaultPendingUploadTimeoutDelay.InSeconds()))); + config->download_retry_delay = base::TimeDelta::FromMilliseconds(base::saturated_cast<int>( GetFinchConfigUInt(kDownloadRetryDelayMsConfig, @@ -170,6 +178,7 @@ Configuration::Configuration() network_change_delay(kDefaultNetworkChangeDelay), navigation_completion_delay(kDefaultNavigationCompletionDelay), navigation_timeout_delay(kDefaultNavigationTimeoutDelay), + pending_upload_timeout_delay(kDefaultPendingUploadTimeoutDelay), download_retry_delay(kDefaultDownloadRetryDelay) {} } // namespace download diff --git a/chromium/components/download/internal/background_service/config.h b/chromium/components/download/internal/background_service/config.h index 6727661372a..26af2e079af 100644 --- a/chromium/components/download/internal/background_service/config.h +++ b/chromium/components/download/internal/background_service/config.h @@ -69,6 +69,12 @@ constexpr char 
kNavigationCompletionDelaySecondsConfig[] = constexpr char kNavigationTimeoutDelaySecondsConfig[] = "navigation_timeout_delay_seconds"; +// Configuration name for the minimum timeout value after which an upload can be +// killed if the client still hasn't responded with the upload data. Measured in +// seconds. +constexpr char kPendingUploadTimeoutDelaySecondsConfig[] = + "pending_upload_timeout_delay_seconds"; + // Configuration name for the retry delay when the download is failed, measured // in milliseconds. constexpr char kDownloadRetryDelayMsConfig[] = "retry_delay_ms"; @@ -142,6 +148,10 @@ struct Configuration { // The timeout to wait for after a navigation starts. base::TimeDelta navigation_timeout_delay; + // The minimum timeout after which upload entries waiting on data from their + // clients might be killed. + base::TimeDelta pending_upload_timeout_delay; + // The delay to retry a download when the download is failed. base::TimeDelta download_retry_delay; diff --git a/chromium/components/download/internal/background_service/controller.h b/chromium/components/download/internal/background_service/controller.h index 6ea581c0098..e4725da4ef3 100644 --- a/chromium/components/download/internal/background_service/controller.h +++ b/chromium/components/download/internal/background_service/controller.h @@ -36,8 +36,10 @@ enum class CompletionType { OUT_OF_RETRIES = 6, // The download expended it's number of 'free' retries. OUT_OF_RESUMPTIONS = 7, + // The upload was timed out due to unresponsive client. + UPLOAD_TIMEOUT = 8, // The count of entries for the enum. 
- COUNT = 8, + COUNT = 9, }; // The core Controller responsible for gluing various DownloadService components diff --git a/chromium/components/download/internal/background_service/controller_impl.cc b/chromium/components/download/internal/background_service/controller_impl.cc index ec17c3abaef..5b602187936 100644 --- a/chromium/components/download/internal/background_service/controller_impl.cc +++ b/chromium/components/download/internal/background_service/controller_impl.cc @@ -11,7 +11,6 @@ #include "base/bind.h" #include "base/callback_helpers.h" -#include "base/memory/ptr_util.h" #include "base/optional.h" #include "base/strings/stringprintf.h" #include "base/threading/thread_task_runner_handle.h" @@ -32,6 +31,7 @@ #include "components/download/public/background_service/download_metadata.h" #include "components/download/public/background_service/navigation_monitor.h" #include "net/traffic_annotation/network_traffic_annotation.h" +#include "services/network/public/cpp/resource_request_body.h" namespace download { namespace { @@ -45,6 +45,14 @@ void TransitTo(Entry* entry, Entry::State new_state, Model* model) { model->Update(*entry); } +// Helper function to post the callback once again before starting a download. +void RunOnDownloadReadyToStart( + GetUploadDataCallback callback, + scoped_refptr<network::ResourceRequestBody> post_body) { + base::ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, base::BindOnce(std::move(callback), post_body)); +} + // Helper function to move from a CompletionType to a Client::FailureReason. Client::FailureReason FailureReasonFromCompletionType(CompletionType type) { // SUCCEED does not map to a FailureReason. 
@@ -59,6 +67,8 @@ Client::FailureReason FailureReasonFromCompletionType(CompletionType type) { return Client::FailureReason::ABORTED; case CompletionType::TIMEOUT: return Client::FailureReason::TIMEDOUT; + case CompletionType::UPLOAD_TIMEOUT: + return Client::FailureReason::UPLOAD_TIMEDOUT; case CompletionType::UNKNOWN: return Client::FailureReason::UNKNOWN; case CompletionType::CANCEL: @@ -441,7 +451,8 @@ void ControllerImpl::OnDownloadFailed(const DriverEntry& download, return; } - if (!download.done && failure_type == FailureType::RECOVERABLE) { + if (!download.done && failure_type == FailureType::RECOVERABLE && + !entry->has_upload_data) { // Because the network offline signal comes later than actual download // failure, retry the download after a delay to avoid the retry to fail // immediately again. @@ -794,7 +805,8 @@ void ControllerImpl::ResolveInitialRequestStates() { case DriverEntry::State::INTERRUPTED: // The DriverEntry isn't done, so we need to set the Entry to the // 'active' state. - new_state = active; + new_state = + entry->has_upload_data ? Entry::State::COMPLETE : active; break; case DriverEntry::State::COMPLETE: // Intentional fallthrough. // TODO(dtrainor, xingliu) Revisit this CANCELLED state to make sure @@ -847,6 +859,7 @@ void ControllerImpl::ResolveInitialRequestStates() { driver_entry->state == DriverEntry::State::INTERRUPTED) ? CompletionType::UNKNOWN : CompletionType::SUCCEED; + // TODO(shaktisahu) : May be set a completion type for upload. HandleCompleteDownload(completion_type, entry->guid); } else { // We're staying in COMPLETE. Make sure there is no DriverEntry here. 
@@ -877,8 +890,9 @@ void ControllerImpl::UpdateDriverStateWithGuid(const std::string& guid) { void ControllerImpl::UpdateDriverState(Entry* entry) { DCHECK_EQ(controller_state_, State::READY); - if (entry->state != Entry::State::ACTIVE && - entry->state != Entry::State::PAUSED) { + if ((entry->state != Entry::State::ACTIVE && + entry->state != Entry::State::PAUSED) || + pending_uploads_.find(entry->guid) != pending_uploads_.end()) { return; } @@ -899,27 +913,14 @@ void ControllerImpl::UpdateDriverState(Entry* entry) { bool active = driver_entry.has_value() && driver_entry->state == DriverEntry::State::IN_PROGRESS; - bool want_active = entry->state == Entry::State::ACTIVE; - - bool blocked_by_criteria = - !device_status_listener_->CurrentDeviceStatus() - .MeetsCondition(entry->scheduling_params, - config_->download_battery_percentage) - .MeetsRequirements(); - bool blocked_by_downloads = - !externally_active_downloads_.empty() && - entry->scheduling_params.priority <= SchedulingParams::Priority::NORMAL; - bool blocked_by_navigation = ShouldBlockDownloadOnNavigation(entry); - bool is_blocked = - blocked_by_criteria || blocked_by_downloads || blocked_by_navigation; + auto blockage_status = IsDownloadBlocked(entry); - if (!want_active || is_blocked) { + if (blockage_status.IsBlocked()) { if (active) { stats::LogEntryEvent(stats::DownloadEvent::SUSPEND); - stats::LogDownloadPauseReason(blocked_by_criteria, !want_active, - blocked_by_navigation, - blocked_by_downloads); + stats::LogDownloadPauseReason(blockage_status, + false /*on_upload_data_received*/); } if (driver_entry.has_value()) @@ -958,18 +959,96 @@ void ControllerImpl::UpdateDriverState(Entry* entry) { } if (driver_entry.has_value()) { + // For uploads, we should never call resume unless it is already in + // progress, since we have to re-supply the upload data from client. 
+ DCHECK(!entry->has_upload_data || + driver_entry->state == DriverEntry::State::IN_PROGRESS); + driver_->Resume(entry->guid); } else { stats::LogEntryEvent(stats::DownloadEvent::START); - driver_->Start( - entry->request_params, entry->guid, entry->target_file_path, - net::NetworkTrafficAnnotationTag(entry->traffic_annotation)); + PrepareToStartDownload(entry); } } log_sink_->OnServiceDownloadChanged(entry->guid); } +void ControllerImpl::PrepareToStartDownload(Entry* entry) { + pending_uploads_.insert(entry->guid); + + auto* client = clients_->GetClient(entry->client); + DCHECK(client); + + auto callback = base::BindOnce(&ControllerImpl::OnDownloadReadyToStart, + weak_ptr_factory_.GetWeakPtr(), entry->guid); + + // To ensure no re-entrancy, we post the response again after receiving from + // the client + client->GetUploadData(entry->guid, base::BindOnce(&RunOnDownloadReadyToStart, + std::move(callback))); + + // Reset the timeout timer in case client doesn't respond. + cancel_uploads_callback_.Reset(base::BindRepeating( + &ControllerImpl::KillTimedOutUploads, weak_ptr_factory_.GetWeakPtr())); + base::ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, cancel_uploads_callback_.callback(), + config_->pending_upload_timeout_delay); +} + +void ControllerImpl::OnDownloadReadyToStart( + const std::string& guid, + scoped_refptr<network::ResourceRequestBody> post_body) { + DCHECK(pending_uploads_.find(guid) != pending_uploads_.end()); + pending_uploads_.erase(guid); + + auto* entry = model_->Get(guid); + if (!entry) { + stats::LogEntryRemovedWhileWaitingForUploadResponse(); + return; + } + + if (post_body) { + entry->has_upload_data = true; + model_->Update(*entry); + } + + stats::LogHasUploadData(entry->client, entry->has_upload_data); + + auto blockage_status = IsDownloadBlocked(entry); + if (blockage_status.IsBlocked()) { + stats::LogDownloadPauseReason(blockage_status, + true /*on_upload_data_received*/); + return; + } + + 
DCHECK(!driver_->Find(guid).has_value()); + driver_->Start(entry->request_params, entry->guid, entry->target_file_path, + post_body, + net::NetworkTrafficAnnotationTag(entry->traffic_annotation)); +} + +DownloadBlockageStatus ControllerImpl::IsDownloadBlocked(Entry* entry) { + DownloadBlockageStatus status; + status.blocked_by_criteria = + !device_status_listener_->CurrentDeviceStatus() + .MeetsCondition(entry->scheduling_params, + config_->download_battery_percentage) + .MeetsRequirements(); + status.blocked_by_downloads = + !externally_active_downloads_.empty() && + entry->scheduling_params.priority <= SchedulingParams::Priority::NORMAL; + + status.blocked_by_navigation = ShouldBlockDownloadOnNavigation(entry); + status.entry_not_active = entry->state != Entry::State::ACTIVE; + return status; +} + +void ControllerImpl::KillTimedOutUploads() { + for (const std::string& guid : std::move(pending_uploads_)) + HandleCompleteDownload(CompletionType::UPLOAD_TIMEOUT, guid); +} + void ControllerImpl::NotifyClientsOfStartup(bool state_lost) { auto categorized = util::MapEntriesToMetadataForClients( clients_->GetRegisteredClients(), model_->PeekEntries()); @@ -1048,6 +1127,8 @@ void ControllerImpl::HandleCompleteDownload(CompletionType type, entry->bytes_downloaded = driver_entry->bytes_downloaded; CompletionInfo completion_info(driver_entry->current_file_path, driver_entry->bytes_downloaded); + completion_info.blob_handle = driver_entry->blob_handle; + entry->last_cleanup_check_time = driver_entry->completion_time; base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::Bind(&ControllerImpl::SendOnDownloadSucceeded, diff --git a/chromium/components/download/internal/background_service/controller_impl.h b/chromium/components/download/internal/background_service/controller_impl.h index 52365f310ac..8c3b375426d 100644 --- a/chromium/components/download/internal/background_service/controller_impl.h +++ 
b/chromium/components/download/internal/background_service/controller_impl.h @@ -15,6 +15,7 @@ #include "base/optional.h" #include "base/trace_event/memory_dump_provider.h" #include "components/download/internal/background_service/controller.h" +#include "components/download/internal/background_service/download_blockage_status.h" #include "components/download/internal/background_service/download_driver.h" #include "components/download/internal/background_service/entry.h" #include "components/download/internal/background_service/log_source.h" @@ -165,6 +166,10 @@ class ControllerImpl : public Controller, // or resume a download accordingly. void UpdateDriverState(Entry* entry); + // Returns a struct representing various reasons whether the download cannot + // start or continue at this time. + DownloadBlockageStatus IsDownloadBlocked(Entry* entry); + // Notifies all Client in |clients_| that this controller is initialized and // lets them know which download requests we are aware of for their // DownloadClient. @@ -202,6 +207,10 @@ class ControllerImpl : public Controller, void RemoveCleanupEligibleDownloads(); void HandleExternalDownload(const std::string& guid, bool active); + void PrepareToStartDownload(Entry* entry); + void OnDownloadReadyToStart( + const std::string& guid, + scoped_refptr<network::ResourceRequestBody> post_body); // Postable methods meant to just be pass throughs to Client APIs. This is // meant to help prevent reentrancy. @@ -230,6 +239,10 @@ class ControllerImpl : public Controller, // Kills the downloads which have surpassed their cancel_after time. void KillTimedOutDownloads(); + // A periodical task that will kill all the pending uploads that haven't + // received upload data from their respective clients. 
+ void KillTimedOutUploads(); + Configuration* config_; LogSink* log_sink_; @@ -251,9 +264,11 @@ class ControllerImpl : public Controller, State controller_state_; StartupStatus startup_status_; std::set<std::string> externally_active_downloads_; + std::set<std::string> pending_uploads_; std::map<std::string, DownloadParams::StartCallback> start_callbacks_; std::map<DownloadTaskType, TaskFinishedCallback> task_finished_callbacks_; base::CancelableClosure cancel_downloads_callback_; + base::CancelableClosure cancel_uploads_callback_; // Only used to post tasks on the same thread. base::WeakPtrFactory<ControllerImpl> weak_ptr_factory_; diff --git a/chromium/components/download/internal/background_service/controller_impl_unittest.cc b/chromium/components/download/internal/background_service/controller_impl_unittest.cc index d98af57a28e..66d8ff523da 100644 --- a/chromium/components/download/internal/background_service/controller_impl_unittest.cc +++ b/chromium/components/download/internal/background_service/controller_impl_unittest.cc @@ -12,6 +12,7 @@ #include "base/macros.h" #include "base/strings/string_util.h" #include "base/test/histogram_tester.h" +#include "base/test/test_mock_time_task_runner.h" #include "base/test/test_simple_task_runner.h" #include "base/threading/thread_task_runner_handle.h" #include "components/download/internal/background_service/client_set.h" @@ -31,6 +32,7 @@ #include "components/download/public/background_service/test/empty_client.h" #include "components/download/public/background_service/test/mock_client.h" #include "net/traffic_annotation/network_traffic_annotation_test_helper.h" +#include "services/network/public/cpp/resource_request_body.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" @@ -64,6 +66,37 @@ DriverEntry BuildDriverEntry(const Entry& entry, DriverEntry::State state) { void NotifyTaskFinished(bool success) {} +class UploadClient : public test::MockClient { + public: + 
UploadClient() = default; + ~UploadClient() override = default; + + void GetUploadData(const std::string& guid, + GetUploadDataCallback callback) override; + void SetUploadResponseDelayForGuid(const std::string& guid, + unsigned int delay); + + private: + std::map<std::string, unsigned int> upload_response_delay_; + + DISALLOW_COPY_AND_ASSIGN(UploadClient); +}; + +void UploadClient::GetUploadData(const std::string& guid, + GetUploadDataCallback callback) { + scoped_refptr<network::ResourceRequestBody> post_body = + new network::ResourceRequestBody(); + unsigned int delay = upload_response_delay_[guid]; + base::ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, base::BindOnce(std::move(callback), post_body), + base::TimeDelta::FromSeconds(delay)); +} + +void UploadClient::SetUploadResponseDelayForGuid(const std::string& guid, + unsigned int delay) { + upload_response_delay_[guid] = delay; +} + class MockTaskScheduler : public TaskScheduler { public: MockTaskScheduler() = default; @@ -125,7 +158,7 @@ void MockFileMonitor::HardRecover(const FileMonitor::InitCallback& callback) { class DownloadServiceControllerImplTest : public testing::Test { public: DownloadServiceControllerImplTest() - : task_runner_(new base::TestSimpleTaskRunner), + : task_runner_(new base::TestMockTimeTaskRunner), handle_(task_runner_), controller_(nullptr), client_(nullptr), @@ -145,6 +178,7 @@ class DownloadServiceControllerImplTest : public testing::Test { void SetUp() override { auto client = std::make_unique<NiceMock<test::MockClient>>(); + auto client3 = std::make_unique<NiceMock<UploadClient>>(); auto driver = std::make_unique<test::TestDownloadDriver>(); auto store = std::make_unique<test::TestStore>(); config_ = std::make_unique<Configuration>(); @@ -158,6 +192,7 @@ class DownloadServiceControllerImplTest : public testing::Test { log_sink_ = std::make_unique<test::BlackHoleLogSink>(); client_ = client.get(); + client3_ = client3.get(); driver_ = driver.get(); store_ = 
store.get(); @@ -165,6 +200,7 @@ class DownloadServiceControllerImplTest : public testing::Test { clients->insert(std::make_pair(DownloadClient::TEST, std::move(client))); clients->insert(std::make_pair(DownloadClient::TEST_2, std::make_unique<test::EmptyClient>())); + clients->insert(std::make_pair(DownloadClient::TEST_3, std::move(client3))); auto client_set = std::make_unique<ClientSet>(std::move(clients)); auto model = std::make_unique<ModelImpl>(std::move(store)); auto device_status_listener = @@ -212,7 +248,7 @@ class DownloadServiceControllerImplTest : public testing::Test { MOCK_METHOD2(StartCallback, void(const std::string&, DownloadParams::StartResult)); - scoped_refptr<base::TestSimpleTaskRunner> task_runner_; + scoped_refptr<base::TestMockTimeTaskRunner> task_runner_; base::ThreadTaskRunnerHandle handle_; std::unique_ptr<ControllerImpl> controller_; @@ -220,6 +256,7 @@ class DownloadServiceControllerImplTest : public testing::Test { std::unique_ptr<LogSink> log_sink_; NavigationMonitorImpl navigation_monitor; test::MockClient* client_; + UploadClient* client3_; test::TestDownloadDriver* driver_; test::TestStore* store_; ModelImpl* model_; @@ -939,7 +976,7 @@ TEST_F(DownloadServiceControllerImplTest, RetryOnFailure) { // Failed entry should exist because we retry after a delay. EXPECT_NE(nullptr, model_->Get(entry2.guid)); - task_runner_->RunUntilIdle(); + task_runner_->FastForwardUntilNoTasksRemain(); // Retry is done, and failed entry should be removed. EXPECT_EQ(nullptr, model_->Get(entry2.guid)); @@ -954,7 +991,7 @@ TEST_F(DownloadServiceControllerImplTest, RetryOnFailure) { // Failed entry should exist because we retry after a delay. EXPECT_NE(nullptr, model_->Get(entry3.guid)); - task_runner_->RunUntilIdle(); + task_runner_->FastForwardUntilNoTasksRemain(); // Retry is done, and failed entry should be removed. 
EXPECT_EQ(nullptr, model_->Get(entry2.guid)); } @@ -1129,7 +1166,7 @@ TEST_F(DownloadServiceControllerImplTest, DownloadCompletionTest) { EXPECT_CALL(*client_, OnDownloadFailed(entry2.guid, Client::FailureReason::ABORTED)) .Times(1); - driver_->Start(RequestParams(), entry2.guid, entry2.target_file_path, + driver_->Start(RequestParams(), entry2.guid, entry2.target_file_path, nullptr, TRAFFIC_ANNOTATION_FOR_TESTS); // Test FailureReason::NETWORK. @@ -1141,6 +1178,138 @@ TEST_F(DownloadServiceControllerImplTest, DownloadCompletionTest) { task_runner_->RunUntilIdle(); } +TEST_F(DownloadServiceControllerImplTest, + UploadTestForSuccessPauseCancelFailureTimeout) { + auto create_entry = [this](unsigned int delay) { + Entry entry = test::BuildBasicEntry(Entry::State::ACTIVE); + entry.client = DownloadClient::TEST_3; + client3_->SetUploadResponseDelayForGuid(entry.guid, delay); + return entry; + }; + + auto verify_entry = + [this](const std::string& guid, + base::Optional<Entry::State> expected_state, + base::Optional<DriverEntry::State> expected_driver_state, + bool has_upload_data) { + auto* entry = model_->Get(guid); + auto driver_entry = driver_->Find(guid); + EXPECT_EQ(expected_state.has_value(), entry != nullptr); + if (expected_state.has_value()) { + EXPECT_EQ(expected_state.value(), entry->state); + EXPECT_EQ(has_upload_data, entry->has_upload_data); + } + + EXPECT_EQ(expected_driver_state.has_value(), driver_entry.has_value()); + if (expected_driver_state.has_value()) { + EXPECT_EQ(expected_driver_state.value(), driver_entry.value().state); + } + }; + + // entry1 - successful flow, entry2 - cancel before client response, + // entry3 - client response timeout, entry4 - network failure. + // entry5 - pause before client response. 
+ Entry entry1 = create_entry(15); + Entry entry2 = create_entry(25); + Entry entry3 = create_entry(50); + Entry entry4 = create_entry(10); + Entry entry5 = create_entry(25); + config_->pending_upload_timeout_delay = base::TimeDelta::FromSeconds(30); + config_->max_concurrent_downloads = 8u; + config_->max_running_downloads = 8u; + config_->max_retry_count = 4u; + std::vector<Entry> entries = {entry1, entry2, entry3, entry4, entry5}; + + EXPECT_CALL(*client3_, OnServiceInitialized(false, _)).Times(1); + + // Set up the Controller. + device_status_listener_->SetDeviceStatus( + DeviceStatus(BatteryStatus::CHARGING, NetworkStatus::UNMETERED)); + + InitializeController(); + store_->TriggerInit(true, std::make_unique<std::vector<Entry>>(entries)); + file_monitor_->TriggerInit(true); + driver_->MakeReady(); + task_runner_->RunUntilIdle(); + + // No driver entry yet as entries are waiting for client response. + verify_entry(entry1.guid, Entry::State::ACTIVE, base::nullopt, false); + verify_entry(entry2.guid, Entry::State::ACTIVE, base::nullopt, false); + verify_entry(entry3.guid, Entry::State::ACTIVE, base::nullopt, false); + verify_entry(entry4.guid, Entry::State::ACTIVE, base::nullopt, false); + verify_entry(entry5.guid, Entry::State::ACTIVE, base::nullopt, false); + + // At 20 seconds. + task_runner_->FastForwardBy(base::TimeDelta::FromSeconds(20)); + + // Test that entry1 is marked as upload and is in progress. + verify_entry(entry1.guid, Entry::State::ACTIVE, + DriverEntry::State::IN_PROGRESS, true); + + // Successfully complete the upload for entry1. + EXPECT_CALL(*client3_, OnDownloadSucceeded(entry1.guid, _)).Times(1); + auto dentry1 = driver_->Find(entry1.guid); + dentry1.value().state = DriverEntry::State::COMPLETE; + driver_->NotifyDownloadSucceeded(dentry1.value()); + task_runner_->RunUntilIdle(); + verify_entry(entry1.guid, Entry::State::COMPLETE, + DriverEntry::State::COMPLETE, true); + + // Call PauseDownload before client response for entry5. 
+ controller_->PauseDownload(entry5.guid); + task_runner_->RunUntilIdle(); + verify_entry(entry5.guid, Entry::State::PAUSED, base::nullopt, false); + + // Test CancelDownload before client response for entry2. + EXPECT_CALL(*client3_, + OnDownloadFailed(entry2.guid, Client::FailureReason::CANCELLED)) + .Times(1); + controller_->CancelDownload(entry2.guid); + task_runner_->RunUntilIdle(); + verify_entry(entry2.guid, base::nullopt, base::nullopt, false); + + // At 25 seconds. + task_runner_->FastForwardBy(base::TimeDelta::FromSeconds(5)); + + // Entry2, entry5 receive client response. + verify_entry(entry2.guid, base::nullopt, base::nullopt, false); + verify_entry(entry5.guid, Entry::State::PAUSED, base::nullopt, true); + + // Entry3 timeouts before client response. + EXPECT_CALL( + *client3_, + OnDownloadFailed(entry3.guid, Client::FailureReason::UPLOAD_TIMEDOUT)) + .Times(1); + + // At 40 seconds. + task_runner_->FastForwardBy(base::TimeDelta::FromSeconds(15)); + verify_entry(entry3.guid, base::nullopt, base::nullopt, false); + + // Test network failure for entry4. First check the entry is in progress. + verify_entry(entry4.guid, Entry::State::ACTIVE, + DriverEntry::State::IN_PROGRESS, true); + EXPECT_CALL(*client3_, + OnDownloadFailed(entry4.guid, Client::FailureReason::NETWORK)) + .Times(1); + DriverEntry dentry4 = + BuildDriverEntry(entry4, DriverEntry::State::INTERRUPTED); + driver_->NotifyDownloadFailed(dentry4, FailureType::NOT_RECOVERABLE); + task_runner_->RunUntilIdle(); + verify_entry(entry4.guid, base::nullopt, base::nullopt, false); + + // Entry5 is still paused, call ResumeDownload. It should make another fresh + // request for data. + verify_entry(entry5.guid, Entry::State::PAUSED, base::nullopt, true); + controller_->ResumeDownload(entry5.guid); + task_runner_->RunUntilIdle(); + verify_entry(entry5.guid, Entry::State::ACTIVE, base::nullopt, true); + + // At 65 seconds. Entry5 receives data for the second time and continues. 
+ task_runner_->FastForwardBy(base::TimeDelta::FromSeconds(25)); + verify_entry(entry5.guid, Entry::State::ACTIVE, + DriverEntry::State::IN_PROGRESS, true); +} + TEST_F(DownloadServiceControllerImplTest, StartupRecovery) { EXPECT_CALL(*client_, OnServiceInitialized(false, _)).Times(1); @@ -1303,6 +1472,92 @@ TEST_F(DownloadServiceControllerImplTest, StartupRecovery) { EXPECT_EQ(base::nullopt, driver_->Find(entries[24].guid)); } +TEST_F(DownloadServiceControllerImplTest, StartupRecoveryForUploadEntries) { + EXPECT_CALL(*client_, OnServiceInitialized(false, _)).Times(1); + + std::vector<Entry> entries; + std::vector<DriverEntry> driver_entries; + + auto add_entry = [&entries, &driver_entries]( + Entry::State state, + base::Optional<DriverEntry::State> driver_state) { + Entry entry = test::BuildBasicEntry(state); + entry.has_upload_data = true; + if (state == Entry::State::COMPLETE) + entry.completion_time = base::Time::Now(); + + entries.push_back(entry); + if (driver_state.has_value()) + driver_entries.push_back(BuildDriverEntry(entry, driver_state.value())); + }; + + add_entry(Entry::State::ACTIVE, DriverEntry::State::IN_PROGRESS); + add_entry(Entry::State::ACTIVE, DriverEntry::State::COMPLETE); + add_entry(Entry::State::ACTIVE, DriverEntry::State::CANCELLED); + add_entry(Entry::State::ACTIVE, DriverEntry::State::INTERRUPTED); + add_entry(Entry::State::ACTIVE, base::nullopt); + + add_entry(Entry::State::PAUSED, DriverEntry::State::IN_PROGRESS); + add_entry(Entry::State::PAUSED, DriverEntry::State::COMPLETE); + add_entry(Entry::State::PAUSED, DriverEntry::State::CANCELLED); + add_entry(Entry::State::PAUSED, DriverEntry::State::INTERRUPTED); + add_entry(Entry::State::PAUSED, base::nullopt); + + add_entry(Entry::State::COMPLETE, DriverEntry::State::IN_PROGRESS); + add_entry(Entry::State::COMPLETE, DriverEntry::State::COMPLETE); + add_entry(Entry::State::COMPLETE, DriverEntry::State::CANCELLED); + add_entry(Entry::State::COMPLETE, DriverEntry::State::INTERRUPTED); + 
add_entry(Entry::State::COMPLETE, base::nullopt); + + // Set up the Controller. + device_status_listener_->SetDeviceStatus( + DeviceStatus(BatteryStatus::CHARGING, NetworkStatus::UNMETERED)); + + InitializeController(); + driver_->AddTestData(driver_entries); + driver_->MakeReady(); + store_->AutomaticallyTriggerAllFutureCallbacks(true); + store_->TriggerInit(true, std::make_unique<std::vector<Entry>>(entries)); + file_monitor_->TriggerInit(true); + + // Allow the initialization routines and persistent layers to do their thing. + task_runner_->RunUntilIdle(); + + auto verify_entry = [this](const std::string& guid, Entry::State state, + base::Optional<DriverEntry::State> driver_state) { + EXPECT_EQ(state, model_->Get(guid)->state); + auto driver_entry = driver_->Find(guid); + EXPECT_EQ(driver_state.has_value(), driver_entry.has_value()); + if (driver_entry.has_value()) + EXPECT_EQ(driver_state, driver_entry->state); + }; + + // Validate Model and DownloadDriver states. Any IN_PROGRESS or INTERRUPTED + // download should be moved to complete state for ACTIVE/PAUSED entries. + + // Entry::State::ACTIVE. + verify_entry(entries[0].guid, Entry::State::COMPLETE, base::nullopt); + verify_entry(entries[1].guid, Entry::State::COMPLETE, base::nullopt); + verify_entry(entries[2].guid, Entry::State::COMPLETE, base::nullopt); + verify_entry(entries[3].guid, Entry::State::COMPLETE, base::nullopt); + verify_entry(entries[4].guid, Entry::State::ACTIVE, + DriverEntry::State::IN_PROGRESS); + + // Entry::State::PAUSED. + verify_entry(entries[5].guid, Entry::State::COMPLETE, base::nullopt); + verify_entry(entries[6].guid, Entry::State::COMPLETE, base::nullopt); + verify_entry(entries[7].guid, Entry::State::COMPLETE, base::nullopt); + verify_entry(entries[8].guid, Entry::State::COMPLETE, base::nullopt); + verify_entry(entries[9].guid, Entry::State::PAUSED, base::nullopt); + + // Entry::State::COMPLETE. 
+ verify_entry(entries[10].guid, Entry::State::COMPLETE, base::nullopt); + verify_entry(entries[11].guid, Entry::State::COMPLETE, base::nullopt); + verify_entry(entries[12].guid, Entry::State::COMPLETE, base::nullopt); + verify_entry(entries[13].guid, Entry::State::COMPLETE, base::nullopt); + verify_entry(entries[14].guid, Entry::State::COMPLETE, base::nullopt); +} + TEST_F(DownloadServiceControllerImplTest, ExistingExternalDownload) { Entry entry1 = test::BuildBasicEntry(Entry::State::ACTIVE); Entry entry2 = test::BuildBasicEntry(Entry::State::ACTIVE); @@ -1346,6 +1601,7 @@ TEST_F(DownloadServiceControllerImplTest, ExistingExternalDownload) { // Simulate a successful external download. driver_->NotifyDownloadSucceeded(dentry2); + task_runner_->RunUntilIdle(); EXPECT_TRUE(driver_->Find(entry1.guid).has_value()); EXPECT_FALSE(driver_->Find(entry1.guid).value().paused); @@ -1391,7 +1647,7 @@ TEST_F(DownloadServiceControllerImplTest, NewExternalDownload) { // Simulate a newly created external download. driver_->Start(RequestParams(), dentry2.guid, dentry2.current_file_path, - TRAFFIC_ANNOTATION_FOR_TESTS); + nullptr, TRAFFIC_ANNOTATION_FOR_TESTS); EXPECT_TRUE(driver_->Find(entry1.guid).value().paused); EXPECT_FALSE(driver_->Find(entry2.guid).value().paused); @@ -1420,7 +1676,7 @@ TEST_F(DownloadServiceControllerImplTest, NewExternalDownload) { // Rebuild the download so we can simulate more. 
dentry2.state = DriverEntry::State::IN_PROGRESS; driver_->Start(RequestParams(), dentry2.guid, dentry2.current_file_path, - TRAFFIC_ANNOTATION_FOR_TESTS); + nullptr, TRAFFIC_ANNOTATION_FOR_TESTS); EXPECT_TRUE(driver_->Find(entry1.guid).value().paused); EXPECT_FALSE(driver_->Find(entry2.guid).value().paused); diff --git a/chromium/components/download/internal/background_service/debugging_client.cc b/chromium/components/download/internal/background_service/debugging_client.cc index dedf1c1cb12..427bf4d70d2 100644 --- a/chromium/components/download/internal/background_service/debugging_client.cc +++ b/chromium/components/download/internal/background_service/debugging_client.cc @@ -4,6 +4,9 @@ #include "components/download/internal/background_service/debugging_client.h" +#include "base/threading/sequenced_task_runner_handle.h" +#include "services/network/public/cpp/resource_request_body.h" + namespace download { void DebuggingClient::OnServiceInitialized( @@ -37,4 +40,10 @@ bool DebuggingClient::CanServiceRemoveDownloadedFile(const std::string& guid, return true; } +void DebuggingClient::GetUploadData(const std::string& guid, + GetUploadDataCallback callback) { + base::SequencedTaskRunnerHandle::Get()->PostTask( + FROM_HERE, base::BindOnce(std::move(callback), nullptr)); +} + } // namespace download diff --git a/chromium/components/download/internal/background_service/debugging_client.h b/chromium/components/download/internal/background_service/debugging_client.h index bd249f334b8..89fc7a59b4e 100644 --- a/chromium/components/download/internal/background_service/debugging_client.h +++ b/chromium/components/download/internal/background_service/debugging_client.h @@ -34,6 +34,8 @@ class DebuggingClient : public Client { const CompletionInfo& completion_info) override; bool CanServiceRemoveDownloadedFile(const std::string& guid, bool force_delete) override; + void GetUploadData(const std::string& guid, + GetUploadDataCallback callback) override; 
DISALLOW_COPY_AND_ASSIGN(DebuggingClient); }; diff --git a/chromium/components/download/internal/background_service/download_blockage_status.cc b/chromium/components/download/internal/background_service/download_blockage_status.cc new file mode 100644 index 00000000000..49fc103d031 --- /dev/null +++ b/chromium/components/download/internal/background_service/download_blockage_status.cc @@ -0,0 +1,22 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/internal/background_service/download_blockage_status.h" + +namespace download { + +DownloadBlockageStatus::DownloadBlockageStatus() + : blocked_by_criteria(false), + blocked_by_navigation(false), + blocked_by_downloads(false), + entry_not_active(false) {} + +DownloadBlockageStatus::~DownloadBlockageStatus() = default; + +bool DownloadBlockageStatus::IsBlocked() { + return blocked_by_criteria || blocked_by_navigation || blocked_by_downloads || + entry_not_active; +} + +} // namespace download diff --git a/chromium/components/download/internal/background_service/download_blockage_status.h b/chromium/components/download/internal/background_service/download_blockage_status.h new file mode 100644 index 00000000000..e6ef9eaefab --- /dev/null +++ b/chromium/components/download/internal/background_service/download_blockage_status.h @@ -0,0 +1,30 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef COMPONENTS_DOWNLOAD_INTERNAL_BACKGROUND_SERVICE_DOWNLOAD_BLOCKAGE_STATUS_H_ +#define COMPONENTS_DOWNLOAD_INTERNAL_BACKGROUND_SERVICE_DOWNLOAD_BLOCKAGE_STATUS_H_ + +#include "base/macros.h" +#include "base/optional.h" + +namespace download { + +// A helper class representing various conditions where a download can be +// blocked. 
+struct DownloadBlockageStatus { + bool blocked_by_criteria; + bool blocked_by_navigation; + bool blocked_by_downloads; + bool entry_not_active; + + DownloadBlockageStatus(); + ~DownloadBlockageStatus(); + + // Whether the download is blocked currently. + bool IsBlocked(); +}; + +} // namespace download + +#endif // COMPONENTS_DOWNLOAD_INTERNAL_BACKGROUND_SERVICE_DOWNLOAD_BLOCKAGE_STATUS_H_ diff --git a/chromium/components/download/internal/background_service/download_driver.h b/chromium/components/download/internal/background_service/download_driver.h index 476fa7660f0..02433403bcd 100644 --- a/chromium/components/download/internal/background_service/download_driver.h +++ b/chromium/components/download/internal/background_service/download_driver.h @@ -17,6 +17,10 @@ namespace base { class FilePath; } // namespace base +namespace network { +class ResourceRequestBody; +} // namespace network + namespace download { struct RequestParams; @@ -84,6 +88,7 @@ class DownloadDriver : public MemoryTracker { const RequestParams& request_params, const std::string& guid, const base::FilePath& file_path, + scoped_refptr<network::ResourceRequestBody> post_body, const net::NetworkTrafficAnnotationTag& traffic_annotation) = 0; // Cancels an existing download, all data associated with this download should diff --git a/chromium/components/download/internal/background_service/download_store.cc b/chromium/components/download/internal/background_service/download_store.cc index ab1ffac3417..66763bd62ff 100644 --- a/chromium/components/download/internal/background_service/download_store.cc +++ b/chromium/components/download/internal/background_service/download_store.cc @@ -40,8 +40,11 @@ bool DownloadStore::IsInitialized() { void DownloadStore::Initialize(InitCallback callback) { DCHECK(!IsInitialized()); - db_->Init(kDatabaseClientName, database_dir_, - leveldb_proto::CreateSimpleOptions(), + // These options reduce memory consumption. 
+ leveldb_env::Options options = leveldb_proto::CreateSimpleOptions(); + options.reuse_logs = false; + options.write_buffer_size = 64 << 10; // 64 KiB + db_->Init(kDatabaseClientName, database_dir_, options, base::BindOnce(&DownloadStore::OnDatabaseInited, weak_factory_.GetWeakPtr(), std::move(callback))); } diff --git a/chromium/components/download/internal/background_service/driver_entry.h b/chromium/components/download/internal/background_service/driver_entry.h index 148308e6c4e..170279bd0a0 100644 --- a/chromium/components/download/internal/background_service/driver_entry.h +++ b/chromium/components/download/internal/background_service/driver_entry.h @@ -10,6 +10,8 @@ #include "base/files/file_path.h" #include "base/memory/ref_counted.h" +#include "base/optional.h" +#include "storage/browser/blob/blob_data_handle.h" #include "url/gurl.h" namespace net { @@ -63,8 +65,13 @@ struct DriverEntry { // target file path requested while the file is downloading, as it may // download to a temporary path. After completion, this would be set to the // target file path. + // Will be empty file path in incognito mode. base::FilePath current_file_path; + // The blob data handle that contains download data. + // Will be available after the download is completed in incognito mode. + base::Optional<storage::BlobDataHandle> blob_handle; + // Time the download was marked as complete, base::Time() if the download is // not yet complete. 
base::Time completion_time; diff --git a/chromium/components/download/internal/background_service/entry.cc b/chromium/components/download/internal/background_service/entry.cc index 137b390152a..c13e42d986e 100644 --- a/chromium/components/download/internal/background_service/entry.cc +++ b/chromium/components/download/internal/background_service/entry.cc @@ -12,7 +12,8 @@ Entry::Entry() : bytes_downloaded(0u), attempt_count(0), resumption_count(0), - cleanup_attempt_count(0) {} + cleanup_attempt_count(0), + has_upload_data(false) {} Entry::Entry(const Entry& other) = default; Entry::Entry(const DownloadParams& params) @@ -25,6 +26,7 @@ Entry::Entry(const DownloadParams& params) attempt_count(0), resumption_count(0), cleanup_attempt_count(0), + has_upload_data(false), traffic_annotation(params.traffic_annotation) {} Entry::~Entry() = default; @@ -49,6 +51,7 @@ bool Entry::operator==(const Entry& other) const { attempt_count == other.attempt_count && resumption_count == other.resumption_count && cleanup_attempt_count == other.cleanup_attempt_count && + has_upload_data == other.has_upload_data && traffic_annotation == other.traffic_annotation; } diff --git a/chromium/components/download/internal/background_service/entry.h b/chromium/components/download/internal/background_service/entry.h index f3efe2b22ec..90bf85070a1 100644 --- a/chromium/components/download/internal/background_service/entry.h +++ b/chromium/components/download/internal/background_service/entry.h @@ -96,6 +96,11 @@ struct Entry { // Stores the number of times the service tried to delete the download file. uint32_t cleanup_attempt_count; + // Stores whether this request has some data to be uploaded. This is set to + // true only when the client has provided with the upload data and is not + // cleared afterwards. Retry and resumption logic are impacted by this. + bool has_upload_data; + // Traffic annotation for the network request. 
net::MutableNetworkTrafficAnnotationTag traffic_annotation; }; diff --git a/chromium/components/download/internal/background_service/entry_utils_unittest.cc b/chromium/components/download/internal/background_service/entry_utils_unittest.cc index 3730fb535ca..fda16a7f2a8 100644 --- a/chromium/components/download/internal/background_service/entry_utils_unittest.cc +++ b/chromium/components/download/internal/background_service/entry_utils_unittest.cc @@ -6,7 +6,6 @@ #include <algorithm> -#include "base/memory/ptr_util.h" #include "components/download/internal/background_service/test/entry_utils.h" #include "components/download/public/background_service/clients.h" #include "components/download/public/background_service/download_metadata.h" diff --git a/chromium/components/download/internal/background_service/file_monitor_impl.cc b/chromium/components/download/internal/background_service/file_monitor_impl.cc index 9bb4e68b009..e7b0055f8d8 100644 --- a/chromium/components/download/internal/background_service/file_monitor_impl.cc +++ b/chromium/components/download/internal/background_service/file_monitor_impl.cc @@ -8,7 +8,6 @@ #include "base/callback_helpers.h" #include "base/files/file_enumerator.h" #include "base/files/file_util.h" -#include "base/memory/ptr_util.h" #include "base/stl_util.h" #include "base/sys_info.h" #include "base/task_runner_util.h" diff --git a/chromium/components/download/internal/background_service/in_memory_download.cc b/chromium/components/download/internal/background_service/in_memory_download.cc index 091f0e19bf8..630fcca9ff0 100644 --- a/chromium/components/download/internal/background_service/in_memory_download.cc +++ b/chromium/components/download/internal/background_service/in_memory_download.cc @@ -8,104 +8,19 @@ #include <string> #include "base/bind.h" -#include "base/strings/string_util.h" #include "components/download/internal/background_service/blob_task_proxy.h" -#include "net/base/completion_callback.h" -#include 
"net/base/io_buffer.h" -#include "net/base/net_errors.h" -#include "net/http/http_status_code.h" +#include "net/base/load_flags.h" #include "net/traffic_annotation/network_traffic_annotation.h" -#include "net/url_request/url_fetcher.h" #include "storage/browser/blob/blob_data_handle.h" #include "storage/browser/blob/blob_storage_context.h" namespace download { -namespace { - -// Converts a string to HTTP method used by URLFetcher. -net::URLFetcher::RequestType ToRequestType(const std::string& method) { - // Only supports GET and POST. - if (base::EqualsCaseInsensitiveASCII(method, "GET")) - return net::URLFetcher::RequestType::GET; - if (base::EqualsCaseInsensitiveASCII(method, "POST")) - return net::URLFetcher::RequestType::POST; - - NOTREACHED(); - return net::URLFetcher::RequestType::GET; -} - -} // namespace - -InMemoryDownloadImpl::ResponseWriter::ResponseWriter( - scoped_refptr<base::SingleThreadTaskRunner> io_task_runner) - : paused_on_io_(false), io_task_runner_(io_task_runner) {} - -InMemoryDownloadImpl::ResponseWriter::~ResponseWriter() = default; - -void InMemoryDownloadImpl::ResponseWriter::Pause() { - io_task_runner_->PostTask( - FROM_HERE, - base::BindOnce(&ResponseWriter::PauseOnIO, base::Unretained(this))); -} - -void InMemoryDownloadImpl::ResponseWriter::PauseOnIO() { - io_task_runner_->BelongsToCurrentThread(); - paused_on_io_ = true; -} - -void InMemoryDownloadImpl::ResponseWriter::Resume() { - io_task_runner_->PostTask( - FROM_HERE, - base::BindOnce(&ResponseWriter::ResumeOnIO, base::Unretained(this))); -} - -void InMemoryDownloadImpl::ResponseWriter::ResumeOnIO() { - io_task_runner_->BelongsToCurrentThread(); - paused_on_io_ = false; - - // Continue read from network layer. Since we didn't write on pause, report - // 0 byte to network layer. 
- if (!write_callback_.is_null()) { - base::ResetAndReturn(&write_callback_).Run(0u); - } -} - -std::unique_ptr<std::string> InMemoryDownloadImpl::ResponseWriter::TakeData() { - return std::move(data_); -} - -int InMemoryDownloadImpl::ResponseWriter::Initialize( - const net::CompletionCallback& callback) { - data_ = std::make_unique<std::string>(); - return net::OK; -} - -int InMemoryDownloadImpl::ResponseWriter::Write( - net::IOBuffer* buffer, - int num_bytes, - const net::CompletionCallback& callback) { - io_task_runner_->BelongsToCurrentThread(); - - if (paused_on_io_) { - write_callback_ = callback; - return net::ERR_IO_PENDING; - } - - DCHECK(data_); - data_->append(buffer->data(), num_bytes); - return num_bytes; -} - -int InMemoryDownloadImpl::ResponseWriter::Finish( - int net_error, - const net::CompletionCallback& callback) { - io_task_runner_->BelongsToCurrentThread(); - return net::OK; -} - InMemoryDownload::InMemoryDownload(const std::string& guid) - : guid_(guid), state_(State::INITIAL), bytes_downloaded_(0u) {} + : guid_(guid), + state_(State::INITIAL), + paused_(false), + bytes_downloaded_(0u) {} InMemoryDownload::~InMemoryDownload() = default; @@ -114,18 +29,17 @@ InMemoryDownloadImpl::InMemoryDownloadImpl( const RequestParams& request_params, const net::NetworkTrafficAnnotationTag& traffic_annotation, Delegate* delegate, - scoped_refptr<net::URLRequestContextGetter> request_context_getter, + network::mojom::URLLoaderFactory* url_loader_factory, BlobTaskProxy::BlobContextGetter blob_context_getter, scoped_refptr<base::SingleThreadTaskRunner> io_task_runner) : InMemoryDownload(guid), request_params_(request_params), traffic_annotation_(traffic_annotation), - request_context_getter_(request_context_getter), + url_loader_factory_(url_loader_factory), blob_task_proxy_( BlobTaskProxy::Create(blob_context_getter, io_task_runner)), io_task_runner_(io_task_runner), delegate_(delegate), - paused_(false), completion_notified_(false), weak_ptr_factory_(this) { 
DCHECK(!guid_.empty()); @@ -142,30 +56,11 @@ void InMemoryDownloadImpl::Start() { } void InMemoryDownloadImpl::Pause() { - if (paused_) - return; - paused_ = true; - - switch (state_) { - case State::INITIAL: - // Do nothing. - return; - case State::IN_PROGRESS: - // Do nothing if network operation is done. - DCHECK(response_writer_); - response_writer_->Pause(); - return; - case State::FAILED: - return; - case State::COMPLETE: - // Do nothing. - return; - } + if (state_ == State::IN_PROGRESS) + paused_ = true; } void InMemoryDownloadImpl::Resume() { - if (!paused_) - return; paused_ = false; switch (state_) { @@ -173,12 +68,13 @@ void InMemoryDownloadImpl::Resume() { NOTREACHED(); return; case State::IN_PROGRESS: - // Do nothing if network operation is done. - DCHECK(response_writer_); - response_writer_->Resume(); + // Let the network pipe continue to read data. + if (resume_callback_) + std::move(resume_callback_).Run(); return; case State::FAILED: - // Restart the network layer. No ongoing blob task should exist. + // Restart the download. + Reset(); SendRequest(); state_ = State::IN_PROGRESS; return; @@ -188,10 +84,10 @@ void InMemoryDownloadImpl::Resume() { } } -std::unique_ptr<storage::BlobDataHandle> InMemoryDownloadImpl::ResultAsBlob() { +std::unique_ptr<storage::BlobDataHandle> InMemoryDownloadImpl::ResultAsBlob() + const { DCHECK(state_ == State::COMPLETE || state_ == State::FAILED); - // Return a copy, we keep one reference of the underlying data to avoid - // unexpected deletion. + // Return a copy. 
return std::make_unique<storage::BlobDataHandle>(*blob_data_handle_); } @@ -199,64 +95,48 @@ size_t InMemoryDownloadImpl::EstimateMemoryUsage() const { return bytes_downloaded_; } -void InMemoryDownloadImpl::OnURLFetchDownloadProgress( - const net::URLFetcher* source, - int64_t current, - int64_t total, - int64_t current_network_bytes) { - bytes_downloaded_ = current; +void InMemoryDownloadImpl::OnDataReceived(base::StringPiece string_piece, + base::OnceClosure resume) { + size_t size = string_piece.as_string().size(); + data_.append(string_piece.as_string().data(), size); + bytes_downloaded_ += size; + + if (paused_) { + // Read data later and cache the resumption callback when paused. + resume_callback_ = std::move(resume); + return; + } + + // Continue to read data. + std::move(resume).Run(); // TODO(xingliu): Throttle the update frequency. See https://crbug.com/809674. if (delegate_) delegate_->OnDownloadProgress(this); } -void InMemoryDownloadImpl::OnURLFetchComplete(const net::URLFetcher* source) { - DCHECK(source); - response_headers_ = source->GetResponseHeaders(); +void InMemoryDownloadImpl::OnComplete(bool success) { + if (success) { + SaveAsBlob(); + return; + } - switch (source->GetStatus().status()) { - case net::URLRequestStatus::Status::SUCCESS: - if (HandleResponseCode(source->GetResponseCode())) { - SaveAsBlob(); - return; - } + state_ = State::FAILED; - state_ = State::FAILED; - NotifyDelegateDownloadComplete(); - return; - case net::URLRequestStatus::Status::IO_PENDING: - return; - case net::URLRequestStatus::Status::CANCELED: - case net::URLRequestStatus::Status::FAILED: - state_ = State::FAILED; - NotifyDelegateDownloadComplete(); - return; - } + // Release download data. + data_.clear(); + NotifyDelegateDownloadComplete(); } -bool InMemoryDownloadImpl::HandleResponseCode(int response_code) { - switch (response_code) { - case -1: // Non-HTTP request. 
- case net::HTTP_OK: - case net::HTTP_NON_AUTHORITATIVE_INFORMATION: - case net::HTTP_PARTIAL_CONTENT: - case net::HTTP_CREATED: - case net::HTTP_ACCEPTED: - case net::HTTP_NO_CONTENT: - case net::HTTP_RESET_CONTENT: - return true; - // All other codes are considered as failed. - default: - return false; - } +void InMemoryDownloadImpl::OnRetry(base::OnceClosure start_retry) { + Reset(); + std::move(start_retry).Run(); } void InMemoryDownloadImpl::SaveAsBlob() { - DCHECK(url_fetcher_); - std::unique_ptr<std::string> data = response_writer_->TakeData(); auto callback = base::BindOnce(&InMemoryDownloadImpl::OnSaveBlobDone, weak_ptr_factory_.GetWeakPtr()); + auto data = std::make_unique<std::string>(std::move(data_)); blob_task_proxy_->SaveAsBlob(std::move(data), std::move(callback)); } @@ -270,12 +150,13 @@ void InMemoryDownloadImpl::OnSaveBlobDone( // TODO(xingliu): Add metric for blob status code. If failed, consider remove // |blob_data_handle_|. See https://crbug.com/809674. + DCHECK(data_.empty()) + << "Download data should be contained in |blob_data_handle_|."; blob_data_handle_ = std::move(blob_handle); completion_time_ = base::Time::Now(); // Resets network backend. - response_writer_ = nullptr; - url_fetcher_.reset(); + loader_.reset(); // Not considering |paused_| here, if pause after starting a blob operation, // just let it finish. 
@@ -292,21 +173,25 @@ void InMemoryDownloadImpl::NotifyDelegateDownloadComplete() { } void InMemoryDownloadImpl::SendRequest() { - url_fetcher_ = net::URLFetcher::Create(request_params_.url, - ToRequestType(request_params_.method), - this, traffic_annotation_); - url_fetcher_->SetRequestContext(request_context_getter_.get()); - url_fetcher_->SetExtraRequestHeaders( - request_params_.request_headers.ToString()); - response_writer_ = - new ResponseWriter(request_context_getter_->GetNetworkTaskRunner()); - url_fetcher_->SaveResponseWithWriter( - std::unique_ptr<ResponseWriter>(response_writer_)); - url_fetcher_->Start(); - - // Pause on network thread if needed. - if (paused_) - response_writer_->Pause(); + auto request = std::make_unique<network::ResourceRequest>(); + request->url = request_params_.url; + request->method = request_params_.method; + request->headers = request_params_.request_headers; + request->load_flags = net::LOAD_DISABLE_CACHE; + + loader_ = + network::SimpleURLLoader::Create(std::move(request), traffic_annotation_); + + // TODO(xingliu): Use SimpleURLLoader's retry when it won't hit CHECK in + // SharedURLLoaderFactory. 
+ loader_->DownloadAsStream(url_loader_factory_, this); +} + +void InMemoryDownloadImpl::Reset() { + data_.clear(); + bytes_downloaded_ = 0u; + completion_notified_ = false; + resume_callback_.Reset(); } } // namespace download diff --git a/chromium/components/download/internal/background_service/in_memory_download.h b/chromium/components/download/internal/background_service/in_memory_download.h index 8f97c9ea4c8..69d0e5ad211 100644 --- a/chromium/components/download/internal/background_service/in_memory_download.h +++ b/chromium/components/download/internal/background_service/in_memory_download.h @@ -14,13 +14,11 @@ #include "base/single_thread_task_runner.h" #include "components/download/internal/background_service/blob_task_proxy.h" #include "components/download/public/background_service/download_params.h" -#include "net/base/completion_callback.h" -#include "net/url_request/url_fetcher_delegate.h" -#include "net/url_request/url_fetcher_response_writer.h" -#include "net/url_request/url_request_context_getter.h" +#include "services/network/public/cpp/simple_url_loader.h" +#include "services/network/public/cpp/simple_url_loader_stream_consumer.h" +#include "services/network/public/mojom/url_loader_factory.mojom.h" namespace net { -class URLFetcher; struct NetworkTrafficAnnotationTag; } // namespace net @@ -78,9 +76,7 @@ class InMemoryDownload { FAILED, // Download is completed, and data is successfully saved as a blob. - // 1. We guarantee the states of network responses. - // 2. Do not guarantee the state of blob data. The consumer of blob - // should validate its state when using it on IO thread. + // Guarantee the blob is fully constructed. COMPLETE, }; @@ -96,7 +92,7 @@ class InMemoryDownload { virtual void Resume() = 0; // Get a copy of blob data handle. 
- virtual std::unique_ptr<storage::BlobDataHandle> ResultAsBlob() = 0; + virtual std::unique_ptr<storage::BlobDataHandle> ResultAsBlob() const = 0; // Returns the estimate of dynamically allocated memory in bytes. virtual size_t EstimateMemoryUsage() const = 0; @@ -104,6 +100,7 @@ class InMemoryDownload { const std::string& guid() const { return guid_; } uint64_t bytes_downloaded() const { return bytes_downloaded_; } State state() const { return state_; } + bool paused() const { return paused_; } const base::Time& completion_time() const { return completion_time_; } scoped_refptr<const net::HttpResponseHeaders> response_headers() const { return response_headers_; @@ -117,6 +114,9 @@ class InMemoryDownload { State state_; + // If the download is paused. + bool paused_; + // Completion time of download when data is saved as blob. base::Time completion_time_; @@ -129,14 +129,15 @@ class InMemoryDownload { DISALLOW_COPY_AND_ASSIGN(InMemoryDownload); }; -// Implementation of InMemoryDownload and uses URLFetcher as network backend. +// Implementation of InMemoryDownload and uses SimpleURLLoader as network +// backend. // Threading contract: // 1. This object lives on the main thread. // 2. Reading/writing IO buffer from network is done on another thread, // based on |request_context_getter_|. When complete, main thread is notified. // 3. After network IO is done, Blob related work is done on IO thread with // |blob_task_proxy_|, then notify the result to main thread. 
-class InMemoryDownloadImpl : public net::URLFetcherDelegate, +class InMemoryDownloadImpl : public network::SimpleURLLoaderStreamConsumer, public InMemoryDownload { public: InMemoryDownloadImpl( @@ -144,69 +145,26 @@ class InMemoryDownloadImpl : public net::URLFetcherDelegate, const RequestParams& request_params, const net::NetworkTrafficAnnotationTag& traffic_annotation, Delegate* delegate, - scoped_refptr<net::URLRequestContextGetter> request_context_getter, + network::mojom::URLLoaderFactory* url_loader_factory, BlobTaskProxy::BlobContextGetter blob_context_getter, scoped_refptr<base::SingleThreadTaskRunner> io_task_runner); + ~InMemoryDownloadImpl() override; private: - // Response writer that supports pause and resume operations. - class ResponseWriter : public net::URLFetcherResponseWriter { - public: - ResponseWriter(scoped_refptr<base::SingleThreadTaskRunner> io_task_runner); - ~ResponseWriter() override; - - // Pause writing data from pipe into |data_|. - void Pause(); - - // Resume writing data from the pipe into |data_|. - void Resume(); - - // Take the data, must be called after the network layer completes its job. - std::unique_ptr<std::string> TakeData(); - - private: - // net::URLFetcherResponseWriter implementation. - int Initialize(const net::CompletionCallback& callback) override; - int Write(net::IOBuffer* buffer, - int num_bytes, - const net::CompletionCallback& callback) override; - int Finish(int net_error, const net::CompletionCallback& callback) override; - - void PauseOnIO(); - void ResumeOnIO(); - - // Download data, should be moved to avoid extra copy. - std::unique_ptr<std::string> data_; - - bool paused_on_io_; - scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_; - - // When paused, cached callback to trigger the next read. Must be set and - // called on fetcher's IO thread. - net::CompletionCallback write_callback_; - - DISALLOW_COPY_AND_ASSIGN(ResponseWriter); - }; - // InMemoryDownload implementation. 
void Start() override; void Pause() override; void Resume() override; - std::unique_ptr<storage::BlobDataHandle> ResultAsBlob() override; + std::unique_ptr<storage::BlobDataHandle> ResultAsBlob() const override; size_t EstimateMemoryUsage() const override; - // net::URLFetcherDelegate implementation. - void OnURLFetchDownloadProgress(const net::URLFetcher* source, - int64_t current, - int64_t total, - int64_t current_network_bytes) override; - void OnURLFetchComplete(const net::URLFetcher* source) override; - - // Handles response code and change the state accordingly. - // Returns if the response code is considered as successful code. - bool HandleResponseCode(int response_code); + // network::SimpleURLLoaderStreamConsumer implementation. + void OnDataReceived(base::StringPiece string_piece, + base::OnceClosure resume) override; + void OnComplete(bool success) override; + void OnRetry(base::OnceClosure start_retry) override; // Saves the download data into blob storage. void SaveAsBlob(); @@ -218,26 +176,23 @@ class InMemoryDownloadImpl : public net::URLFetcherDelegate, // call. void NotifyDelegateDownloadComplete(); - // Sends the network request. + // Sends a new network request. void SendRequest(); + // Resets local states. + void Reset(); + // Request parameters of the download. const RequestParams request_params_; // Traffic annotation of the request. const net::NetworkTrafficAnnotationTag traffic_annotation_; - // Used to send requests to servers. Also contains the download data in its - // string buffer. We should avoid extra copy on the data and release the - // memory when needed. - std::unique_ptr<net::URLFetcher> url_fetcher_; - - // Owned by |url_fetcher_|. Lives on fetcher's delegate thread, perform - // network IO on fetcher's IO thread. - ResponseWriter* response_writer_; + // Used to send requests to servers. + std::unique_ptr<network::SimpleURLLoader> loader_; - // Request context getter used by |url_fetcher_|. 
- scoped_refptr<net::URLRequestContextGetter> request_context_getter_; + // Used to handle network response. + network::mojom::URLLoaderFactory* url_loader_factory_; // Worker that does blob related task on IO thread. std::unique_ptr<BlobTaskProxy> blob_task_proxy_; @@ -251,7 +206,11 @@ class InMemoryDownloadImpl : public net::URLFetcherDelegate, Delegate* delegate_; - bool paused_; + // Data downloaded from network, should be moved to avoid extra copy. + std::string data_; + + // Cached callback to let network backend continue to pull data. + base::OnceClosure resume_callback_; // Ensures Delegate::OnDownloadComplete is only called once. bool completion_notified_; diff --git a/chromium/components/download/internal/background_service/in_memory_download_driver.cc b/chromium/components/download/internal/background_service/in_memory_download_driver.cc index 186fda2fd6b..99ba0970550 100644 --- a/chromium/components/download/internal/background_service/in_memory_download_driver.cc +++ b/chromium/components/download/internal/background_service/in_memory_download_driver.cc @@ -4,6 +4,9 @@ #include "components/download/internal/background_service/in_memory_download_driver.h" +#include "components/download/internal/background_service/in_memory_download.h" +#include "services/network/public/cpp/resource_request_body.h" + namespace download { namespace { @@ -28,29 +31,32 @@ DriverEntry CreateDriverEntry(const InMemoryDownload& download) { DriverEntry entry; entry.guid = download.guid(); entry.state = ToDriverEntryState(download.state()); - // TODO(xingliu): Support pause. See https://crbug.com/809674. 
- entry.paused = false; - entry.done = entry.state == DriverEntry::State::INTERRUPTED || - entry.state == DriverEntry::State::COMPLETE || + entry.paused = download.paused(); + entry.done = entry.state == DriverEntry::State::COMPLETE || entry.state == DriverEntry::State::CANCELLED; entry.bytes_downloaded = download.bytes_downloaded(); entry.response_headers = download.response_headers(); if (entry.response_headers) { entry.expected_total_size = entry.response_headers->GetContentLength(); } - // TODO(xingliu): Support resumption. UrlFetcher doesn't expose url chain. - // Figure out if empty url chain is OK and how url chain is used. + // Currently incognito mode network backend can't resume in the middle. entry.can_resume = false; + + if (download.state() == InMemoryDownload::State::COMPLETE) { + auto blob_handle = download.ResultAsBlob(); + if (blob_handle) + entry.blob_handle = base::Optional<storage::BlobDataHandle>(*blob_handle); + } return entry; } } // namespace InMemoryDownloadFactory::InMemoryDownloadFactory( - scoped_refptr<net::URLRequestContextGetter> request_context_getter, + network::mojom::URLLoaderFactory* url_loader_factory, BlobTaskProxy::BlobContextGetter blob_context_getter, scoped_refptr<base::SingleThreadTaskRunner> io_task_runner) - : request_context_getter_(request_context_getter), + : url_loader_factory_(url_loader_factory), blob_context_getter_(blob_context_getter), io_task_runner_(io_task_runner) {} @@ -61,9 +67,10 @@ std::unique_ptr<InMemoryDownload> InMemoryDownloadFactory::Create( const RequestParams& request_params, const net::NetworkTrafficAnnotationTag& traffic_annotation, InMemoryDownload::Delegate* delegate) { + DCHECK(url_loader_factory_); return std::make_unique<InMemoryDownloadImpl>( - guid, request_params, traffic_annotation, delegate, - request_context_getter_, blob_context_getter_, io_task_runner_); + guid, request_params, traffic_annotation, delegate, url_loader_factory_, + blob_context_getter_, io_task_runner_); } 
InMemoryDownloadDriver::InMemoryDownloadDriver( @@ -91,6 +98,7 @@ void InMemoryDownloadDriver::Start( const RequestParams& request_params, const std::string& guid, const base::FilePath& file_path, + scoped_refptr<network::ResourceRequestBody> post_body, const net::NetworkTrafficAnnotationTag& traffic_annotation) { std::unique_ptr<InMemoryDownload> download = download_factory_->Create(guid, request_params, traffic_annotation, this); diff --git a/chromium/components/download/internal/background_service/in_memory_download_driver.h b/chromium/components/download/internal/background_service/in_memory_download_driver.h index 019f7577b05..b1594999c72 100644 --- a/chromium/components/download/internal/background_service/in_memory_download_driver.h +++ b/chromium/components/download/internal/background_service/in_memory_download_driver.h @@ -12,6 +12,7 @@ #include "base/macros.h" #include "components/download/internal/background_service/in_memory_download.h" +#include "services/network/public/mojom/url_loader_factory.mojom.h" namespace download { @@ -21,7 +22,7 @@ class InMemoryDownload; class InMemoryDownloadFactory : public InMemoryDownload::Factory { public: InMemoryDownloadFactory( - scoped_refptr<net::URLRequestContextGetter> request_context_getter, + network::mojom::URLLoaderFactory* url_loader_factory, BlobTaskProxy::BlobContextGetter blob_context_getter, scoped_refptr<base::SingleThreadTaskRunner> io_task_runner); ~InMemoryDownloadFactory() override; @@ -34,7 +35,8 @@ class InMemoryDownloadFactory : public InMemoryDownload::Factory { const net::NetworkTrafficAnnotationTag& traffic_annotation, InMemoryDownload::Delegate* delegate) override; - scoped_refptr<net::URLRequestContextGetter> request_context_getter_; + network::mojom::URLLoaderFactory* url_loader_factory_; + BlobTaskProxy::BlobContextGetter blob_context_getter_; scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_; @@ -59,6 +61,7 @@ class InMemoryDownloadDriver : public DownloadDriver, const 
RequestParams& request_params, const std::string& guid, const base::FilePath& file_path, + scoped_refptr<network::ResourceRequestBody> post_body, const net::NetworkTrafficAnnotationTag& traffic_annotation) override; void Remove(const std::string& guid) override; void Pause(const std::string& guid) override; diff --git a/chromium/components/download/internal/background_service/in_memory_download_driver_unittest.cc b/chromium/components/download/internal/background_service/in_memory_download_driver_unittest.cc index 7199e1c04ae..cc7af9b4570 100644 --- a/chromium/components/download/internal/background_service/in_memory_download_driver_unittest.cc +++ b/chromium/components/download/internal/background_service/in_memory_download_driver_unittest.cc @@ -8,6 +8,7 @@ #include "components/download/internal/background_service/test/mock_download_driver_client.h" #include "net/traffic_annotation/network_traffic_annotation_test_helper.h" +#include "services/network/public/cpp/resource_request_body.h" #include "storage/browser/blob/blob_data_handle.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" @@ -46,7 +47,7 @@ class TestInMemoryDownload : public InMemoryDownload { void Start() override {} void Pause() override {} void Resume() override {} - std::unique_ptr<storage::BlobDataHandle> ResultAsBlob() override { + std::unique_ptr<storage::BlobDataHandle> ResultAsBlob() const override { return nullptr; } size_t EstimateMemoryUsage() const override { return 0u; } @@ -111,7 +112,7 @@ class InMemoryDownloadDriverTest : public testing::Test { void Start(const std::string& guid) { RequestParams params; base::FilePath path; - driver()->Start(params, guid, path, TRAFFIC_ANNOTATION_FOR_TESTS); + driver()->Start(params, guid, path, nullptr, TRAFFIC_ANNOTATION_FOR_TESTS); } private: diff --git a/chromium/components/download/internal/background_service/in_memory_download_unittest.cc 
b/chromium/components/download/internal/background_service/in_memory_download_unittest.cc index 145f697358c..c89ae3a0cdc 100644 --- a/chromium/components/download/internal/background_service/in_memory_download_unittest.cc +++ b/chromium/components/download/internal/background_service/in_memory_download_unittest.cc @@ -4,16 +4,15 @@ #include "components/download/internal/background_service/in_memory_download.h" -#include "base/files/file_util.h" #include "base/guid.h" -#include "base/path_service.h" +#include "base/message_loop/message_loop.h" #include "base/run_loop.h" #include "base/test/bind_test_util.h" #include "base/test/scoped_task_environment.h" #include "base/threading/thread.h" -#include "net/test/embedded_test_server/embedded_test_server.h" +#include "net/base/io_buffer.h" #include "net/traffic_annotation/network_traffic_annotation_test_helper.h" -#include "net/url_request/url_request_test_util.h" +#include "services/network/test/test_url_loader_factory.h" #include "storage/browser/blob/blob_reader.h" #include "storage/browser/blob/blob_storage_context.h" #include "testing/gmock/include/gmock/gmock.h" @@ -22,18 +21,9 @@ namespace download { namespace { -// Posts a dummy task on |task_runner| and wait for its callback, to drain all -// previous tasks on |task_runner|. 
-void DrainPreviousTasks( - scoped_refptr<base::SingleThreadTaskRunner> task_runner) { - base::RunLoop run_loop; - - auto dummy_task = []() {}; - task_runner->PostTaskAndReply(FROM_HERE, base::BindRepeating(dummy_task), - run_loop.QuitClosure()); - - run_loop.Run(); -} +const char kTestDownloadData[] = + "In earlier tellings, the dog had a better reputation than the cat, " + "however the president veto it."; // Dummy callback used for IO_PENDING state in blob operations, this is not // called when the blob operation is done, but called when chained with other @@ -78,14 +68,10 @@ class InMemoryDownloadTest : public testing::Test { ~InMemoryDownloadTest() override = default; void SetUp() override { - test_server_.ServeFilesFromDirectory(GetTestDataDirectory()); - ASSERT_TRUE(test_server_.Start()); - io_thread_.reset(new base::Thread("Network and Blob IO thread")); base::Thread::Options options(base::MessageLoop::TYPE_IO, 0); io_thread_->StartWithOptions(options); - request_context_getter_ = - new net::TestURLRequestContextGetter(io_thread_->task_runner()); + base::RunLoop loop; io_thread_->task_runner()->PostTask( FROM_HERE, base::BindLambdaForTesting([&]() { @@ -103,17 +89,11 @@ class InMemoryDownloadTest : public testing::Test { } protected: - base::FilePath GetTestDataDirectory() { - base::FilePath test_data_dir; - EXPECT_TRUE(base::PathService::Get(base::DIR_SOURCE_ROOT, &test_data_dir)); - return test_data_dir.AppendASCII("components/test/data/download"); - } - // Helper method to create a download with request_params. 
void CreateDownload(const RequestParams& request_params) { download_ = std::make_unique<InMemoryDownloadImpl>( base::GenerateGUID(), request_params, TRAFFIC_ANNOTATION_FOR_TESTS, - delegate(), request_context_getter_, + delegate(), &url_loader_factory_, base::BindRepeating(&BlobStorageContextGetter, blob_storage_context_.get()), io_thread_->task_runner()); @@ -121,9 +101,8 @@ class InMemoryDownloadTest : public testing::Test { InMemoryDownload* download() { return download_.get(); } MockDelegate* delegate() { return &mock_delegate_; } - net::EmbeddedTestServer* test_server() { return &test_server_; } - scoped_refptr<net::TestURLRequestContextGetter> request_context_getter() { - return request_context_getter_; + network::TestURLLoaderFactory* url_loader_factory() { + return &url_loader_factory_; } // Verifies if data read from |blob| is identical as |expected|. @@ -174,56 +153,27 @@ class InMemoryDownloadTest : public testing::Test { std::unique_ptr<InMemoryDownloadImpl> download_; MockDelegate mock_delegate_; - // Used by URLFetcher network backend. - scoped_refptr<net::TestURLRequestContextGetter> request_context_getter_; + // Used by SimpleURLLoader network backend. + network::TestURLLoaderFactory url_loader_factory_; // Memory backed blob storage that can never page to disk. 
std::unique_ptr<storage::BlobStorageContext> blob_storage_context_; - net::EmbeddedTestServer test_server_; - DISALLOW_COPY_AND_ASSIGN(InMemoryDownloadTest); }; TEST_F(InMemoryDownloadTest, DownloadTest) { RequestParams request_params; - request_params.url = test_server()->GetURL("/text_data.json"); - CreateDownload(request_params); - download()->Start(); - delegate()->WaitForCompletion(); - - EXPECT_EQ(InMemoryDownload::State::COMPLETE, download()->state()); - auto blob = download()->ResultAsBlob(); - - std::string expected; - EXPECT_TRUE(ReadFileToString( - GetTestDataDirectory().AppendASCII("text_data.json"), &expected)); - VerifyBlobData(expected, blob.get()); -} - -TEST_F(InMemoryDownloadTest, PauseResume) { - RequestParams request_params; - request_params.url = test_server()->GetURL("/text_data.json"); CreateDownload(request_params); - - // Pause before sending request. - download()->Pause(); + url_loader_factory()->AddResponse(request_params.url.spec(), + kTestDownloadData); + // TODO(xingliu): More tests on pause/resume. download()->Start(); - - // Force to return ERR_IO_PENDING on network thread in - // InMemoryDownloadImpl::ResponseWriter::Write. 
- DrainPreviousTasks(request_context_getter()->GetNetworkTaskRunner()); - - download()->Resume(); delegate()->WaitForCompletion(); EXPECT_EQ(InMemoryDownload::State::COMPLETE, download()->state()); auto blob = download()->ResultAsBlob(); - - std::string expected; - EXPECT_TRUE(ReadFileToString( - GetTestDataDirectory().AppendASCII("text_data.json"), &expected)); - VerifyBlobData(expected, blob.get()); + VerifyBlobData(kTestDownloadData, blob.get()); } } // namespace diff --git a/chromium/components/download/internal/background_service/logger_impl.cc b/chromium/components/download/internal/background_service/logger_impl.cc index bd2fe130e80..50154003aa8 100644 --- a/chromium/components/download/internal/background_service/logger_impl.cc +++ b/chromium/components/download/internal/background_service/logger_impl.cc @@ -6,7 +6,6 @@ #include <memory> -#include "base/memory/ptr_util.h" #include "base/values.h" #include "components/download/internal/background_service/driver_entry.h" #include "components/download/internal/background_service/entry.h" @@ -53,6 +52,8 @@ std::string ClientToString(DownloadClient client) { return "BackgroundFetch"; case DownloadClient::DEBUGGING: return "Debugging"; + case DownloadClient::MOUNTAIN_INTERNAL: + return "MountainInternal"; case DownloadClient::BOUNDARY: // Intentional fallthrough. 
default: NOTREACHED(); diff --git a/chromium/components/download/internal/background_service/proto/entry.proto b/chromium/components/download/internal/background_service/proto/entry.proto index 2e7cfbbb28d..04c40b27443 100644 --- a/chromium/components/download/internal/background_service/proto/entry.proto +++ b/chromium/components/download/internal/background_service/proto/entry.proto @@ -21,7 +21,8 @@ enum DownloadClient { OFFLINE_PAGE_PREFETCH = 4; BACKGROUND_FETCH = 5; DEBUGGING = 6; - BOUNDARY = 7; + MOUNTAIN_INTERNAL = 7; + BOUNDARY = 8; } // Stores the request params, internal state, metrics and metadata associated @@ -66,4 +67,5 @@ message Entry { optional uint32 cleanup_attempt_count = 13; optional uint32 resumption_count = 14; + optional bool has_upload_data = 15; } diff --git a/chromium/components/download/internal/background_service/proto_conversions.cc b/chromium/components/download/internal/background_service/proto_conversions.cc index ef1398eb058..942e12f2d00 100644 --- a/chromium/components/download/internal/background_service/proto_conversions.cc +++ b/chromium/components/download/internal/background_service/proto_conversions.cc @@ -68,6 +68,8 @@ protodb::DownloadClient ProtoConversions::DownloadClientToProto( return protodb::DownloadClient::BACKGROUND_FETCH; case DownloadClient::DEBUGGING: return protodb::DownloadClient::DEBUGGING; + case DownloadClient::MOUNTAIN_INTERNAL: + return protodb::DownloadClient::MOUNTAIN_INTERNAL; case DownloadClient::BOUNDARY: return protodb::DownloadClient::BOUNDARY; } @@ -93,6 +95,8 @@ DownloadClient ProtoConversions::DownloadClientFromProto( return DownloadClient::BACKGROUND_FETCH; case protodb::DownloadClient::DEBUGGING: return DownloadClient::DEBUGGING; + case protodb::DownloadClient::MOUNTAIN_INTERNAL: + return DownloadClient::MOUNTAIN_INTERNAL; case protodb::DownloadClient::BOUNDARY: return DownloadClient::BOUNDARY; } @@ -281,6 +285,7 @@ Entry ProtoConversions::EntryFromProto(const protodb::Entry& proto) { 
entry.attempt_count = proto.attempt_count(); entry.resumption_count = proto.resumption_count(); entry.cleanup_attempt_count = proto.cleanup_attempt_count(); + entry.has_upload_data = proto.has_upload_data(); entry.traffic_annotation = net::MutableNetworkTrafficAnnotationTag({proto.traffic_annotation()}); entry.bytes_downloaded = proto.bytes_downloaded(); @@ -305,6 +310,7 @@ protodb::Entry ProtoConversions::EntryToProto(const Entry& entry) { proto.set_attempt_count(entry.attempt_count); proto.set_resumption_count(entry.resumption_count); proto.set_cleanup_attempt_count(entry.cleanup_attempt_count); + proto.set_has_upload_data(entry.has_upload_data); proto.set_traffic_annotation(entry.traffic_annotation.unique_id_hash_code); proto.set_bytes_downloaded(entry.bytes_downloaded); return proto; diff --git a/chromium/components/download/internal/background_service/stats.cc b/chromium/components/download/internal/background_service/stats.cc index 720a87f57e8..524ecffe63f 100644 --- a/chromium/components/download/internal/background_service/stats.cc +++ b/chromium/components/download/internal/background_service/stats.cc @@ -91,6 +91,8 @@ std::string ClientToHistogramSuffix(DownloadClient client) { return "BackgroundFetch"; case DownloadClient::DEBUGGING: return "Debugging"; + case DownloadClient::MOUNTAIN_INTERNAL: + return "MountainInternal"; case DownloadClient::BOUNDARY: NOTREACHED(); break; @@ -119,6 +121,8 @@ std::string CompletionTypeToHistogramSuffix(CompletionType type) { return "OutOfRetries"; case CompletionType::OUT_OF_RESUMPTIONS: return "OutOfResumptions"; + case CompletionType::UPLOAD_TIMEOUT: + return "UploadTimeout"; case CompletionType::COUNT: NOTREACHED(); } @@ -165,9 +169,12 @@ void LogDatabaseRecords(Entry::State state, uint32_t record_count) { } // Helper method to log the pause reason for a particular download. 
-void LogDownloadPauseReason(PauseReason reason) { - UMA_HISTOGRAM_ENUMERATION("Download.Service.PauseReason", reason, - PauseReason::COUNT); +void LogDownloadPauseReason(PauseReason reason, bool on_upload_data_received) { + std::string name(on_upload_data_received + ? "Download.Service.OnUploadDataReceived.PauseReason" + : "Download.Service.PauseReason"); + + base::UmaHistogramEnumeration(name, reason, PauseReason::COUNT); } } // namespace @@ -259,23 +266,24 @@ void LogDownloadCompletion(CompletionType type, uint64_t file_size_bytes) { base::UmaHistogramCustomCounts(name, file_size_kb, 1, kMaxFileSizeKB, 50); } -void LogDownloadPauseReason(bool unmet_device_criteria, - bool pause_by_client, - bool external_navigation, - bool external_download) { - LogDownloadPauseReason(PauseReason::ANY); +void LogDownloadPauseReason(const DownloadBlockageStatus& blockage_status, + bool currently_in_progress) { + LogDownloadPauseReason(PauseReason::ANY, currently_in_progress); - if (unmet_device_criteria) - LogDownloadPauseReason(PauseReason::UNMET_DEVICE_CRITERIA); + if (blockage_status.blocked_by_criteria) + LogDownloadPauseReason(PauseReason::UNMET_DEVICE_CRITERIA, + currently_in_progress); - if (pause_by_client) - LogDownloadPauseReason(PauseReason::PAUSE_BY_CLIENT); + if (blockage_status.entry_not_active) + LogDownloadPauseReason(PauseReason::PAUSE_BY_CLIENT, currently_in_progress); - if (external_navigation) - LogDownloadPauseReason(PauseReason::EXTERNAL_NAVIGATION); + if (blockage_status.blocked_by_navigation) + LogDownloadPauseReason(PauseReason::EXTERNAL_NAVIGATION, + currently_in_progress); - if (external_download) - LogDownloadPauseReason(PauseReason::EXTERNAL_DOWNLOAD); + if (blockage_status.blocked_by_downloads) + LogDownloadPauseReason(PauseReason::EXTERNAL_DOWNLOAD, + currently_in_progress); } void LogModelOperationResult(ModelAction action, bool success) { @@ -375,5 +383,15 @@ void LogEntryRetryCount(uint32_t retry_count) { 
UMA_HISTOGRAM_COUNTS_100("Download.Service.Entry.RetryCount", retry_count); } +void LogEntryRemovedWhileWaitingForUploadResponse() { + UMA_HISTOGRAM_BOOLEAN("Download.Service.Upload.EntryNotFound", true); +} + +void LogHasUploadData(DownloadClient client, bool has_upload_data) { + std::string name("Download.Service.Upload.HasUploadData"); + name.append(".").append(ClientToHistogramSuffix(client)); + base::UmaHistogramBoolean(name, has_upload_data); +} + } // namespace stats } // namespace download diff --git a/chromium/components/download/internal/background_service/stats.h b/chromium/components/download/internal/background_service/stats.h index ab9d2b530f7..7a0e40fbbff 100644 --- a/chromium/components/download/internal/background_service/stats.h +++ b/chromium/components/download/internal/background_service/stats.h @@ -8,6 +8,7 @@ #include "base/files/file.h" #include "base/optional.h" #include "components/download/internal/background_service/controller.h" +#include "components/download/internal/background_service/download_blockage_status.h" #include "components/download/internal/background_service/driver_entry.h" #include "components/download/internal/background_service/entry.h" #include "components/download/public/background_service/clients.h" @@ -174,10 +175,9 @@ void LogDownloadCompletion(CompletionType type, uint64_t file_size_bytes); // Logs various pause reasons for download. The reasons are not mutually // exclusive. -void LogDownloadPauseReason(bool unmet_device_criteria, - bool pause_by_client, - bool external_navigation, - bool external_download); +void LogDownloadPauseReason(const DownloadBlockageStatus& blockage_status, + bool on_upload_data_received); +void LogEntryRemovedWhileWaitingForUploadResponse(); // Logs statistics about the result of a model operation. Used to track failure // cases. @@ -224,6 +224,9 @@ void LogEntryResumptionCount(uint32_t resume_count); // At the time of a retry, logs which retry attempt count this is. 
void LogEntryRetryCount(uint32_t retry_count); +// Records whether the entry was an upload. +void LogHasUploadData(DownloadClient client, bool has_upload_data); + } // namespace stats } // namespace download diff --git a/chromium/components/download/internal/background_service/test/BUILD.gn b/chromium/components/download/internal/background_service/test/BUILD.gn index 6dc81dd7605..0ed3e442f15 100644 --- a/chromium/components/download/internal/background_service/test/BUILD.gn +++ b/chromium/components/download/internal/background_service/test/BUILD.gn @@ -43,5 +43,6 @@ source_set("test_support") { deps = [ "//components/download/internal/background_service:internal", + "//services/network/public/cpp", ] } diff --git a/chromium/components/download/internal/common/BUILD.gn b/chromium/components/download/internal/common/BUILD.gn index 98de310f8c9..bb3a9299559 100644 --- a/chromium/components/download/internal/common/BUILD.gn +++ b/chromium/components/download/internal/common/BUILD.gn @@ -3,42 +3,104 @@ # found in the LICENSE file. 
source_set("internal") { - visibility = [ "//components/download/public/common:public" ] + visibility = [ + ":for_tests", + "//components/download/public/common:public", + ] configs += [ "//components/download/public/common:components_download_implementation", ] sources = [ + "base_file.cc", + "base_file_posix.cc", + "base_file_win.cc", "download_create_info.cc", + "download_file_factory.cc", + "download_file_impl.cc", "download_interrupt_reasons_impl.cc", + "download_interrupt_reasons_utils.cc", + "download_item_impl.cc", + "download_item_impl_delegate.cc", + "download_job.cc", + "download_job_factory.cc", + "download_job_impl.cc", + "download_job_impl.h", + "download_response_handler.cc", "download_stats.cc", "download_task_runner.cc", "download_ukm_helper.cc", + "download_url_loader_factory_getter.cc", "download_utils.cc", + "download_worker.cc", + "download_worker.h", + "parallel_download_job.cc", + "parallel_download_job.h", + "parallel_download_utils.cc", + "parallel_download_utils.h", "rate_estimator.cc", + "resource_downloader.cc", + "save_package_download_job.cc", + "save_package_download_job.h", + "stream_handle_input_stream.cc", + "url_download_handler_factory.cc", + ] + + public_deps = [ + "//services/network/public/mojom", ] deps = [ "//base", + "//components/download/downloader/in_progress", + "//components/download/public/common:interfaces", + "//components/download/quarantine", + "//mojo/public/c/system", "//net", "//services/metrics/public/cpp:ukm_builders", + "//services/network/public/cpp", ] } +# tests need to access both public and internal sources. So in the component +# build case, we exclude the internal dependency as it is included in the +# test_support target under public. 
+group("for_tests") { + visibility = [ ":unit_tests" ] + if (!is_component_build) { + public_deps = [ + ":internal", + ] + } +} + source_set("unit_tests") { testonly = true + if (is_component_build) { + check_includes = false + } + sources = [ + "base_file_unittest.cc", + "base_file_win_unittest.cc", + "download_file_unittest.cc", + "download_item_impl_unittest.cc", "download_stats_unittest.cc", "download_ukm_helper_unittest.cc", + "parallel_download_job_unittest.cc", + "parallel_download_utils_unittest.cc", "rate_estimator_unittest.cc", ] deps = [ + ":for_tests", "//base/test:test_support", - "//components/download/public/common:public", + "//components/download/public/common:test_support", "//components/ukm:test_support", + "//crypto", + "//net", "//services/metrics/public/cpp:ukm_builders", "//testing/gmock", "//testing/gtest", diff --git a/chromium/components/download/internal/common/DEPS b/chromium/components/download/internal/common/DEPS index 1a9dc4abaa7..65c70055f64 100644 --- a/chromium/components/download/internal/common/DEPS +++ b/chromium/components/download/internal/common/DEPS @@ -1,9 +1,21 @@ include_rules = [ "-content", + "+components/download/downloader/in_progress", "+components/download/public/common", + "+components/download/quarantine", "+components/ukm/test_ukm_recorder.h", + "+crypto", + "+mojo/public/c/system", + "+net/base/filename_util.h", + "+net/base/load_flags.h", + "+net/base/io_buffer.h", + "+net/base/net_errors.h", "+net/http/http_content_disposition.h", + "+net/http/http_request_headers.h", "+net/http/http_response_headers.h", + "+net/http/http_status_code.h", "+net/http/http_util.h", + "+net/traffic_annotation/network_traffic_annotation.h", "+services/metrics/public/cpp", + "+services/network/public/cpp", ] diff --git a/chromium/components/download/internal/common/base_file.cc b/chromium/components/download/internal/common/base_file.cc new file mode 100644 index 00000000000..fce4f343809 --- /dev/null +++ 
b/chromium/components/download/internal/common/base_file.cc @@ -0,0 +1,528 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/public/common/base_file.h" + +#include <memory> +#include <utility> + +#include "base/bind.h" +#include "base/files/file.h" +#include "base/files/file_util.h" +#include "base/format_macros.h" +#include "base/logging.h" +#include "base/macros.h" +#include "base/pickle.h" +#include "base/strings/stringprintf.h" +#include "base/threading/thread_restrictions.h" +#include "base/trace_event/trace_event.h" +#include "build/build_config.h" +#include "components/download/public/common/download_interrupt_reasons_utils.h" +#include "components/download/public/common/download_item.h" +#include "components/download/public/common/download_stats.h" +#include "components/download/quarantine/quarantine.h" +#include "crypto/secure_hash.h" + +#define CONDITIONAL_TRACE(trace) \ + do { \ + if (download_id_ != DownloadItem::kInvalidId) \ + TRACE_EVENT_##trace; \ + } while (0) + +namespace download { + +namespace { +class FileErrorData : public base::trace_event::ConvertableToTraceFormat { + public: + FileErrorData(const char* operation, + int os_error, + DownloadInterruptReason interrupt_reason) + : operation_(operation), + os_error_(os_error), + interrupt_reason_(interrupt_reason) {} + + ~FileErrorData() override = default; + + void AppendAsTraceFormat(std::string* out) const override { + out->append("{"); + out->append( + base::StringPrintf("\"operation\":\"%s\",", operation_.c_str())); + out->append(base::StringPrintf("\"os_error\":\"%d\",", os_error_)); + out->append(base::StringPrintf( + "\"interrupt_reason\":\"%s\",", + DownloadInterruptReasonToString(interrupt_reason_).c_str())); + out->append("}"); + } + + private: + std::string operation_; + int os_error_; + DownloadInterruptReason interrupt_reason_; 
+ DISALLOW_COPY_AND_ASSIGN(FileErrorData); +}; +} // namespace + +BaseFile::BaseFile(uint32_t download_id) : download_id_(download_id) { + DETACH_FROM_SEQUENCE(sequence_checker_); +} + +BaseFile::~BaseFile() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + if (detached_) + Close(); + else + Cancel(); // Will delete the file. +} + +DownloadInterruptReason BaseFile::Initialize( + const base::FilePath& full_path, + const base::FilePath& default_directory, + base::File file, + int64_t bytes_so_far, + const std::string& hash_so_far, + std::unique_ptr<crypto::SecureHash> hash_state, + bool is_sparse_file, + int64_t* const bytes_wasted) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + DCHECK(!detached_); + + if (full_path.empty()) { + base::FilePath temp_file; + if ((default_directory.empty() || + !base::CreateTemporaryFileInDir(default_directory, &temp_file)) && + !base::CreateTemporaryFile(&temp_file)) { + return LogInterruptReason("Unable to create", 0, + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED); + } + full_path_ = temp_file; + } else { + full_path_ = full_path; + } + + bytes_so_far_ = bytes_so_far; + secure_hash_ = std::move(hash_state); + is_sparse_file_ = is_sparse_file; + // Sparse file doesn't validate hash. + if (is_sparse_file_) + secure_hash_.reset(); + file_ = std::move(file); + + return Open(hash_so_far, bytes_wasted); +} + +DownloadInterruptReason BaseFile::AppendDataToFile(const char* data, + size_t data_len) { + DCHECK(!is_sparse_file_); + return WriteDataToFile(bytes_so_far_, data, data_len); +} + +DownloadInterruptReason BaseFile::WriteDataToFile(int64_t offset, + const char* data, + size_t data_len) { + // NOTE(benwells): The above DCHECK won't be present in release builds, + // so we log any occurences to see how common this error is in the wild. 
+ if (detached_) + RecordDownloadCount(APPEND_TO_DETACHED_FILE_COUNT); + + if (!file_.IsValid()) + return LogInterruptReason("No file stream on append", 0, + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED); + + // TODO(phajdan.jr): get rid of this check. + if (data_len == 0) + return DOWNLOAD_INTERRUPT_REASON_NONE; + + // Use nestable async event instead of sync event so that all the writes + // belong to the same download will be grouped together. + CONDITIONAL_TRACE( + NESTABLE_ASYNC_BEGIN0("download", "DownloadFileWrite", download_id_)); + int write_result = file_.Write(offset, data, data_len); + DCHECK_NE(0, write_result); + + // Report errors on file writes. + if (write_result < 0) + return LogSystemError("Write", logging::GetLastSystemErrorCode()); + + DCHECK_EQ(static_cast<size_t>(write_result), data_len); + + if (bytes_so_far_ != offset) { + // A hole is created in the file. + is_sparse_file_ = true; + secure_hash_.reset(); + } + + bytes_so_far_ += data_len; + CONDITIONAL_TRACE(NESTABLE_ASYNC_END1("download", "DownloadFileWrite", + download_id_, "bytes", data_len)); + + if (secure_hash_) + secure_hash_->Update(data, data_len); + + return DOWNLOAD_INTERRUPT_REASON_NONE; +} + +DownloadInterruptReason BaseFile::Rename(const base::FilePath& new_path) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + DownloadInterruptReason rename_result = DOWNLOAD_INTERRUPT_REASON_NONE; + + // If the new path is same as the old one, there is no need to perform the + // following renaming logic. + if (new_path == full_path_) + return DOWNLOAD_INTERRUPT_REASON_NONE; + + // Save the information whether the download is in progress because + // it will be overwritten by closing the file. 
+ bool was_in_progress = in_progress(); + + Close(); + + CONDITIONAL_TRACE(BEGIN2("download", "DownloadFileRename", "old_filename", + full_path_.AsUTF8Unsafe(), "new_filename", + new_path.AsUTF8Unsafe())); + + base::CreateDirectory(new_path.DirName()); + + // A simple rename wouldn't work here since we want the file to have + // permissions / security descriptors that makes sense in the new directory. + rename_result = MoveFileAndAdjustPermissions(new_path); + + CONDITIONAL_TRACE(END0("download", "DownloadFileRename")); + + if (rename_result == DOWNLOAD_INTERRUPT_REASON_NONE) + full_path_ = new_path; + + // Re-open the file if we were still using it regardless of the interrupt + // reason. + DownloadInterruptReason open_result = DOWNLOAD_INTERRUPT_REASON_NONE; + if (was_in_progress) { + int64_t bytes_wasted; // Do not need to use bytes_wasted. + open_result = Open(std::string(), &bytes_wasted); + } + + return rename_result == DOWNLOAD_INTERRUPT_REASON_NONE ? open_result + : rename_result; +} + +void BaseFile::Detach() { + detached_ = true; + CONDITIONAL_TRACE( + INSTANT0("download", "DownloadFileDetached", TRACE_EVENT_SCOPE_THREAD)); +} + +void BaseFile::Cancel() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + DCHECK(!detached_); + + CONDITIONAL_TRACE( + INSTANT0("download", "DownloadCancelled", TRACE_EVENT_SCOPE_THREAD)); + + Close(); + + if (!full_path_.empty()) { + CONDITIONAL_TRACE( + INSTANT0("download", "DownloadFileDeleted", TRACE_EVENT_SCOPE_THREAD)); + base::DeleteFile(full_path_, false); + } + + Detach(); +} + +std::unique_ptr<crypto::SecureHash> BaseFile::Finish() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + // TODO(qinmin): verify that all the holes have been filled. 
+ if (is_sparse_file_) + CalculatePartialHash(std::string()); + Close(); + return std::move(secure_hash_); +} + +std::string BaseFile::DebugString() const { + return base::StringPrintf( + "{ " + " full_path_ = \"%" PRFilePath + "\"" + " bytes_so_far_ = %" PRId64 " detached_ = %c }", + full_path_.value().c_str(), bytes_so_far_, detached_ ? 'T' : 'F'); +} + +DownloadInterruptReason BaseFile::CalculatePartialHash( + const std::string& hash_to_expect) { + secure_hash_ = crypto::SecureHash::Create(crypto::SecureHash::SHA256); + + if (bytes_so_far_ == 0) + return DOWNLOAD_INTERRUPT_REASON_NONE; + + if (file_.Seek(base::File::FROM_BEGIN, 0) != 0) + return LogSystemError("Seek partial file", + logging::GetLastSystemErrorCode()); + + const size_t kMinBufferSize = secure_hash_->GetHashLength(); + const size_t kMaxBufferSize = 1024 * 512; + static_assert(kMaxBufferSize <= std::numeric_limits<int>::max(), + "kMaxBufferSize must fit on an int"); + + // The size of the buffer is: + // - at least kMinBufferSize so that we can use it to hold the hash as well. + // - at most kMaxBufferSize so that there's a reasonable bound. + // - not larger than |bytes_so_far_| unless bytes_so_far_ is less than the + // hash size. + std::vector<char> buffer(std::max<int64_t>( + kMinBufferSize, std::min<int64_t>(kMaxBufferSize, bytes_so_far_))); + + int64_t current_position = 0; + while (current_position < bytes_so_far_) { + // While std::min needs to work with int64_t, the result is always at most + // kMaxBufferSize, which fits on an int. 
+ int bytes_to_read = + std::min<int64_t>(buffer.size(), bytes_so_far_ - current_position); + int length = file_.ReadAtCurrentPos(&buffer.front(), bytes_to_read); + if (length == -1) { + return LogInterruptReason("Reading partial file", + logging::GetLastSystemErrorCode(), + DOWNLOAD_INTERRUPT_REASON_FILE_TOO_SHORT); + } + + if (length == 0) + break; + + secure_hash_->Update(&buffer.front(), length); + current_position += length; + } + + if (current_position != bytes_so_far_) { + return LogInterruptReason("Verifying prefix hash", 0, + DOWNLOAD_INTERRUPT_REASON_FILE_TOO_SHORT); + } + + if (!hash_to_expect.empty()) { + DCHECK_EQ(secure_hash_->GetHashLength(), hash_to_expect.size()); + DCHECK(buffer.size() >= secure_hash_->GetHashLength()); + std::unique_ptr<crypto::SecureHash> partial_hash(secure_hash_->Clone()); + partial_hash->Finish(&buffer.front(), buffer.size()); + + if (memcmp(&buffer.front(), hash_to_expect.c_str(), + partial_hash->GetHashLength())) { + return LogInterruptReason("Verifying prefix hash", 0, + DOWNLOAD_INTERRUPT_REASON_FILE_HASH_MISMATCH); + } + } + + return DOWNLOAD_INTERRUPT_REASON_NONE; +} + +DownloadInterruptReason BaseFile::Open(const std::string& hash_so_far, + int64_t* const bytes_wasted) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + DCHECK(!detached_); + DCHECK(!full_path_.empty()); + + // Create a new file if it is not provided. + if (!file_.IsValid()) { + file_.Initialize(full_path_, base::File::FLAG_OPEN_ALWAYS | + base::File::FLAG_WRITE | + base::File::FLAG_READ); + if (!file_.IsValid()) { + return LogNetError("Open/Initialize File", + net::FileErrorToNetError(file_.error_details())); + } + } + + CONDITIONAL_TRACE(NESTABLE_ASYNC_BEGIN2( + "download", "DownloadFileOpen", download_id_, "file_name", + full_path_.AsUTF8Unsafe(), "bytes_so_far", bytes_so_far_)); + + // For sparse file, skip hash validation. 
+ if (is_sparse_file_) { + if (file_.GetLength() < bytes_so_far_) { + *bytes_wasted = bytes_so_far_; + ClearFile(); + return LogInterruptReason("File has fewer written bytes than expected", 0, + DOWNLOAD_INTERRUPT_REASON_FILE_TOO_SHORT); + } + return DOWNLOAD_INTERRUPT_REASON_NONE; + } + + if (!secure_hash_) { + DownloadInterruptReason reason = CalculatePartialHash(hash_so_far); + if (reason != DOWNLOAD_INTERRUPT_REASON_NONE) { + *bytes_wasted = file_.GetLength(); + ClearFile(); + return reason; + } + } + + int64_t file_size = file_.Seek(base::File::FROM_END, 0); + if (file_size < 0) { + logging::SystemErrorCode error = logging::GetLastSystemErrorCode(); + ClearFile(); + return LogSystemError("Seeking to end", error); + } else if (file_size > bytes_so_far_) { + // The file is larger than we expected. + // This is OK, as long as we don't use the extra. + // Truncate the file. + *bytes_wasted = file_size - bytes_so_far_; + if (!file_.SetLength(bytes_so_far_) || + file_.Seek(base::File::FROM_BEGIN, bytes_so_far_) != bytes_so_far_) { + logging::SystemErrorCode error = logging::GetLastSystemErrorCode(); + *bytes_wasted = file_size; + ClearFile(); + return LogSystemError("Truncating to last known offset", error); + } + } else if (file_size < bytes_so_far_) { + // The file is shorter than we expected. Our hashes won't be valid. + *bytes_wasted = bytes_so_far_; + ClearFile(); + return LogInterruptReason("Unable to seek to last written point", 0, + DOWNLOAD_INTERRUPT_REASON_FILE_TOO_SHORT); + } + + return DOWNLOAD_INTERRUPT_REASON_NONE; +} + +void BaseFile::Close() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + if (file_.IsValid()) { + // Currently we don't really care about the return value, since if it fails + // theres not much we can do. But we might in the future. + file_.Flush(); + ClearFile(); + } +} + +void BaseFile::ClearFile() { + // This should only be called when we have a stream. 
+ DCHECK(file_.IsValid()); + file_.Close(); + CONDITIONAL_TRACE( + NESTABLE_ASYNC_END0("download", "DownloadFileOpen", download_id_)); +} + +DownloadInterruptReason BaseFile::LogNetError(const char* operation, + net::Error error) { + CONDITIONAL_TRACE(INSTANT2("download", "DownloadFileError", + TRACE_EVENT_SCOPE_THREAD, "operation", operation, + "net_error", error)); + return ConvertNetErrorToInterruptReason(error, DOWNLOAD_INTERRUPT_FROM_DISK); +} + +DownloadInterruptReason BaseFile::LogSystemError( + const char* operation, + logging::SystemErrorCode os_error) { + // There's no direct conversion from a system error to an interrupt reason. + base::File::Error file_error = base::File::OSErrorToFileError(os_error); + return LogInterruptReason(operation, os_error, + ConvertFileErrorToInterruptReason(file_error)); +} + +DownloadInterruptReason BaseFile::LogInterruptReason( + const char* operation, + int os_error, + DownloadInterruptReason reason) { + DVLOG(1) << __func__ << "() operation:" << operation + << " os_error:" << os_error + << " reason:" << DownloadInterruptReasonToString(reason); + auto error_data = + std::make_unique<FileErrorData>(operation, os_error, reason); + CONDITIONAL_TRACE(INSTANT1("download", "DownloadFileError", + TRACE_EVENT_SCOPE_THREAD, "file_error", + std::move(error_data))); + return reason; +} + +#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) + +namespace { + +// Given a source and a referrer, determines the "safest" URL that can be used +// to determine the authority of the download source. Returns an empty URL if no +// HTTP/S URL can be determined for the <|source_url|, |referrer_url|> pair. +GURL GetEffectiveAuthorityURL(const GURL& source_url, + const GURL& referrer_url) { + if (source_url.is_valid()) { + // http{,s} has an authority and are supported. 
+ if (source_url.SchemeIsHTTPOrHTTPS()) + return source_url; + + // If the download source is file:// ideally we should copy the MOTW from + // the original file, but given that Chrome/Chromium places strict + // restrictions on which schemes can reference file:// URLs, this code is + // going to assume that at this point it's okay to treat this download as + // being from the local system. + if (source_url.SchemeIsFile()) + return source_url; + + // ftp:// has an authority. + if (source_url.SchemeIs(url::kFtpScheme)) + return source_url; + } + + if (referrer_url.is_valid() && referrer_url.SchemeIsHTTPOrHTTPS()) + return referrer_url; + + return GURL(); +} + +} // namespace + +DownloadInterruptReason BaseFile::AnnotateWithSourceInformation( + const std::string& client_guid, + const GURL& source_url, + const GURL& referrer_url) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + DCHECK(!detached_); + DCHECK(!full_path_.empty()); + + CONDITIONAL_TRACE(BEGIN0("download", "DownloadFileAnnotate")); + QuarantineFileResult result = QuarantineFile( + full_path_, GetEffectiveAuthorityURL(source_url, referrer_url), + referrer_url, client_guid); + CONDITIONAL_TRACE(END0("download", "DownloadFileAnnotate")); + + switch (result) { + case QuarantineFileResult::OK: + return DOWNLOAD_INTERRUPT_REASON_NONE; + case QuarantineFileResult::VIRUS_INFECTED: + return DOWNLOAD_INTERRUPT_REASON_FILE_VIRUS_INFECTED; + case QuarantineFileResult::SECURITY_CHECK_FAILED: + return DOWNLOAD_INTERRUPT_REASON_FILE_SECURITY_CHECK_FAILED; + case QuarantineFileResult::BLOCKED_BY_POLICY: + return DOWNLOAD_INTERRUPT_REASON_FILE_BLOCKED; + case QuarantineFileResult::ACCESS_DENIED: + return DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED; + + case QuarantineFileResult::FILE_MISSING: + // Don't have a good interrupt reason here. This return code means that + // the file at |full_path_| went missing before QuarantineFile got to look + // at it. 
Not expected to happen, but we've seen instances where a file + // goes missing immediately after BaseFile closes the handle. + // + // Intentionally using a different error message than + // SECURITY_CHECK_FAILED in order to distinguish the two. + return DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + + case QuarantineFileResult::ANNOTATION_FAILED: + // This means that the mark-of-the-web couldn't be applied. The file is + // already on the file system under its final target name. + // + // Causes of failed annotations typically aren't transient. E.g. the + // target file system may not support extended attributes or alternate + // streams. We are going to allow these downloads to progress on the + // assumption that failures to apply MOTW can't reliably be introduced + // remotely. + return DOWNLOAD_INTERRUPT_REASON_NONE; + } + return DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; +} +#else // !OS_WIN && !OS_MACOSX && !OS_LINUX +DownloadInterruptReason BaseFile::AnnotateWithSourceInformation( + const std::string& client_guid, + const GURL& source_url, + const GURL& referrer_url) { + return DOWNLOAD_INTERRUPT_REASON_NONE; +} +#endif + +} // namespace download diff --git a/chromium/components/download/internal/common/base_file_posix.cc b/chromium/components/download/internal/common/base_file_posix.cc new file mode 100644 index 00000000000..f3d6aa91592 --- /dev/null +++ b/chromium/components/download/internal/common/base_file_posix.cc @@ -0,0 +1,44 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "components/download/public/common/base_file.h" + +#include <errno.h> + +#include "base/files/file_util.h" +#include "components/download/public/common/download_interrupt_reasons.h" + +namespace download { + +DownloadInterruptReason BaseFile::MoveFileAndAdjustPermissions( + const base::FilePath& new_path) { + // Similarly, on Unix, we're moving a temp file created with permissions 600 + // to |new_path|. Here, we try to fix up the destination file with appropriate + // permissions. + struct stat st; + // First check the file existence and create an empty file if it doesn't + // exist. + if (!base::PathExists(new_path)) { + int write_error = base::WriteFile(new_path, "", 0); + if (write_error < 0) + return LogSystemError("WriteFile", errno); + } + int stat_error = stat(new_path.value().c_str(), &st); + bool stat_succeeded = (stat_error == 0); + if (!stat_succeeded) + LogSystemError("stat", errno); + + if (!base::Move(full_path_, new_path)) + return LogSystemError("Move", errno); + + if (stat_succeeded) { + // On Windows file systems (FAT, NTFS), chmod fails. This is OK. + int chmod_error = chmod(new_path.value().c_str(), st.st_mode); + if (chmod_error < 0) + LogSystemError("chmod", errno); + } + return DOWNLOAD_INTERRUPT_REASON_NONE; +} + +} // namespace download diff --git a/chromium/components/download/internal/common/base_file_unittest.cc b/chromium/components/download/internal/common/base_file_unittest.cc new file mode 100644 index 00000000000..ea6459064de --- /dev/null +++ b/chromium/components/download/internal/common/base_file_unittest.cc @@ -0,0 +1,760 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "components/download/public/common/base_file.h" + +#include <stddef.h> +#include <stdint.h> +#include <utility> + +#include "base/files/file.h" +#include "base/files/file_util.h" +#include "base/files/scoped_temp_dir.h" +#include "base/logging.h" +#include "base/macros.h" +#include "base/strings/string_number_conversions.h" +#include "base/test/test_file_util.h" +#include "build/build_config.h" +#include "components/download/public/common/download_interrupt_reasons.h" +#include "components/download/public/common/download_item.h" +#include "crypto/secure_hash.h" +#include "crypto/sha2.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace download { +namespace { + +const char kTestData1[] = "Let's write some data to the file!\n"; +const char kTestData2[] = "Writing more data.\n"; +const char kTestData3[] = "Final line."; +const char kTestData4[] = "supercalifragilisticexpialidocious"; +const int kTestDataLength1 = arraysize(kTestData1) - 1; +const int kTestDataLength2 = arraysize(kTestData2) - 1; +const int kTestDataLength3 = arraysize(kTestData3) - 1; +const int kTestDataLength4 = arraysize(kTestData4) - 1; +int64_t kTestDataBytesWasted = 0; + +// SHA-256 hash of kTestData1 (excluding terminating NUL). +const uint8_t kHashOfTestData1[] = { + 0x0b, 0x2d, 0x3f, 0x3f, 0x79, 0x43, 0xad, 0x64, 0xb8, 0x60, 0xdf, + 0x94, 0xd0, 0x5c, 0xb5, 0x6a, 0x8a, 0x97, 0xc6, 0xec, 0x57, 0x68, + 0xb5, 0xb7, 0x0b, 0x93, 0x0c, 0x5a, 0xa7, 0xfa, 0x9a, 0xde}; + +// SHA-256 hash of kTestData1 ++ kTestData2 ++ kTestData3 (excluding terminating +// NUL). 
+const uint8_t kHashOfTestData1To3[] = { + 0xcb, 0xf6, 0x8b, 0xf1, 0x0f, 0x80, 0x03, 0xdb, 0x86, 0xb3, 0x13, + 0x43, 0xaf, 0xac, 0x8c, 0x71, 0x75, 0xbd, 0x03, 0xfb, 0x5f, 0xc9, + 0x05, 0x65, 0x0f, 0x8c, 0x80, 0xaf, 0x08, 0x74, 0x43, 0xa8}; + +} // namespace + +class BaseFileTest : public testing::Test { + public: + static const unsigned char kEmptySha256Hash[crypto::kSHA256Length]; + + BaseFileTest() + : expect_file_survives_(false), + expect_in_progress_(true), + expected_error_(DOWNLOAD_INTERRUPT_REASON_NONE) {} + + void SetUp() override { + ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); + base_file_.reset(new BaseFile(DownloadItem::kInvalidId)); + } + + void TearDown() override { + EXPECT_FALSE(base_file_->in_progress()); + if (!expected_error_) { + EXPECT_EQ(static_cast<int64_t>(expected_data_.size()), + base_file_->bytes_so_far()); + } + + base::FilePath full_path = base_file_->full_path(); + + if (!expected_data_.empty() && !expected_error_) { + // Make sure the data has been properly written to disk. 
+ std::string disk_data; + EXPECT_TRUE(base::ReadFileToString(full_path, &disk_data)); + EXPECT_EQ(expected_data_, disk_data); + } + + base_file_.reset(); + + EXPECT_EQ(expect_file_survives_, base::PathExists(full_path)); + } + + bool InitializeFile() { + DownloadInterruptReason result = base_file_->Initialize( + base::FilePath(), temp_dir_.GetPath(), base::File(), 0, std::string(), + std::unique_ptr<crypto::SecureHash>(), false, &kTestDataBytesWasted); + EXPECT_EQ(expected_error_, result); + return result == DOWNLOAD_INTERRUPT_REASON_NONE; + } + + bool AppendDataToFile(const std::string& data) { + EXPECT_EQ(expect_in_progress_, base_file_->in_progress()); + DownloadInterruptReason result = + base_file_->AppendDataToFile(data.data(), data.size()); + if (result == DOWNLOAD_INTERRUPT_REASON_NONE) + EXPECT_TRUE(expect_in_progress_) << " result = " << result; + + EXPECT_EQ(expected_error_, result); + if (base_file_->in_progress()) { + expected_data_ += data; + if (expected_error_ == DOWNLOAD_INTERRUPT_REASON_NONE) { + EXPECT_EQ(static_cast<int64_t>(expected_data_.size()), + base_file_->bytes_so_far()); + } + } + return result == DOWNLOAD_INTERRUPT_REASON_NONE; + } + + void set_expected_data(const std::string& data) { expected_data_ = data; } + + // Helper functions. + // Create a file. Returns the complete file path. + base::FilePath CreateTestFile() { + base::FilePath file_name; + BaseFile file(DownloadItem::kInvalidId); + + EXPECT_EQ( + DOWNLOAD_INTERRUPT_REASON_NONE, + file.Initialize(base::FilePath(), temp_dir_.GetPath(), base::File(), 0, + std::string(), std::unique_ptr<crypto::SecureHash>(), + false, &kTestDataBytesWasted)); + file_name = file.full_path(); + EXPECT_NE(base::FilePath::StringType(), file_name.value()); + + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + file.AppendDataToFile(kTestData4, kTestDataLength4)); + + // Keep the file from getting deleted when existing_file_name is deleted. 
+ file.Detach(); + + return file_name; + } + + // Create a file with the specified file name. + void CreateFileWithName(const base::FilePath& file_name) { + EXPECT_NE(base::FilePath::StringType(), file_name.value()); + BaseFile duplicate_file(download::DownloadItem::kInvalidId); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + duplicate_file.Initialize(file_name, temp_dir_.GetPath(), + base::File(), 0, std::string(), + std::unique_ptr<crypto::SecureHash>(), + false, &kTestDataBytesWasted)); + // Write something into it. + duplicate_file.AppendDataToFile(kTestData4, kTestDataLength4); + // Detach the file so it isn't deleted on destruction of |duplicate_file|. + duplicate_file.Detach(); + } + + int64_t CurrentSpeedAtTime(base::TimeTicks current_time) { + EXPECT_TRUE(base_file_.get()); + return base_file_->CurrentSpeedAtTime(current_time); + } + + base::TimeTicks StartTick() { + EXPECT_TRUE(base_file_.get()); + return base_file_->start_tick_; + } + + void set_expected_error(DownloadInterruptReason err) { + expected_error_ = err; + } + + void ExpectPermissionError(DownloadInterruptReason err) { + EXPECT_TRUE(err == DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR || + err == DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED) + << "Interrupt reason = " << err; + } + + template <size_t SZ> + static void ExpectHashValue(const uint8_t (&expected_hash)[SZ], + std::unique_ptr<crypto::SecureHash> hash_state) { + std::vector<uint8_t> hash_value(hash_state->GetHashLength()); + hash_state->Finish(&hash_value.front(), hash_value.size()); + ASSERT_EQ(SZ, hash_value.size()); + EXPECT_EQ(0, memcmp(expected_hash, &hash_value.front(), hash_value.size())); + } + + protected: + // BaseClass instance we are testing. + std::unique_ptr<BaseFile> base_file_; + + // Temporary directory for renamed downloads. + base::ScopedTempDir temp_dir_; + + // Expect the file to survive deletion of the BaseFile instance. + bool expect_file_survives_; + + // Expect the file to be in progress. 
+ bool expect_in_progress_; + + private: + // Keep track of what data should be saved to the disk file. + std::string expected_data_; + DownloadInterruptReason expected_error_; +}; + +// This will initialize the entire array to zero. +const unsigned char BaseFileTest::kEmptySha256Hash[] = {0}; + +// Test the most basic scenario: just create the object and do a sanity check +// on all its accessors. This is actually a case that rarely happens +// in production, where we would at least Initialize it. +TEST_F(BaseFileTest, CreateDestroy) { + EXPECT_EQ(base::FilePath().value(), base_file_->full_path().value()); +} + +// Cancel the download explicitly. +TEST_F(BaseFileTest, Cancel) { + ASSERT_TRUE(InitializeFile()); + EXPECT_TRUE(base::PathExists(base_file_->full_path())); + base_file_->Cancel(); + EXPECT_FALSE(base::PathExists(base_file_->full_path())); + EXPECT_NE(base::FilePath().value(), base_file_->full_path().value()); +} + +// Write data to the file and detach it, so it doesn't get deleted +// automatically when base_file_ is destructed. +TEST_F(BaseFileTest, WriteAndDetach) { + ASSERT_TRUE(InitializeFile()); + ASSERT_TRUE(AppendDataToFile(kTestData1)); + base_file_->Finish(); + base_file_->Detach(); + expect_file_survives_ = true; +} + +// Write data to the file and detach it, and calculate its sha256 hash. +TEST_F(BaseFileTest, WriteWithHashAndDetach) { + ASSERT_TRUE(InitializeFile()); + ASSERT_TRUE(AppendDataToFile(kTestData1)); + ExpectHashValue(kHashOfTestData1, base_file_->Finish()); + base_file_->Detach(); + expect_file_survives_ = true; +} + +// Rename the file after writing to it, then detach. 
+TEST_F(BaseFileTest, WriteThenRenameAndDetach) { + ASSERT_TRUE(InitializeFile()); + + base::FilePath initial_path(base_file_->full_path()); + EXPECT_TRUE(base::PathExists(initial_path)); + base::FilePath new_path(temp_dir_.GetPath().AppendASCII("NewFile")); + EXPECT_FALSE(base::PathExists(new_path)); + + ASSERT_TRUE(AppendDataToFile(kTestData1)); + + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, base_file_->Rename(new_path)); + EXPECT_FALSE(base::PathExists(initial_path)); + EXPECT_TRUE(base::PathExists(new_path)); + + ExpectHashValue(kHashOfTestData1, base_file_->Finish()); + base_file_->Detach(); + expect_file_survives_ = true; +} + +// Write data to the file once. +TEST_F(BaseFileTest, SingleWrite) { + ASSERT_TRUE(InitializeFile()); + ASSERT_TRUE(AppendDataToFile(kTestData1)); + ExpectHashValue(kHashOfTestData1, base_file_->Finish()); +} + +// Write data to the file multiple times. +TEST_F(BaseFileTest, MultipleWrites) { + ASSERT_TRUE(InitializeFile()); + ASSERT_TRUE(AppendDataToFile(kTestData1)); + ASSERT_TRUE(AppendDataToFile(kTestData2)); + ASSERT_TRUE(AppendDataToFile(kTestData3)); + ExpectHashValue(kHashOfTestData1To3, base_file_->Finish()); +} + +// Write data to the file multiple times, interrupt it, and continue using +// another file. Calculate the resulting combined sha256 hash. +TEST_F(BaseFileTest, MultipleWritesInterruptedWithHash) { + ASSERT_TRUE(InitializeFile()); + // Write some data + ASSERT_TRUE(AppendDataToFile(kTestData1)); + ASSERT_TRUE(AppendDataToFile(kTestData2)); + // Get the hash state and file name. 
+ std::unique_ptr<crypto::SecureHash> hash_state = base_file_->Finish(); + + base::FilePath new_file_path(temp_dir_.GetPath().Append( + base::FilePath(FILE_PATH_LITERAL("second_file")))); + + ASSERT_TRUE(base::CopyFile(base_file_->full_path(), new_file_path)); + + // Create another file + BaseFile second_file(download::DownloadItem::kInvalidId); + ASSERT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + second_file.Initialize(new_file_path, base::FilePath(), + base::File(), base_file_->bytes_so_far(), + std::string(), std::move(hash_state), false, + &kTestDataBytesWasted)); + std::string data(kTestData3); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + second_file.AppendDataToFile(data.data(), data.size())); + ExpectHashValue(kHashOfTestData1To3, second_file.Finish()); +} + +// Rename the file after all writes to it. +TEST_F(BaseFileTest, WriteThenRename) { + ASSERT_TRUE(InitializeFile()); + + base::FilePath initial_path(base_file_->full_path()); + EXPECT_TRUE(base::PathExists(initial_path)); + base::FilePath new_path(temp_dir_.GetPath().AppendASCII("NewFile")); + EXPECT_FALSE(base::PathExists(new_path)); + + ASSERT_TRUE(AppendDataToFile(kTestData1)); + + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, base_file_->Rename(new_path)); + EXPECT_FALSE(base::PathExists(initial_path)); + EXPECT_TRUE(base::PathExists(new_path)); + + ExpectHashValue(kHashOfTestData1, base_file_->Finish()); +} + +// Rename the file while the download is still in progress. 
+TEST_F(BaseFileTest, RenameWhileInProgress) { + ASSERT_TRUE(InitializeFile()); + + base::FilePath initial_path(base_file_->full_path()); + EXPECT_TRUE(base::PathExists(initial_path)); + base::FilePath new_path(temp_dir_.GetPath().AppendASCII("NewFile")); + EXPECT_FALSE(base::PathExists(new_path)); + + ASSERT_TRUE(AppendDataToFile(kTestData1)); + + EXPECT_TRUE(base_file_->in_progress()); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, base_file_->Rename(new_path)); + EXPECT_FALSE(base::PathExists(initial_path)); + EXPECT_TRUE(base::PathExists(new_path)); + + ASSERT_TRUE(AppendDataToFile(kTestData2)); + ASSERT_TRUE(AppendDataToFile(kTestData3)); + + ExpectHashValue(kHashOfTestData1To3, base_file_->Finish()); +} + +// Test that a failed rename reports the correct error. +TEST_F(BaseFileTest, RenameWithError) { + ASSERT_TRUE(InitializeFile()); + + // TestDir is a subdirectory in |temp_dir_| that we will make read-only so + // that the rename will fail. + base::FilePath test_dir(temp_dir_.GetPath().AppendASCII("TestDir")); + ASSERT_TRUE(base::CreateDirectory(test_dir)); + + base::FilePath new_path(test_dir.AppendASCII("TestFile")); + EXPECT_FALSE(base::PathExists(new_path)); + + { + base::FilePermissionRestorer restore_permissions_for(test_dir); + ASSERT_TRUE(base::MakeFileUnwritable(test_dir)); + ExpectPermissionError(base_file_->Rename(new_path)); + } + + base_file_->Finish(); +} + +// Test that if a rename fails for an in-progress BaseFile, it remains writeable +// and renameable. +TEST_F(BaseFileTest, RenameWithErrorInProgress) { + ASSERT_TRUE(InitializeFile()); + + base::FilePath test_dir(temp_dir_.GetPath().AppendASCII("TestDir")); + ASSERT_TRUE(base::CreateDirectory(test_dir)); + + base::FilePath new_path(test_dir.AppendASCII("TestFile")); + EXPECT_FALSE(base::PathExists(new_path)); + + // Write some data to start with. 
+ ASSERT_TRUE(AppendDataToFile(kTestData1)); + ASSERT_TRUE(base_file_->in_progress()); + + base::FilePath old_path = base_file_->full_path(); + + { + base::FilePermissionRestorer restore_permissions_for(test_dir); + ASSERT_TRUE(base::MakeFileUnwritable(test_dir)); + ExpectPermissionError(base_file_->Rename(new_path)); + + // The file should still be open and we should be able to continue writing + // to it. + ASSERT_TRUE(base_file_->in_progress()); + ASSERT_TRUE(AppendDataToFile(kTestData2)); + ASSERT_EQ(old_path.value(), base_file_->full_path().value()); + + // Try to rename again, just for kicks. It should still fail. + ExpectPermissionError(base_file_->Rename(new_path)); + } + + // Now that TestDir is writeable again, we should be able to successfully + // rename the file. + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, base_file_->Rename(new_path)); + ASSERT_EQ(new_path.value(), base_file_->full_path().value()); + ASSERT_TRUE(AppendDataToFile(kTestData3)); + + ExpectHashValue(kHashOfTestData1To3, base_file_->Finish()); +} + +// Test that a failed write reports an error. +TEST_F(BaseFileTest, WriteWithError) { + base::FilePath path; + ASSERT_TRUE(base::CreateTemporaryFile(&path)); + + // Pass a file handle which was opened without the WRITE flag. + // This should result in an error when writing. + base::File file(path, base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_READ); + base_file_.reset(new BaseFile(download::DownloadItem::kInvalidId)); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + base_file_->Initialize(path, base::FilePath(), std::move(file), 0, + std::string(), + std::unique_ptr<crypto::SecureHash>(), false, + &kTestDataBytesWasted)); +#if defined(OS_WIN) + set_expected_error(DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED); +#elif defined(OS_POSIX) + set_expected_error(DOWNLOAD_INTERRUPT_REASON_FILE_FAILED); +#endif + ASSERT_FALSE(AppendDataToFile(kTestData1)); + base_file_->Finish(); +} + +// Try to write to uninitialized file. 
+TEST_F(BaseFileTest, UninitializedFile) { + expect_in_progress_ = false; + set_expected_error(DOWNLOAD_INTERRUPT_REASON_FILE_FAILED); + EXPECT_FALSE(AppendDataToFile(kTestData1)); +} + +// Create two |BaseFile|s with the same file, and attempt to write to both. +// Overwrite base_file_ with another file with the same name and +// non-zero contents, and make sure the last file to close 'wins'. +TEST_F(BaseFileTest, DuplicateBaseFile) { + ASSERT_TRUE(InitializeFile()); + + // Create another |BaseFile| referring to the file that |base_file_| owns. + CreateFileWithName(base_file_->full_path()); + + ASSERT_TRUE(AppendDataToFile(kTestData1)); + base_file_->Finish(); +} + +// Create a file and append to it. +TEST_F(BaseFileTest, AppendToBaseFile) { + // Create a new file. + base::FilePath existing_file_name = CreateTestFile(); + set_expected_data(kTestData4); + + // Use the file we've just created. + base_file_.reset(new BaseFile(download::DownloadItem::kInvalidId)); + ASSERT_EQ( + DOWNLOAD_INTERRUPT_REASON_NONE, + base_file_->Initialize(existing_file_name, base::FilePath(), base::File(), + kTestDataLength4, std::string(), + std::unique_ptr<crypto::SecureHash>(), false, + &kTestDataBytesWasted)); + + const base::FilePath file_name = base_file_->full_path(); + EXPECT_NE(base::FilePath::StringType(), file_name.value()); + + // Write into the file. + EXPECT_TRUE(AppendDataToFile(kTestData1)); + + base_file_->Finish(); + base_file_->Detach(); + expect_file_survives_ = true; +} + +// Create a read-only file and attempt to write to it. +TEST_F(BaseFileTest, ReadonlyBaseFile) { + // Create a new file. + base::FilePath readonly_file_name = CreateTestFile(); + + // Restore permissions to the file when we are done with this test. + base::FilePermissionRestorer restore_permissions(readonly_file_name); + + // Make it read-only. + EXPECT_TRUE(base::MakeFileUnwritable(readonly_file_name)); + + // Try to overwrite it. 
+ base_file_.reset(new BaseFile(download::DownloadItem::kInvalidId)); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED, + base_file_->Initialize(readonly_file_name, base::FilePath(), + base::File(), 0, std::string(), + std::unique_ptr<crypto::SecureHash>(), false, + &kTestDataBytesWasted)); + + expect_in_progress_ = false; + + const base::FilePath file_name = base_file_->full_path(); + EXPECT_NE(base::FilePath::StringType(), file_name.value()); + + // Write into the file. + set_expected_error(DOWNLOAD_INTERRUPT_REASON_FILE_FAILED); + EXPECT_FALSE(AppendDataToFile(kTestData1)); + + base_file_->Finish(); + base_file_->Detach(); + expect_file_survives_ = true; +} + +// Open an existing file and continue writing to it. The hash of the partial +// file is known and matches the existing contents. +TEST_F(BaseFileTest, ExistingBaseFileKnownHash) { + base::FilePath file_path = temp_dir_.GetPath().AppendASCII("existing"); + ASSERT_EQ(kTestDataLength1, + base::WriteFile(file_path, kTestData1, kTestDataLength1)); + + std::string hash_so_far(std::begin(kHashOfTestData1), + std::end(kHashOfTestData1)); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + base_file_->Initialize(file_path, base::FilePath(), base::File(), + kTestDataLength1, hash_so_far, + std::unique_ptr<crypto::SecureHash>(), false, + &kTestDataBytesWasted)); + set_expected_data(kTestData1); + ASSERT_TRUE(AppendDataToFile(kTestData2)); + ASSERT_TRUE(AppendDataToFile(kTestData3)); + ExpectHashValue(kHashOfTestData1To3, base_file_->Finish()); +} + +// Open an existing file and continue writing to it. The hash of the partial +// file is unknown. 
+TEST_F(BaseFileTest, ExistingBaseFileUnknownHash) { + base::FilePath file_path = temp_dir_.GetPath().AppendASCII("existing"); + ASSERT_EQ(kTestDataLength1, + base::WriteFile(file_path, kTestData1, kTestDataLength1)); + + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + base_file_->Initialize(file_path, base::FilePath(), base::File(), + kTestDataLength1, std::string(), + std::unique_ptr<crypto::SecureHash>(), false, + &kTestDataBytesWasted)); + set_expected_data(kTestData1); + ASSERT_TRUE(AppendDataToFile(kTestData2)); + ASSERT_TRUE(AppendDataToFile(kTestData3)); + ExpectHashValue(kHashOfTestData1To3, base_file_->Finish()); +} + +// Open an existing file. The contents of the file doesn't match the known hash. +TEST_F(BaseFileTest, ExistingBaseFileIncorrectHash) { + base::FilePath file_path = temp_dir_.GetPath().AppendASCII("existing"); + ASSERT_EQ(kTestDataLength2, + base::WriteFile(file_path, kTestData2, kTestDataLength2)); + + std::string hash_so_far(std::begin(kHashOfTestData1), + std::end(kHashOfTestData1)); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_FILE_HASH_MISMATCH, + base_file_->Initialize(file_path, base::FilePath(), base::File(), + kTestDataLength2, hash_so_far, + std::unique_ptr<crypto::SecureHash>(), false, + &kTestDataBytesWasted)); + set_expected_error(download::DOWNLOAD_INTERRUPT_REASON_FILE_HASH_MISMATCH); +} + +// Open a large existing file with a known hash and continue writing to it. 
+TEST_F(BaseFileTest, ExistingBaseFileLargeSizeKnownHash) { + base::FilePath file_path = temp_dir_.GetPath().AppendASCII("existing"); + std::string big_buffer(1024 * 200, 'a'); + ASSERT_EQ(static_cast<int>(big_buffer.size()), + base::WriteFile(file_path, big_buffer.data(), big_buffer.size())); + + // Hash of partial file (1024*200 * 'a') + const uint8_t kExpectedPartialHash[] = { + 0x4b, 0x4f, 0x0f, 0x46, 0xac, 0x02, 0xd1, 0x77, 0xde, 0xa0, 0xab, + 0x36, 0xa6, 0x6a, 0x65, 0x78, 0x40, 0xe2, 0xfb, 0x98, 0xb2, 0x0b, + 0xb2, 0x7a, 0x68, 0x8d, 0xb4, 0xd8, 0xea, 0x9c, 0xd2, 0x2c}; + + // Hash of entire file (1024*400 * 'a') + const uint8_t kExpectedFullHash[] = { + 0x0c, 0xe9, 0xf6, 0x78, 0x6b, 0x0f, 0x58, 0x49, 0x36, 0xe8, 0x83, + 0xc5, 0x09, 0x16, 0xbc, 0x5e, 0x2d, 0x07, 0x95, 0xb9, 0x42, 0x20, + 0x41, 0x7c, 0xb3, 0x38, 0xd3, 0xf4, 0xe0, 0x78, 0x89, 0x46}; + + ASSERT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + base_file_->Initialize(file_path, base::FilePath(), base::File(), + big_buffer.size(), + std::string(std::begin(kExpectedPartialHash), + std::end(kExpectedPartialHash)), + std::unique_ptr<crypto::SecureHash>(), false, + &kTestDataBytesWasted)); + set_expected_data(big_buffer); // Contents of the file on Open. + ASSERT_TRUE(AppendDataToFile(big_buffer)); + ExpectHashValue(kExpectedFullHash, base_file_->Finish()); +} + +// Open a large existing file. The contents doesn't match the known hash. 
+TEST_F(BaseFileTest, ExistingBaseFileLargeSizeIncorrectHash) { + base::FilePath file_path = temp_dir_.GetPath().AppendASCII("existing"); + std::string big_buffer(1024 * 200, 'a'); + ASSERT_EQ(static_cast<int>(big_buffer.size()), + base::WriteFile(file_path, big_buffer.data(), big_buffer.size())); + + // Incorrect hash of partial file (1024*200 * 'a') + const uint8_t kExpectedPartialHash[] = { + 0xc2, 0xa9, 0x08, 0xd9, 0x8f, 0x5d, 0xf9, 0x87, 0xad, 0xe4, 0x1b, + 0x5f, 0xce, 0x21, 0x30, 0x67, 0xef, 0x6c, 0xc2, 0x1e, 0xf2, 0x24, + 0x02, 0x12, 0xa4, 0x1e, 0x54, 0xb5, 0xe7, 0xc2, 0x8a, 0xe5}; + + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_FILE_HASH_MISMATCH, + base_file_->Initialize(file_path, base::FilePath(), base::File(), + big_buffer.size(), + std::string(std::begin(kExpectedPartialHash), + std::end(kExpectedPartialHash)), + std::unique_ptr<crypto::SecureHash>(), false, + &kTestDataBytesWasted)); + set_expected_error(download::DOWNLOAD_INTERRUPT_REASON_FILE_HASH_MISMATCH); +} + +// Open an existing file. The size of the file is too short. +TEST_F(BaseFileTest, ExistingBaseFileTooShort) { + base::FilePath file_path = temp_dir_.GetPath().AppendASCII("existing"); + ASSERT_EQ(kTestDataLength1, + base::WriteFile(file_path, kTestData1, kTestDataLength1)); + + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_FILE_TOO_SHORT, + base_file_->Initialize(file_path, base::FilePath(), base::File(), + kTestDataLength1 + 1, std::string(), + std::unique_ptr<crypto::SecureHash>(), false, + &kTestDataBytesWasted)); + set_expected_error(download::DOWNLOAD_INTERRUPT_REASON_FILE_TOO_SHORT); +} + +// Open an existing file. The size is larger than expected. 
+TEST_F(BaseFileTest, ExistingBaseFileKnownHashTooLong) { + base::FilePath file_path = temp_dir_.GetPath().AppendASCII("existing"); + std::string contents; + contents.append(kTestData1); + contents.append("Something extra"); + ASSERT_EQ(static_cast<int>(contents.size()), + base::WriteFile(file_path, contents.data(), contents.size())); + + std::string hash_so_far(std::begin(kHashOfTestData1), + std::end(kHashOfTestData1)); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + base_file_->Initialize(file_path, base::FilePath(), base::File(), + kTestDataLength1, hash_so_far, + std::unique_ptr<crypto::SecureHash>(), false, + &kTestDataBytesWasted)); + set_expected_data(kTestData1); // Our starting position. + ASSERT_TRUE(AppendDataToFile(kTestData2)); + ASSERT_TRUE(AppendDataToFile(kTestData3)); + ExpectHashValue(kHashOfTestData1To3, base_file_->Finish()); +} + +// Open an existing file. The size is larger than expected and the hash is +// unknown. +TEST_F(BaseFileTest, ExistingBaseFileUnknownHashTooLong) { + base::FilePath file_path = temp_dir_.GetPath().AppendASCII("existing"); + std::string contents; + contents.append(kTestData1); + contents.append("Something extra"); + ASSERT_EQ(static_cast<int>(contents.size()), + base::WriteFile(file_path, contents.data(), contents.size())); + + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + base_file_->Initialize(file_path, base::FilePath(), base::File(), + kTestDataLength1, std::string(), + std::unique_ptr<crypto::SecureHash>(), false, + &kTestDataBytesWasted)); + set_expected_data(kTestData1); + ASSERT_TRUE(AppendDataToFile(kTestData2)); + ASSERT_TRUE(AppendDataToFile(kTestData3)); + ExpectHashValue(kHashOfTestData1To3, base_file_->Finish()); +} + +// Similar to ExistingBaseFileKnownHashTooLong test, but with a file large +// enough to require multiple Read()s to complete. This provides additional code +// coverage for the CalculatePartialHash() logic. 
+TEST_F(BaseFileTest, ExistingBaseFileUnknownHashTooLongForLargeFile) { + base::FilePath file_path = temp_dir_.GetPath().AppendASCII("existing"); + const size_t kFileSize = 1024 * 1024; + const size_t kIntermediateSize = kFileSize / 2 + 111; + // |contents| is 100 bytes longer than kIntermediateSize. The latter is the + // expected size. + std::string contents(kIntermediateSize + 100, 'a'); + ASSERT_EQ(static_cast<int>(contents.size()), + base::WriteFile(file_path, contents.data(), contents.size())); + + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + base_file_->Initialize(file_path, base::FilePath(), base::File(), + kIntermediateSize, std::string(), + std::unique_ptr<crypto::SecureHash>(), false, + &kTestDataBytesWasted)); + // The extra bytes should be stripped during Initialize(). + contents.resize(kIntermediateSize, 'a'); + set_expected_data(contents); + std::string new_data(kFileSize - kIntermediateSize, 'a'); + ASSERT_TRUE(AppendDataToFile(new_data)); + const uint8_t kExpectedHash[] = { + 0x9b, 0xc1, 0xb2, 0xa2, 0x88, 0xb2, 0x6a, 0xf7, 0x25, 0x7a, 0x36, + 0x27, 0x7a, 0xe3, 0x81, 0x6a, 0x7d, 0x4f, 0x16, 0xe8, 0x9c, 0x1e, + 0x7e, 0x77, 0xd0, 0xa5, 0xc4, 0x8b, 0xad, 0x62, 0xb3, 0x60, + }; + ExpectHashValue(kExpectedHash, base_file_->Finish()); +} + +// Test that a temporary file is created in the default download directory. +TEST_F(BaseFileTest, CreatedInDefaultDirectory) { + ASSERT_TRUE(base_file_->full_path().empty()); + ASSERT_TRUE(InitializeFile()); + EXPECT_FALSE(base_file_->full_path().empty()); + + // On Windows, CreateTemporaryFileInDir() will cause a path with short names + // to be expanded into a path with long names. Thus temp_dir.GetPath() might + // not + // be a string-wise match to base_file_->full_path().DirName() even though + // they are in the same directory. 
+ base::FilePath temp_file; + ASSERT_TRUE(base::CreateTemporaryFileInDir(temp_dir_.GetPath(), &temp_file)); + ASSERT_FALSE(temp_file.empty()); + EXPECT_STREQ(temp_file.DirName().value().c_str(), + base_file_->full_path().DirName().value().c_str()); + base_file_->Finish(); +} + +TEST_F(BaseFileTest, NoDoubleDeleteAfterCancel) { + ASSERT_TRUE(InitializeFile()); + base::FilePath full_path = base_file_->full_path(); + ASSERT_FALSE(full_path.empty()); + ASSERT_TRUE(base::PathExists(full_path)); + + base_file_->Cancel(); + ASSERT_FALSE(base::PathExists(full_path)); + + const char kData[] = "hello"; + const int kDataLength = static_cast<int>(arraysize(kData) - 1); + ASSERT_EQ(kDataLength, base::WriteFile(full_path, kData, kDataLength)); + // The file that we created here should stick around when the BaseFile is + // destroyed during TearDown. + expect_file_survives_ = true; +} + +// Test that writing data to a sparse file works. +TEST_F(BaseFileTest, WriteDataToSparseFile) { + base::FilePath file_path = temp_dir_.GetPath().AppendASCII("existing"); + std::string contents = kTestData1; + ASSERT_EQ(static_cast<int>(contents.size()), + base::WriteFile(file_path, contents.data(), contents.size())); + + base_file_->Initialize(file_path, base::FilePath(), base::File(), + kTestDataLength1, std::string(), + std::unique_ptr<crypto::SecureHash>(), true, + &kTestDataBytesWasted); + // This will create a hole in the file. + base_file_->WriteDataToFile(kTestDataLength1 + kTestDataLength2, kTestData3, + kTestDataLength3); + // This should fill the hole. 
+ base_file_->WriteDataToFile(kTestDataLength1, kTestData2, kTestDataLength2); + set_expected_data(contents + kTestData2 + kTestData3); + ExpectHashValue(kHashOfTestData1To3, base_file_->Finish()); +} + +} // namespace download diff --git a/chromium/components/download/internal/common/base_file_win.cc b/chromium/components/download/internal/common/base_file_win.cc new file mode 100644 index 00000000000..df78b6956d5 --- /dev/null +++ b/chromium/components/download/internal/common/base_file_win.cc @@ -0,0 +1,325 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/public/common/base_file.h" + +#include <windows.h> + +#include <cguid.h> +#include <objbase.h> +#include <shellapi.h> + +#include "base/files/file.h" +#include "base/files/file_util.h" +#include "base/guid.h" +#include "base/macros.h" +#include "base/metrics/histogram_macros.h" +#include "base/strings/utf_string_conversions.h" +#include "base/threading/thread_restrictions.h" +#include "components/download/public/common/download_interrupt_reasons_utils.h" +#include "components/download/public/common/download_stats.h" + +namespace download { +namespace { + +const int kAllSpecialShFileOperationCodes[] = { + // Should be kept in sync with the case statement below. + ERROR_ACCESS_DENIED, + ERROR_SHARING_VIOLATION, + ERROR_INVALID_PARAMETER, + 0x71, + 0x72, + 0x73, + 0x74, + 0x75, + 0x76, + 0x78, + 0x79, + 0x7A, + 0x7C, + 0x7D, + 0x7E, + 0x80, + 0x81, + 0x82, + 0x83, + 0x84, + 0x85, + 0x86, + 0x87, + 0x88, + 0xB7, + 0x402, + 0x10000, + 0x10074, +}; + +// Maps the result of a call to |SHFileOperation()| onto a +// |DownloadInterruptReason|. +// +// These return codes are *old* (as in, DOS era), and specific to +// |SHFileOperation()|. +// They do not appear in any windows header. +// +// See http://msdn.microsoft.com/en-us/library/bb762164(VS.85).aspx. 
+DownloadInterruptReason MapShFileOperationCodes(int code) { + DownloadInterruptReason result = DOWNLOAD_INTERRUPT_REASON_NONE; + + // Check these pre-Win32 error codes first, then check for matches + // in Winerror.h. + // This switch statement should be kept in sync with the list of codes + // above. + switch (code) { + // Not a pre-Win32 error code; here so that this particular case shows up in + // our histograms. Unfortunately, it is used not just to signal actual + // ACCESS_DENIED errors, but many other errors as well. So we treat it as a + // transient error. + case ERROR_ACCESS_DENIED: // Access is denied. + result = DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR; + break; + + // This isn't documented but returned from SHFileOperation. Sharing + // violations indicate that another process had the file open while we were + // trying to rename. Anti-virus is believed to be the cause of this error in + // the wild. Treated as a transient error on the assumption that the file + // will be made available for renaming at a later time. + case ERROR_SHARING_VIOLATION: + result = DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR; + break; + + // This is also not a documented return value of SHFileOperation, but has + // been observed in the wild. We are treating it as a transient error based + // on the cases we have seen so far. See http://crbug.com/368455. + case ERROR_INVALID_PARAMETER: + result = DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR; + break; + + // The source and destination files are the same file. + // DE_SAMEFILE == 0x71 + case 0x71: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // The operation was canceled by the user, or silently canceled if the + // appropriate flags were supplied to SHFileOperation. + // DE_OPCANCELLED == 0x75 + case 0x75: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // Security settings denied access to the source. 
+ // DE_ACCESSDENIEDSRC == 0x78 + case 0x78: + result = DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED; + break; + + // The source or destination path exceeded or would exceed MAX_PATH. + // DE_PATHTOODEEP == 0x79 + case 0x79: + result = DOWNLOAD_INTERRUPT_REASON_FILE_NAME_TOO_LONG; + break; + + // The path in the source or destination or both was invalid. + // DE_INVALIDFILES == 0x7C + case 0x7C: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // The destination path is an existing file. + // DE_FLDDESTISFILE == 0x7E + case 0x7E: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // The destination path is an existing folder. + // DE_FILEDESTISFLD == 0x80 + case 0x80: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // The name of the file exceeds MAX_PATH. + // DE_FILENAMETOOLONG == 0x81 + case 0x81: + result = DOWNLOAD_INTERRUPT_REASON_FILE_NAME_TOO_LONG; + break; + + // The destination is a read-only CD-ROM, possibly unformatted. + // DE_DEST_IS_CDROM == 0x82 + case 0x82: + result = DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED; + break; + + // The destination is a read-only DVD, possibly unformatted. + // DE_DEST_IS_DVD == 0x83 + case 0x83: + result = DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED; + break; + + // The destination is a writable CD-ROM, possibly unformatted. + // DE_DEST_IS_CDRECORD == 0x84 + case 0x84: + result = DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED; + break; + + // The file involved in the operation is too large for the destination + // media or file system. + // DE_FILE_TOO_LARGE == 0x85 + case 0x85: + result = DOWNLOAD_INTERRUPT_REASON_FILE_TOO_LARGE; + break; + + // The source is a read-only CD-ROM, possibly unformatted. + // DE_SRC_IS_CDROM == 0x86 + case 0x86: + result = DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED; + break; + + // The source is a read-only DVD, possibly unformatted. 
+ // DE_SRC_IS_DVD == 0x87 + case 0x87: + result = DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED; + break; + + // The source is a writable CD-ROM, possibly unformatted. + // DE_SRC_IS_CDRECORD == 0x88 + case 0x88: + result = DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED; + break; + + // MAX_PATH was exceeded during the operation. + // DE_ERROR_MAX == 0xB7 + case 0xB7: + result = DOWNLOAD_INTERRUPT_REASON_FILE_NAME_TOO_LONG; + break; + + // An unspecified error occurred on the destination. + // XE_ERRORONDEST == 0x10000 + case 0x10000: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // Multiple file paths were specified in the source buffer, but only one + // destination file path. + // DE_MANYSRC1DEST == 0x72 + case 0x72: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // Rename operation was specified but the destination path is + // a different directory. Use the move operation instead. + // DE_DIFFDIR == 0x73 + case 0x73: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // The source is a root directory, which cannot be moved or renamed. + // DE_ROOTDIR == 0x74 + case 0x74: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // The destination is a subtree of the source. + // DE_DESTSUBTREE == 0x76 + case 0x76: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // The operation involved multiple destination paths, + // which can fail in the case of a move operation. + // DE_MANYDEST == 0x7A + case 0x7A: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // The source and destination have the same parent folder. + // DE_DESTSAMETREE == 0x7D + case 0x7D: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // An unknown error occurred. This is typically due to an invalid path in + // the source or destination. This error does not occur on Windows Vista + // and later. 
+ // DE_UNKNOWN_ERROR == 0x402 + case 0x402: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + + // Destination is a root directory and cannot be renamed. + // DE_ROOTDIR | ERRORONDEST == 0x10074 + case 0x10074: + result = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + break; + } + + // Narrow down on the reason we're getting some catch-all interrupt reasons. + if (result == DOWNLOAD_INTERRUPT_REASON_FILE_FAILED) { + UMA_HISTOGRAM_CUSTOM_ENUMERATION( + "Download.MapWinShErrorFileFailed", code, + base::CustomHistogram::ArrayToCustomRanges( + kAllSpecialShFileOperationCodes, + arraysize(kAllSpecialShFileOperationCodes))); + } + + if (result == DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED) { + UMA_HISTOGRAM_CUSTOM_ENUMERATION( + "Download.MapWinShErrorAccessDenied", code, + base::CustomHistogram::ArrayToCustomRanges( + kAllSpecialShFileOperationCodes, + arraysize(kAllSpecialShFileOperationCodes))); + } + + if (result == DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR) { + UMA_HISTOGRAM_CUSTOM_ENUMERATION( + "Download.MapWinShErrorTransientError", code, + base::CustomHistogram::ArrayToCustomRanges( + kAllSpecialShFileOperationCodes, + arraysize(kAllSpecialShFileOperationCodes))); + } + + if (result != DOWNLOAD_INTERRUPT_REASON_NONE) + return result; + + // If not one of the above codes, it should be a standard Windows error code. + return ConvertFileErrorToInterruptReason( + base::File::OSErrorToFileError(code)); +} + +} // namespace + +// Renames a file using the SHFileOperation API to ensure that the target file +// gets the correct default security descriptor in the new path. +// Returns a network error, or net::OK for success. +DownloadInterruptReason BaseFile::MoveFileAndAdjustPermissions( + const base::FilePath& new_path) { + base::AssertBlockingAllowed(); + + // The parameters to SHFileOperation must be terminated with 2 NULL chars. 
+ base::FilePath::StringType source = full_path_.value(); + base::FilePath::StringType target = new_path.value(); + + source.append(1, L'\0'); + target.append(1, L'\0'); + + SHFILEOPSTRUCT move_info = {0}; + move_info.wFunc = FO_MOVE; + move_info.pFrom = source.c_str(); + move_info.pTo = target.c_str(); + move_info.fFlags = FOF_SILENT | FOF_NOCONFIRMATION | FOF_NOERRORUI | + FOF_NOCONFIRMMKDIR | FOF_NOCOPYSECURITYATTRIBS; + + int result = SHFileOperation(&move_info); + DownloadInterruptReason interrupt_reason = DOWNLOAD_INTERRUPT_REASON_NONE; + + if (result == 0 && move_info.fAnyOperationsAborted) + interrupt_reason = DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + else if (result != 0) + interrupt_reason = MapShFileOperationCodes(result); + + if (interrupt_reason != DOWNLOAD_INTERRUPT_REASON_NONE) + return LogInterruptReason("SHFileOperation", result, interrupt_reason); + return interrupt_reason; +} + +} // namespace download diff --git a/chromium/components/download/internal/common/base_file_win_unittest.cc b/chromium/components/download/internal/common/base_file_win_unittest.cc new file mode 100644 index 00000000000..bd415ff15bb --- /dev/null +++ b/chromium/components/download/internal/common/base_file_win_unittest.cc @@ -0,0 +1,116 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "components/download/public/common/base_file.h" + +#include "base/files/file_util.h" +#include "base/files/scoped_temp_dir.h" +#include "components/download/public/common/download_interrupt_reasons.h" +#include "components/download/public/common/download_item.h" +#include "net/base/filename_util.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace download { + +TEST(BaseFileWin, AnnotateWithSourceInformation) { + const base::FilePath::CharType kZoneIdentifierStreamName[] = + FILE_PATH_LITERAL(":Zone.Identifier"); + const char kInternetZoneIdentifierString[] = "[ZoneTransfer]\r\nZoneId=3\r\n"; + + struct { + const char* const url; + const char* const referrer; + bool expected_internet_zone; + } kTestCases[] = { + // Test cases where we expect a MOTW. + {"http://example.com", "http://example.com", true}, + {"", "http://example.com", true}, + {"", "", true}, + {"http://example.com", "", true}, + {"data:text/plain,Foo", "http://example.com", true}, + {"data:text/plain,Foo", "", true}, + {"data:text/plain,Foo", "data:text/plain,Bar", true}, + {"data:text/plain,Foo", "ftp://localhost/foo", true}, + {"http://example.com", "http://localhost/foo", true}, + {"ftp://example.com/foo", "", true}, + + // Test cases where we don't expect a MOTW. These test cases result in + // different behavior across Windows versions. + {"ftp://localhost/foo", "", false}, + {"http://localhost/foo", "", false}, + {"", "http://localhost/foo", false}, + {"file:///exists.txt", "", false}, + {"file:///exists.txt", "http://example.com", false}, + {"file:///does-not-exist.txt", "", false}, + }; + + base::ScopedTempDir target_directory; + ASSERT_TRUE(target_directory.CreateUniqueTempDir()); + + ASSERT_EQ( + 6, base::WriteFile(target_directory.GetPath().AppendASCII("exists.txt"), + "Exists", 6)); + + for (const auto& test_case : kTestCases) { + GURL url(test_case.url); + GURL referrer(test_case.referrer); + + // Resolve file:// URLs relative to our temp directory. 
+ if (url.SchemeIsFile()) { + base::FilePath relative_path = + base::FilePath().AppendASCII(url.path().substr(1)); + url = net::FilePathToFileURL( + target_directory.GetPath().Append(relative_path)); + } + + SCOPED_TRACE(::testing::Message() << "Source URL: " << url.spec() + << " Referrer: " << test_case.referrer); + + BaseFile base_file(download::DownloadItem::kInvalidId); + int64_t bytes_wasted = 0; // unused + ASSERT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + base_file.Initialize(base::FilePath(), target_directory.GetPath(), + base::File(), 0, std::string(), + std::unique_ptr<crypto::SecureHash>(), false, + &bytes_wasted)); + ASSERT_FALSE(base_file.full_path().empty()); + ASSERT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + base_file.Rename( + target_directory.GetPath().AppendASCII("test_file.doc"))); + ASSERT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + base_file.AnnotateWithSourceInformation( + "7B2CEE7C-DC81-4160-86F1-9C968597118F", url, referrer)); + base_file.Detach(); + base_file.Finish(); + + base::FilePath path = base_file.full_path(); + base::FilePath zone_identifier_stream(path.value() + + kZoneIdentifierStreamName); + + ASSERT_TRUE(base::PathExists(path)); + + std::string zone_identifier; + base::ReadFileToString(zone_identifier_stream, &zone_identifier); + + if (test_case.expected_internet_zone) { + EXPECT_STREQ(kInternetZoneIdentifierString, zone_identifier.c_str()); + } else { + // Seeing an unexpected zone identifier is not an error, but we log a + // warning just the same so that such cases can be identified during + // manual testing. 
+ if (zone_identifier == kInternetZoneIdentifierString) { + LOG(WARNING) << "Unexpected internet zone annotation for Source:" + << url.spec() << " Referrer:" << test_case.referrer; + } else if (!zone_identifier.empty()) { + LOG(WARNING) << "Unexpected zone annotation for Source:" << url.spec() + << " Referrer:" << test_case.referrer + << " Annotation:" << std::endl + << zone_identifier; + } + } + base::DeleteFile(path, false); + } +} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_file_factory.cc b/chromium/components/download/internal/common/download_file_factory.cc new file mode 100644 index 00000000000..5caf987bb1c --- /dev/null +++ b/chromium/components/download/internal/common/download_file_factory.cc @@ -0,0 +1,25 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/public/common/download_file_factory.h" + +#include <utility> + +#include "components/download/public/common/download_file_impl.h" + +namespace download { + +DownloadFileFactory::~DownloadFileFactory() {} + +DownloadFile* DownloadFileFactory::CreateFile( + std::unique_ptr<DownloadSaveInfo> save_info, + const base::FilePath& default_downloads_directory, + std::unique_ptr<InputStream> stream, + uint32_t download_id, + base::WeakPtr<DownloadDestinationObserver> observer) { + return new DownloadFileImpl(std::move(save_info), default_downloads_directory, + std::move(stream), download_id, observer); +} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_file_impl.cc b/chromium/components/download/internal/common/download_file_impl.cc new file mode 100644 index 00000000000..7acae0764dd --- /dev/null +++ b/chromium/components/download/internal/common/download_file_impl.cc @@ -0,0 +1,810 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/public/common/download_file_impl.h" + +#include <string> +#include <utility> + +#include "base/bind.h" +#include "base/files/file_util.h" +#include "base/message_loop/message_loop.h" +#include "base/strings/stringprintf.h" +#include "base/threading/sequenced_task_runner_handle.h" +#include "base/time/time.h" +#include "base/values.h" +#include "components/download/internal/common/parallel_download_utils.h" +#include "components/download/public/common/download_create_info.h" +#include "components/download/public/common/download_destination_observer.h" +#include "components/download/public/common/download_interrupt_reasons_utils.h" +#include "components/download/public/common/download_stats.h" +#include "crypto/secure_hash.h" +#include "crypto/sha2.h" +#include "mojo/public/c/system/types.h" +#include "net/base/io_buffer.h" +#include "services/network/public/cpp/features.h" + +namespace download { + +namespace { + +const int kUpdatePeriodMs = 500; +const int kMaxTimeBlockingFileThreadMs = 1000; + +// These constants control the default retry behavior for failing renames. Each +// retry is performed after a delay that is twice the previous delay. The +// initial delay is specified by kInitialRenameRetryDelayMs. +const int kInitialRenameRetryDelayMs = 200; + +// Number of times a failing rename is retried before giving up. +const int kMaxRenameRetries = 3; + +// Because DownloadSaveInfo::kLengthFullContent is 0, we should avoid using +// 0 for length if we found that a stream can no longer write any data. +const int kNoBytesToWrite = -1; + +// Default content length when the potential file size is not yet determined. 
+const int kUnknownContentLength = -1; + +} // namespace + +DownloadFileImpl::SourceStream::SourceStream( + int64_t offset, + int64_t length, + std::unique_ptr<InputStream> stream) + : offset_(offset), + length_(length), + bytes_written_(0), + finished_(false), + index_(0u), + input_stream_(std::move(stream)) {} + +DownloadFileImpl::SourceStream::~SourceStream() = default; + +void DownloadFileImpl::SourceStream::Initialize() { + input_stream_->Initialize(); +} + +void DownloadFileImpl::SourceStream::OnWriteBytesToDisk(int64_t bytes_write) { + bytes_written_ += bytes_write; +} + +void DownloadFileImpl::SourceStream::TruncateLengthWithWrittenDataBlock( + int64_t offset, + int64_t bytes_written) { + DCHECK_GT(bytes_written, 0); + if (length_ == kNoBytesToWrite) + return; + + if (offset <= offset_) { + if (offset + bytes_written > offset_) { + length_ = kNoBytesToWrite; + finished_ = true; + } + return; + } + + if (length_ == DownloadSaveInfo::kLengthFullContent || + length_ > offset - offset_) { + length_ = offset - offset_; + } +} + +void DownloadFileImpl::SourceStream::RegisterDataReadyCallback( + const mojo::SimpleWatcher::ReadyCallback& callback) { + input_stream_->RegisterDataReadyCallback(callback); +} + +void DownloadFileImpl::SourceStream::ClearDataReadyCallback() { + input_stream_->ClearDataReadyCallback(); +} + +DownloadInterruptReason DownloadFileImpl::SourceStream::GetCompletionStatus() + const { + return input_stream_->GetCompletionStatus(); +} + +void DownloadFileImpl::SourceStream::RegisterCompletionCallback( + DownloadFileImpl::SourceStream::CompletionCallback callback) { + input_stream_->RegisterCompletionCallback( + base::BindOnce(std::move(callback), base::Unretained(this))); +} + +InputStream::StreamState DownloadFileImpl::SourceStream::Read( + scoped_refptr<net::IOBuffer>* data, + size_t* length) { + return input_stream_->Read(data, length); +} + +DownloadFileImpl::DownloadFileImpl( + std::unique_ptr<DownloadSaveInfo> save_info, + const 
base::FilePath& default_download_directory, + std::unique_ptr<InputStream> stream, + uint32_t download_id, + base::WeakPtr<DownloadDestinationObserver> observer) + : DownloadFileImpl(std::move(save_info), + default_download_directory, + download_id, + observer) { + source_streams_[save_info_->offset] = std::make_unique<SourceStream>( + save_info_->offset, save_info_->length, std::move(stream)); +} + +DownloadFileImpl::DownloadFileImpl( + std::unique_ptr<DownloadSaveInfo> save_info, + const base::FilePath& default_download_directory, + uint32_t download_id, + base::WeakPtr<DownloadDestinationObserver> observer) + : file_(download_id), + save_info_(std::move(save_info)), + default_download_directory_(default_download_directory), + potential_file_length_(kUnknownContentLength), + bytes_seen_(0), + num_active_streams_(0), + record_stream_bandwidth_(false), + bytes_seen_with_parallel_streams_(0), + bytes_seen_without_parallel_streams_(0), + is_paused_(false), + download_id_(download_id), + main_task_runner_(base::MessageLoop::current()->task_runner()), + observer_(observer), + weak_factory_(this) { + TRACE_EVENT_INSTANT0("download", "DownloadFileCreated", + TRACE_EVENT_SCOPE_THREAD); + TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("download", "DownloadFileActive", + download_id); + + DETACH_FROM_SEQUENCE(sequence_checker_); +} + +DownloadFileImpl::~DownloadFileImpl() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + TRACE_EVENT_NESTABLE_ASYNC_END0("download", "DownloadFileActive", + download_id_); +} + +void DownloadFileImpl::Initialize( + InitializeCallback initialize_callback, + const CancelRequestCallback& cancel_request_callback, + const DownloadItem::ReceivedSlices& received_slices, + bool is_parallelizable) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + update_timer_.reset(new base::RepeatingTimer()); + int64_t bytes_so_far = 0; + cancel_request_callback_ = cancel_request_callback; + received_slices_ = received_slices; + + // If the last slice is 
finished, then we know the actual content size. + if (!received_slices_.empty() && received_slices_.back().finished) { + SetPotentialFileLength(received_slices_.back().offset + + received_slices_.back().received_bytes); + } + + if (IsSparseFile()) { + for (const auto& received_slice : received_slices_) { + bytes_so_far += received_slice.received_bytes; + } + } else { + bytes_so_far = save_info_->offset; + } + int64_t bytes_wasted = 0; + DownloadInterruptReason reason = file_.Initialize( + save_info_->file_path, default_download_directory_, + std::move(save_info_->file), bytes_so_far, + save_info_->hash_of_partial_file, std::move(save_info_->hash_state), + IsSparseFile(), &bytes_wasted); + if (reason != DOWNLOAD_INTERRUPT_REASON_NONE) { + main_task_runner_->PostTask( + FROM_HERE, + base::BindOnce(std::move(initialize_callback), reason, bytes_wasted)); + return; + } + download_start_ = base::TimeTicks::Now(); + last_update_time_ = download_start_; + record_stream_bandwidth_ = is_parallelizable; + + // Primarily to make reset to zero in restart visible to owner. + SendUpdate(); + + main_task_runner_->PostTask( + FROM_HERE, base::BindOnce(std::move(initialize_callback), + DOWNLOAD_INTERRUPT_REASON_NONE, bytes_wasted)); + + // Initial pull from the straw from all source streams. + for (auto& source_stream : source_streams_) + RegisterAndActivateStream(source_stream.second.get()); +} + +void DownloadFileImpl::AddInputStream(std::unique_ptr<InputStream> stream, + int64_t offset, + int64_t length) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + // UI thread may not be notified about completion and detach download file, + // clear up the network request. 
+ if (IsDownloadCompleted()) { + CancelRequest(offset); + return; + } + + source_streams_[offset] = + std::make_unique<SourceStream>(offset, length, std::move(stream)); + OnSourceStreamAdded(source_streams_[offset].get()); +} + +void DownloadFileImpl::OnSourceStreamAdded(SourceStream* source_stream) { + // There are writers at different offsets now, create the received slices + // vector if necessary. + if (received_slices_.empty() && TotalBytesReceived() > 0) { + size_t index = AddOrMergeReceivedSliceIntoSortedArray( + DownloadItem::ReceivedSlice(0, TotalBytesReceived()), received_slices_); + DCHECK_EQ(index, 0u); + } + // If the file is initialized, start to write data, or wait until file opened. + if (file_.in_progress()) + RegisterAndActivateStream(source_stream); +} + +DownloadInterruptReason DownloadFileImpl::WriteDataToFile(int64_t offset, + const char* data, + size_t data_len) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + WillWriteToDisk(data_len); + return file_.WriteDataToFile(offset, data, data_len); +} + +bool DownloadFileImpl::CalculateBytesToWrite(SourceStream* source_stream, + size_t bytes_available_to_write, + size_t* bytes_to_write) { + if (source_stream->length() == kNoBytesToWrite) { + *bytes_to_write = 0; + return true; + } + + // If a new slice finds that its target position has already been written, + // terminate the stream. + if (source_stream->bytes_written() == 0) { + for (const auto& received_slice : received_slices_) { + if (received_slice.offset <= source_stream->offset() && + received_slice.offset + received_slice.received_bytes > + source_stream->offset()) { + *bytes_to_write = 0; + return true; + } + } + } + + if (source_stream->length() != DownloadSaveInfo::kLengthFullContent && + source_stream->bytes_written() + + static_cast<int64_t>(bytes_available_to_write) > + source_stream->length()) { + // Write a partial buffer as the incoming data exceeds the length limit. 
+ *bytes_to_write = source_stream->length() - source_stream->bytes_written(); + return true; + } + + *bytes_to_write = bytes_available_to_write; + return false; +} + +void DownloadFileImpl::RenameAndUniquify( + const base::FilePath& full_path, + const RenameCompletionCallback& callback) { + std::unique_ptr<RenameParameters> parameters( + new RenameParameters(UNIQUIFY, full_path, callback)); + RenameWithRetryInternal(std::move(parameters)); +} + +void DownloadFileImpl::RenameAndAnnotate( + const base::FilePath& full_path, + const std::string& client_guid, + const GURL& source_url, + const GURL& referrer_url, + const RenameCompletionCallback& callback) { + std::unique_ptr<RenameParameters> parameters(new RenameParameters( + ANNOTATE_WITH_SOURCE_INFORMATION, full_path, callback)); + parameters->client_guid = client_guid; + parameters->source_url = source_url; + parameters->referrer_url = referrer_url; + RenameWithRetryInternal(std::move(parameters)); +} + +base::TimeDelta DownloadFileImpl::GetRetryDelayForFailedRename( + int attempt_number) { + DCHECK_GE(attempt_number, 0); + // |delay| starts at kInitialRenameRetryDelayMs and increases by a factor of + // 2 at each subsequent retry. Assumes that |retries_left| starts at + // kMaxRenameRetries. Also assumes that kMaxRenameRetries is less than the + // number of bits in an int. 
+ return base::TimeDelta::FromMilliseconds(kInitialRenameRetryDelayMs) * + (1 << attempt_number); +} + +bool DownloadFileImpl::ShouldRetryFailedRename(DownloadInterruptReason reason) { + return reason == DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR; +} + +DownloadInterruptReason DownloadFileImpl::HandleStreamCompletionStatus( + SourceStream* source_stream) { + DownloadInterruptReason reason = source_stream->GetCompletionStatus(); + if (source_stream->length() == DownloadSaveInfo::kLengthFullContent && + !received_slices_.empty() && + (source_stream->offset() == received_slices_.back().offset + + received_slices_.back().received_bytes) && + reason == DOWNLOAD_INTERRUPT_REASON_SERVER_NO_RANGE) { + // We are probably reaching the end of the stream, don't treat this + // as an error. + return DOWNLOAD_INTERRUPT_REASON_NONE; + } + return reason; +} + +void DownloadFileImpl::RenameWithRetryInternal( + std::unique_ptr<RenameParameters> parameters) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + base::FilePath new_path = parameters->new_path; + + if ((parameters->option & UNIQUIFY) && new_path != file_.full_path()) { + int uniquifier = + base::GetUniquePathNumber(new_path, base::FilePath::StringType()); + if (uniquifier > 0) + new_path = new_path.InsertBeforeExtensionASCII( + base::StringPrintf(" (%d)", uniquifier)); + } + + DownloadInterruptReason reason = file_.Rename(new_path); + + // Attempt to retry the rename if possible. If the rename failed and the + // subsequent open also failed, then in_progress() would be false. We don't + // try to retry renames if the in_progress() was false to begin with since we + // have less assurance that the file at file_.full_path() was the one we were + // working with. 
+ if (ShouldRetryFailedRename(reason) && file_.in_progress() && + parameters->retries_left > 0) { + int attempt_number = kMaxRenameRetries - parameters->retries_left; + --parameters->retries_left; + if (parameters->time_of_first_failure.is_null()) + parameters->time_of_first_failure = base::TimeTicks::Now(); + base::SequencedTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce(&DownloadFileImpl::RenameWithRetryInternal, + weak_factory_.GetWeakPtr(), std::move(parameters)), + GetRetryDelayForFailedRename(attempt_number)); + return; + } + + if (!parameters->time_of_first_failure.is_null()) { + RecordDownloadFileRenameResultAfterRetry( + base::TimeTicks::Now() - parameters->time_of_first_failure, reason); + } + + if (reason == DOWNLOAD_INTERRUPT_REASON_NONE && + (parameters->option & ANNOTATE_WITH_SOURCE_INFORMATION)) { + // Doing the annotation after the rename rather than before leaves + // a very small window during which the file has the final name but + // hasn't been marked with the Mark Of The Web. However, it allows + // anti-virus scanners on Windows to actually see the data + // (http://crbug.com/127999) under the correct name (which is information + // it uses). + reason = file_.AnnotateWithSourceInformation(parameters->client_guid, + parameters->source_url, + parameters->referrer_url); + } + + if (reason != DOWNLOAD_INTERRUPT_REASON_NONE) { + // Make sure our information is updated, since we're about to + // error out. + SendUpdate(); + + // Null out callback so that we don't do any more stream processing. + // The request that writes to the pipe should be canceled after + // the download being interrupted. 
+ for (auto& stream : source_streams_) + stream.second->ClearDataReadyCallback(); + + new_path.clear(); + } + + main_task_runner_->PostTask( + FROM_HERE, + base::BindOnce(parameters->completion_callback, reason, new_path)); +} + +void DownloadFileImpl::Detach() { + file_.Detach(); +} + +void DownloadFileImpl::Cancel() { + file_.Cancel(); +} + +void DownloadFileImpl::SetPotentialFileLength(int64_t length) { + DCHECK(potential_file_length_ == length || + potential_file_length_ == kUnknownContentLength) + << "Potential file length changed, the download might have updated."; + + if (length < potential_file_length_ || + potential_file_length_ == kUnknownContentLength) { + potential_file_length_ = length; + } + + // TODO(qinmin): interrupt the download if the received bytes are larger + // than content length limit. + LOG_IF(ERROR, TotalBytesReceived() > potential_file_length_) + << "Received data is larger than the content length limit."; +} + +const base::FilePath& DownloadFileImpl::FullPath() const { + return file_.full_path(); +} + +bool DownloadFileImpl::InProgress() const { + return file_.in_progress(); +} + +void DownloadFileImpl::Pause() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + is_paused_ = true; + record_stream_bandwidth_ = false; +} + +void DownloadFileImpl::Resume() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + DCHECK(is_paused_); + is_paused_ = false; + + if (!base::FeatureList::IsEnabled(network::features::kNetworkService)) + return; + + for (auto& stream : source_streams_) { + SourceStream* source_stream = stream.second.get(); + if (!source_stream->is_finished()) { + StreamActive(source_stream, MOJO_RESULT_OK); + } + } +} + +void DownloadFileImpl::StreamActive(SourceStream* source_stream, + MojoResult result) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + if (base::FeatureList::IsEnabled(network::features::kNetworkService) && + is_paused_) + return; + + base::TimeTicks start(base::TimeTicks::Now()); + base::TimeTicks 
now; + scoped_refptr<net::IOBuffer> incoming_data; + size_t incoming_data_size = 0; + size_t total_incoming_data_size = 0; + size_t num_buffers = 0; + size_t bytes_to_write = 0; + bool should_terminate = false; + InputStream::StreamState state(InputStream::EMPTY); + DownloadInterruptReason reason = DOWNLOAD_INTERRUPT_REASON_NONE; + base::TimeDelta delta( + base::TimeDelta::FromMilliseconds(kMaxTimeBlockingFileThreadMs)); + + // Take care of any file local activity required. + do { + state = source_stream->Read(&incoming_data, &incoming_data_size); + switch (state) { + case InputStream::EMPTY: + should_terminate = (source_stream->length() == kNoBytesToWrite); + break; + case InputStream::HAS_DATA: { + ++num_buffers; + base::TimeTicks write_start(base::TimeTicks::Now()); + should_terminate = CalculateBytesToWrite( + source_stream, incoming_data_size, &bytes_to_write); + DCHECK_GE(incoming_data_size, bytes_to_write); + reason = WriteDataToFile( + source_stream->offset() + source_stream->bytes_written(), + incoming_data.get()->data(), bytes_to_write); + disk_writes_time_ += (base::TimeTicks::Now() - write_start); + bytes_seen_ += bytes_to_write; + total_incoming_data_size += bytes_to_write; + if (reason == DOWNLOAD_INTERRUPT_REASON_NONE) { + int64_t prev_bytes_written = source_stream->bytes_written(); + source_stream->OnWriteBytesToDisk(bytes_to_write); + if (!IsSparseFile()) + break; + // If the write operation creates a new slice, add it to the + // |received_slices_| and update all the entries in + // |source_streams_|. 
+ if (bytes_to_write > 0 && prev_bytes_written == 0) { + AddNewSlice(source_stream->offset(), bytes_to_write); + } else { + received_slices_[source_stream->index()].received_bytes += + bytes_to_write; + } + } + } break; + case InputStream::WAIT_FOR_COMPLETION: + source_stream->RegisterCompletionCallback(base::BindOnce( + &DownloadFileImpl::OnStreamCompleted, weak_factory_.GetWeakPtr())); + break; + case InputStream::COMPLETE: + break; + default: + NOTREACHED(); + break; + } + now = base::TimeTicks::Now(); + } while (state == InputStream::HAS_DATA && + reason == DOWNLOAD_INTERRUPT_REASON_NONE && now - start <= delta && + !should_terminate); + + // If we're stopping to yield the thread, post a task so we come back. + if (state == InputStream::HAS_DATA && now - start > delta && + !should_terminate) { + base::SequencedTaskRunnerHandle::Get()->PostTask( + FROM_HERE, base::BindOnce(&DownloadFileImpl::StreamActive, + weak_factory_.GetWeakPtr(), source_stream, + MOJO_RESULT_OK)); + } + + if (total_incoming_data_size) + RecordFileThreadReceiveBuffers(num_buffers); + + RecordContiguousWriteTime(now - start); + + if (state == InputStream::COMPLETE) + OnStreamCompleted(source_stream); + else + NotifyObserver(source_stream, reason, state, should_terminate); + + TRACE_EVENT_INSTANT2("download", "DownloadStreamDrained", + TRACE_EVENT_SCOPE_THREAD, "stream_size", + total_incoming_data_size, "num_buffers", num_buffers); +} + +void DownloadFileImpl::OnStreamCompleted(SourceStream* source_stream) { + DownloadInterruptReason reason = HandleStreamCompletionStatus(source_stream); + + SendUpdate(); + + NotifyObserver(source_stream, reason, InputStream::COMPLETE, false); +} + +void DownloadFileImpl::NotifyObserver(SourceStream* source_stream, + DownloadInterruptReason reason, + InputStream::StreamState stream_state, + bool should_terminate) { + if (reason != DOWNLOAD_INTERRUPT_REASON_NONE) { + HandleStreamError(source_stream, reason); + } else if (stream_state == InputStream::COMPLETE || 
should_terminate) { + // Signal successful completion or termination of the current stream. + source_stream->ClearDataReadyCallback(); + source_stream->set_finished(true); + + if (should_terminate) + CancelRequest(source_stream->offset()); + if (source_stream->length() == DownloadSaveInfo::kLengthFullContent) { + // Mark received slice as finished. + if (IsSparseFile() && source_stream->bytes_written() > 0) { + DCHECK_GT(received_slices_.size(), source_stream->index()) + << "Received slice index out of bound!"; + received_slices_[source_stream->index()].finished = true; + } + + SetPotentialFileLength(source_stream->offset() + + source_stream->bytes_written()); + } + num_active_streams_--; + + // Inform observers. + SendUpdate(); + + // All the stream reader are completed, shut down file IO processing. + if (IsDownloadCompleted()) { + RecordFileBandwidth(bytes_seen_, disk_writes_time_, + base::TimeTicks::Now() - download_start_); + if (record_stream_bandwidth_) { + RecordParallelizableDownloadStats( + bytes_seen_with_parallel_streams_, + download_time_with_parallel_streams_, + bytes_seen_without_parallel_streams_, + download_time_without_parallel_streams_, IsSparseFile()); + } + weak_factory_.InvalidateWeakPtrs(); + std::unique_ptr<crypto::SecureHash> hash_state = file_.Finish(); + update_timer_.reset(); + main_task_runner_->PostTask( + FROM_HERE, + base::BindOnce(&DownloadDestinationObserver::DestinationCompleted, + observer_, TotalBytesReceived(), + std::move(hash_state))); + } + } +} + +void DownloadFileImpl::RegisterAndActivateStream(SourceStream* source_stream) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + source_stream->Initialize(); + source_stream->RegisterDataReadyCallback( + base::Bind(&DownloadFileImpl::StreamActive, weak_factory_.GetWeakPtr(), + source_stream)); + // Truncate |source_stream|'s length if necessary. 
+ for (const auto& received_slice : received_slices_) { + source_stream->TruncateLengthWithWrittenDataBlock( + received_slice.offset, received_slice.received_bytes); + } + num_active_streams_++; + StreamActive(source_stream, MOJO_RESULT_OK); +} + +int64_t DownloadFileImpl::TotalBytesReceived() const { + return file_.bytes_so_far(); +} + +void DownloadFileImpl::SendUpdate() { + // TODO(qinmin): For each active stream, add the slice it has written so + // far along with received_slices_. + main_task_runner_->PostTask( + FROM_HERE, + base::BindOnce(&DownloadDestinationObserver::DestinationUpdate, observer_, + TotalBytesReceived(), rate_estimator_.GetCountPerSecond(), + received_slices_)); +} + +void DownloadFileImpl::WillWriteToDisk(size_t data_len) { + if (!update_timer_->IsRunning()) { + update_timer_->Start(FROM_HERE, + base::TimeDelta::FromMilliseconds(kUpdatePeriodMs), + this, &DownloadFileImpl::SendUpdate); + } + rate_estimator_.Increment(data_len); + base::TimeTicks now = base::TimeTicks::Now(); + base::TimeDelta time_elapsed = (now - last_update_time_); + last_update_time_ = now; + if (num_active_streams_ > 1) { + download_time_with_parallel_streams_ += time_elapsed; + bytes_seen_with_parallel_streams_ += data_len; + } else { + download_time_without_parallel_streams_ += time_elapsed; + bytes_seen_without_parallel_streams_ += data_len; + } +} + +void DownloadFileImpl::AddNewSlice(int64_t offset, int64_t length) { + size_t index = AddOrMergeReceivedSliceIntoSortedArray( + DownloadItem::ReceivedSlice(offset, length), received_slices_); + // Check if the slice is added as a new slice, or merged with an existing one. + bool slice_added = (offset == received_slices_[index].offset); + // Update the index of exising SourceStreams. 
+ for (auto& stream : source_streams_) { + SourceStream* source_stream = stream.second.get(); + if (source_stream->offset() > offset) { + if (slice_added && source_stream->bytes_written() > 0) + source_stream->set_index(source_stream->index() + 1); + } else if (source_stream->offset() == offset) { + source_stream->set_index(index); + } else { + source_stream->TruncateLengthWithWrittenDataBlock(offset, length); + } + } +} + +bool DownloadFileImpl::IsDownloadCompleted() { + for (auto& stream : source_streams_) { + if (!stream.second->is_finished()) + return false; + } + + if (!IsSparseFile()) + return true; + + // Verify that all the file slices have been downloaded. + std::vector<DownloadItem::ReceivedSlice> slices_to_download = + FindSlicesToDownload(received_slices_); + if (slices_to_download.size() > 1) { + // If there are 1 or more holes in the file, download is not finished. + // Some streams might not have been added to |source_streams_| yet. + return false; + } + return TotalBytesReceived() == potential_file_length_; +} + +void DownloadFileImpl::HandleStreamError(SourceStream* source_stream, + DownloadInterruptReason reason) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + source_stream->ClearDataReadyCallback(); + source_stream->set_finished(true); + num_active_streams_--; + + // If previous stream has already written data at the starting offset of + // the error stream. The download can complete. + bool can_recover_from_error = (source_stream->length() == kNoBytesToWrite); + + // See if the previous stream can download the full content. + // If the current stream has written some data, length of all preceding + // streams will be truncated. 
+ if (IsSparseFile() && !can_recover_from_error) { + SourceStream* preceding_neighbor = FindPrecedingNeighbor(source_stream); + while (preceding_neighbor) { + if (CanRecoverFromError(source_stream, preceding_neighbor)) { + can_recover_from_error = true; + break; + } + + // If the neighbor cannot recover the error and it has already created + // a slice, just interrupt the download. + if (preceding_neighbor->bytes_written() > 0) + break; + preceding_neighbor = FindPrecedingNeighbor(preceding_neighbor); + } + } + + SendUpdate(); // Make info up to date before error. + + if (!can_recover_from_error) { + // Error case for both upstream source and file write. + // Shut down processing and signal an error to our observer. + // Our observer will clean us up. + weak_factory_.InvalidateWeakPtrs(); + std::unique_ptr<crypto::SecureHash> hash_state = file_.Finish(); + main_task_runner_->PostTask( + FROM_HERE, + base::BindOnce(&DownloadDestinationObserver::DestinationError, + observer_, reason, TotalBytesReceived(), + std::move(hash_state))); + } +} + +bool DownloadFileImpl::IsSparseFile() const { + return source_streams_.size() > 1 || !received_slices_.empty(); +} + +DownloadFileImpl::SourceStream* DownloadFileImpl::FindPrecedingNeighbor( + SourceStream* source_stream) { + int64_t max_preceding_offset = 0; + SourceStream* ret = nullptr; + for (auto& stream : source_streams_) { + int64_t offset = stream.second->offset(); + if (offset < source_stream->offset() && offset >= max_preceding_offset) { + ret = stream.second.get(); + max_preceding_offset = offset; + } + } + return ret; +} + +void DownloadFileImpl::CancelRequest(int64_t offset) { + if (!cancel_request_callback_.is_null()) { + main_task_runner_->PostTask( + FROM_HERE, base::BindOnce(cancel_request_callback_, offset)); + } +} + +void DownloadFileImpl::DebugStates() const { + DVLOG(1) << "### Debugging DownloadFile states:"; + DVLOG(1) << "Total source stream count = " << source_streams_.size(); + for (const auto& stream : 
source_streams_) { + DVLOG(1) << "Source stream, offset = " << stream.second->offset() + << " , bytes_written = " << stream.second->bytes_written() + << " , is_finished = " << stream.second->is_finished() + << " , length = " << stream.second->length() + << ", index = " << stream.second->index(); + } + + DebugSlicesInfo(received_slices_); +} + +DownloadFileImpl::RenameParameters::RenameParameters( + RenameOption option, + const base::FilePath& new_path, + const RenameCompletionCallback& completion_callback) + : option(option), + new_path(new_path), + retries_left(kMaxRenameRetries), + completion_callback(completion_callback) {} + +DownloadFileImpl::RenameParameters::~RenameParameters() {} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_file_unittest.cc b/chromium/components/download/internal/common/download_file_unittest.cc new file mode 100644 index 00000000000..91c9d212691 --- /dev/null +++ b/chromium/components/download/internal/common/download_file_unittest.cc @@ -0,0 +1,1054 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include <stddef.h> +#include <stdint.h> + +#include <utility> +#include <vector> + +#include "base/files/file.h" +#include "base/files/file_util.h" +#include "base/files/scoped_temp_dir.h" +#include "base/location.h" +#include "base/run_loop.h" +#include "base/single_thread_task_runner.h" +#include "base/strings/string_number_conversions.h" +#include "base/test/scoped_task_environment.h" +#include "base/test/test_file_util.h" +#include "base/threading/thread_task_runner_handle.h" +#include "build/build_config.h" +#include "components/download/public/common/download_create_info.h" +#include "components/download/public/common/download_destination_observer.h" +#include "components/download/public/common/download_file_impl.h" +#include "components/download/public/common/download_interrupt_reasons.h" +#include "components/download/public/common/mock_input_stream.h" +#include "net/base/net_errors.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" + +using ::testing::_; +using ::testing::AnyNumber; +using ::testing::DoAll; +using ::testing::InSequence; +using ::testing::Return; +using ::testing::Sequence; +using ::testing::SetArgPointee; +using ::testing::StrictMock; + +namespace download { +namespace { + +// Struct for SourceStream states verification. 
+struct SourceStreamTestData { + SourceStreamTestData(int64_t offset, int64_t bytes_written, bool finished) + : offset(offset), bytes_written(bytes_written), finished(finished) {} + int64_t offset; + int64_t bytes_written; + bool finished; +}; + +int64_t GetBuffersLength(const char** buffers, size_t num_buffer) { + int64_t result = 0; + for (size_t i = 0; i < num_buffer; ++i) + result += static_cast<int64_t>(strlen(buffers[i])); + return result; +} + +std::string GetHexEncodedHashValue(crypto::SecureHash* hash_state) { + if (!hash_state) + return std::string(); + std::vector<char> hash_value(hash_state->GetHashLength()); + hash_state->Finish(&hash_value.front(), hash_value.size()); + return base::HexEncode(&hash_value.front(), hash_value.size()); +} + +class MockDownloadDestinationObserver : public DownloadDestinationObserver { + public: + MOCK_METHOD3(DestinationUpdate, + void(int64_t, + int64_t, + const std::vector<DownloadItem::ReceivedSlice>&)); + void DestinationError( + DownloadInterruptReason reason, + int64_t bytes_so_far, + std::unique_ptr<crypto::SecureHash> hash_state) override { + MockDestinationError(reason, bytes_so_far, + GetHexEncodedHashValue(hash_state.get())); + } + void DestinationCompleted( + int64_t total_bytes, + std::unique_ptr<crypto::SecureHash> hash_state) override { + MockDestinationCompleted(total_bytes, + GetHexEncodedHashValue(hash_state.get())); + } + + MOCK_METHOD3(MockDestinationError, + void(DownloadInterruptReason, int64_t, const std::string&)); + MOCK_METHOD2(MockDestinationCompleted, void(int64_t, const std::string&)); + + // Doesn't override any methods in the base class. Used to make sure + // that the last DestinationUpdate before a Destination{Completed,Error} + // had the right values. 
+ MOCK_METHOD2(CurrentUpdateStatus, void(int64_t, int64_t)); +}; + +enum DownloadFileRenameMethodType { RENAME_AND_UNIQUIFY, RENAME_AND_ANNOTATE }; + +// This is a test DownloadFileImpl that has no retry delay and, on Posix, +// retries renames failed due to ACCESS_DENIED. +class TestDownloadFileImpl : public DownloadFileImpl { + public: + TestDownloadFileImpl(std::unique_ptr<DownloadSaveInfo> save_info, + const base::FilePath& default_downloads_directory, + std::unique_ptr<InputStream> stream, + uint32_t download_id, + base::WeakPtr<DownloadDestinationObserver> observer) + : DownloadFileImpl(std::move(save_info), + default_downloads_directory, + std::move(stream), + download_id, + observer) {} + + protected: + base::TimeDelta GetRetryDelayForFailedRename(int attempt_count) override { + return base::TimeDelta::FromMilliseconds(0); + } + +#if !defined(OS_WIN) + // On Posix, we don't encounter transient errors during renames, except + // possibly EAGAIN, which is difficult to replicate reliably. So we resort to + // simulating a transient error using ACCESS_DENIED instead. 
+ bool ShouldRetryFailedRename(DownloadInterruptReason reason) override { + return reason == DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED; + } +#endif +}; + +} // namespace + +class DownloadFileTest : public testing::Test { + public: + static const char kTestData1[]; + static const char kTestData2[]; + static const char kTestData3[]; + static const char kTestData4[]; + static const char kTestData5[]; + static const char* kTestData6[]; + static const char* kTestData7[]; + static const char* kTestData8[]; + static const char kDataHash[]; + static const char kEmptyHash[]; + static const uint32_t kDummyDownloadId; + static const int kDummyChildId; + static const int kDummyRequestId; + + DownloadFileTest() + : observer_(new StrictMock<MockDownloadDestinationObserver>), + observer_factory_(observer_.get()), + input_stream_(nullptr), + additional_streams_( + std::vector<StrictMock<MockInputStream>*>{nullptr, nullptr}), + bytes_(-1), + bytes_per_sec_(-1) {} + + ~DownloadFileTest() override {} + + void SetUpdateDownloadInfo( + int64_t bytes, + int64_t bytes_per_sec, + const std::vector<DownloadItem::ReceivedSlice>& received_slices) { + bytes_ = bytes; + bytes_per_sec_ = bytes_per_sec; + } + + void ConfirmUpdateDownloadInfo() { + observer_->CurrentUpdateStatus(bytes_, bytes_per_sec_); + } + + void SetUp() override { + EXPECT_CALL(*(observer_.get()), DestinationUpdate(_, _, _)) + .Times(AnyNumber()) + .WillRepeatedly(Invoke(this, &DownloadFileTest::SetUpdateDownloadInfo)); + bool result = download_dir_.CreateUniqueTempDir(); + CHECK(result); + } + + // Mock calls to this function are forwarded here. 
+ void RegisterCallback( + const mojo::SimpleWatcher::ReadyCallback& sink_callback) { + sink_callback_ = sink_callback; + } + + void ClearCallback() { sink_callback_.Reset(); } + + void SetInterruptReasonCallback(const base::Closure& closure, + DownloadInterruptReason* reason_p, + DownloadInterruptReason reason, + int64_t bytes_wasted) { + *reason_p = reason; + closure.Run(); + } + + bool CreateDownloadFile(int offset, bool calculate_hash) { + return CreateDownloadFile(offset, 0, calculate_hash, + DownloadItem::ReceivedSlices()); + } + + bool CreateDownloadFile(int offset, + int length, + bool calculate_hash, + const DownloadItem::ReceivedSlices& received_slices) { + // There can be only one. + DCHECK(!download_file_.get()); + + input_stream_ = new StrictMock<MockInputStream>(); + + // TODO: Need to actually create a function that'll set the variables + // based on the inputs from the callback. + EXPECT_CALL(*input_stream_, RegisterDataReadyCallback(_)) + .WillOnce(Invoke(this, &DownloadFileTest::RegisterCallback)) + .RetiresOnSaturation(); + + std::unique_ptr<DownloadSaveInfo> save_info(new DownloadSaveInfo()); + save_info->offset = offset; + save_info->length = length; + + download_file_.reset(new TestDownloadFileImpl( + std::move(save_info), download_dir_.GetPath(), + std::unique_ptr<MockInputStream>(input_stream_), + DownloadItem::kInvalidId, observer_factory_.GetWeakPtr())); + + EXPECT_CALL(*input_stream_, Read(_, _)) + .WillOnce(Return(InputStream::EMPTY)) + .RetiresOnSaturation(); + + base::WeakPtrFactory<DownloadFileTest> weak_ptr_factory(this); + DownloadInterruptReason result = DOWNLOAD_INTERRUPT_REASON_NONE; + base::RunLoop loop_runner; + download_file_->Initialize( + base::BindRepeating(&DownloadFileTest::SetInterruptReasonCallback, + weak_ptr_factory.GetWeakPtr(), + loop_runner.QuitClosure(), &result), + DownloadFile::CancelRequestCallback(), received_slices, true); + loop_runner.Run(); + + ::testing::Mock::VerifyAndClearExpectations(input_stream_); + 
return result == DOWNLOAD_INTERRUPT_REASON_NONE; + } + + void DestroyDownloadFile(int offset, bool compare_disk_data = true) { + EXPECT_FALSE(download_file_->InProgress()); + + // Make sure the data has been properly written to disk. + if (compare_disk_data) { + std::string disk_data; + EXPECT_TRUE( + base::ReadFileToString(download_file_->FullPath(), &disk_data)); + EXPECT_EQ(expected_data_, disk_data); + } + + // Make sure the Browser and File threads outlive the DownloadFile + // to satisfy thread checks inside it. + download_file_.reset(); + } + + // Setup the stream to append data or write from |offset| to the file. + // Don't actually trigger the callback or do verifications. + void SetupDataAppend(const char** data_chunks, + size_t num_chunks, + MockInputStream* input_stream, + ::testing::Sequence s, + int64_t offset = -1) { + DCHECK(input_stream); + size_t current_pos = static_cast<size_t>(offset); + for (size_t i = 0; i < num_chunks; i++) { + const char* source_data = data_chunks[i]; + size_t length = strlen(source_data); + scoped_refptr<net::IOBuffer> data = new net::IOBuffer(length); + memcpy(data->data(), source_data, length); + EXPECT_CALL(*input_stream, Read(_, _)) + .InSequence(s) + .WillOnce(DoAll(SetArgPointee<0>(data), SetArgPointee<1>(length), + Return(InputStream::HAS_DATA))) + .RetiresOnSaturation(); + + if (offset < 0) { + // Append data. + expected_data_ += source_data; + continue; + } + + // Write from offset. May fill holes with '\0'. 
+ size_t new_len = current_pos + length; + if (new_len > expected_data_.size()) + expected_data_.append(new_len - expected_data_.size(), '\0'); + expected_data_.replace(current_pos, length, source_data); + current_pos += length; + } + } + + void VerifyStreamAndSize() { + ::testing::Mock::VerifyAndClearExpectations(input_stream_); + int64_t size; + EXPECT_TRUE(base::GetFileSize(download_file_->FullPath(), &size)); + EXPECT_EQ(expected_data_.size(), static_cast<size_t>(size)); + } + + // TODO(rdsmith): Manage full percentage issues properly. + void AppendDataToFile(const char** data_chunks, size_t num_chunks) { + ::testing::Sequence s1; + SetupDataAppend(data_chunks, num_chunks, input_stream_, s1); + EXPECT_CALL(*input_stream_, Read(_, _)) + .InSequence(s1) + .WillOnce(Return(InputStream::EMPTY)) + .RetiresOnSaturation(); + sink_callback_.Run(MOJO_RESULT_OK); + VerifyStreamAndSize(); + } + + void SetupFinishStream(DownloadInterruptReason interrupt_reason, + MockInputStream* input_stream, + ::testing::Sequence s) { + EXPECT_CALL(*input_stream, Read(_, _)) + .InSequence(s) + .WillOnce(Return(InputStream::COMPLETE)) + .RetiresOnSaturation(); + EXPECT_CALL(*input_stream, GetCompletionStatus()) + .InSequence(s) + .WillOnce(Return(interrupt_reason)) + .RetiresOnSaturation(); + EXPECT_CALL(*input_stream, ClearDataReadyCallback()).RetiresOnSaturation(); + } + + void FinishStream(DownloadInterruptReason interrupt_reason, + bool check_observer, + const std::string& expected_hash) { + ::testing::Sequence s1; + SetupFinishStream(interrupt_reason, input_stream_, s1); + sink_callback_.Run(MOJO_RESULT_OK); + VerifyStreamAndSize(); + if (check_observer) { + EXPECT_CALL(*(observer_.get()), + MockDestinationCompleted(_, expected_hash)); + base::RunLoop().RunUntilIdle(); + ::testing::Mock::VerifyAndClearExpectations(observer_.get()); + EXPECT_CALL(*(observer_.get()), DestinationUpdate(_, _, _)) + .Times(AnyNumber()) + .WillRepeatedly( + Invoke(this, 
&DownloadFileTest::SetUpdateDownloadInfo)); + } + } + + DownloadInterruptReason RenameAndUniquify(const base::FilePath& full_path, + base::FilePath* result_path_p) { + return InvokeRenameMethodAndWaitForCallback(RENAME_AND_UNIQUIFY, full_path, + result_path_p); + } + + DownloadInterruptReason RenameAndAnnotate(const base::FilePath& full_path, + base::FilePath* result_path_p) { + return InvokeRenameMethodAndWaitForCallback(RENAME_AND_ANNOTATE, full_path, + result_path_p); + } + + void ExpectPermissionError(DownloadInterruptReason err) { + EXPECT_TRUE(err == DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR || + err == DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED) + << "Interrupt reason = " << err; + } + + protected: + void InvokeRenameMethod( + DownloadFileRenameMethodType method, + const base::FilePath& full_path, + const DownloadFile::RenameCompletionCallback& completion_callback) { + switch (method) { + case RENAME_AND_UNIQUIFY: + download_file_->RenameAndUniquify(full_path, completion_callback); + break; + + case RENAME_AND_ANNOTATE: + download_file_->RenameAndAnnotate( + full_path, "12345678-ABCD-1234-DCBA-123456789ABC", GURL(), GURL(), + completion_callback); + break; + } + } + + DownloadInterruptReason InvokeRenameMethodAndWaitForCallback( + DownloadFileRenameMethodType method, + const base::FilePath& full_path, + base::FilePath* result_path_p) { + DownloadInterruptReason result_reason(DOWNLOAD_INTERRUPT_REASON_NONE); + base::FilePath result_path; + base::RunLoop loop_runner; + DownloadFile::RenameCompletionCallback completion_callback = + base::Bind(&DownloadFileTest::SetRenameResult, base::Unretained(this), + loop_runner.QuitClosure(), &result_reason, result_path_p); + InvokeRenameMethod(method, full_path, completion_callback); + loop_runner.Run(); + return result_reason; + } + + // Prepare a byte stream to write to the file sink. 
+ void PrepareStream(StrictMock<MockInputStream>** stream, + int64_t offset, + bool create_stream, + bool will_finish, + const char** buffers, + size_t num_buffer) { + if (create_stream) + *stream = new StrictMock<MockInputStream>(); + + // Expectation on MockInputStream for MultipleStreams tests: + // 1. RegisterCallback: Must called twice. One to set the callback, the + // other to release the stream. + // 2. Read: If filled with N buffer, called (N+1) times, where the last Read + // call doesn't read any data but returns STRAM_COMPLETE. + // The stream may terminate in the middle and less Read calls are expected. + // 3. GetStatus: Only called if the stream is completed and last Read call + // returns STREAM_COMPLETE. + Sequence seq; + SetupDataAppend(buffers, num_buffer, *stream, seq, offset); + if (will_finish) + SetupFinishStream(DOWNLOAD_INTERRUPT_REASON_NONE, *stream, seq); + } + + void VerifySourceStreamsStates(const SourceStreamTestData& data) { + DCHECK(download_file_->source_streams_.find(data.offset) != + download_file_->source_streams_.end()) + << "Can't find stream at offset : " << data.offset; + DownloadFileImpl::SourceStream* stream = + download_file_->source_streams_[data.offset].get(); + DCHECK(stream); + EXPECT_EQ(data.offset, stream->offset()); + EXPECT_EQ(data.bytes_written, stream->bytes_written()); + EXPECT_EQ(data.finished, stream->is_finished()); + } + + size_t source_streams_count() const { + DCHECK(download_file_); + return download_file_->source_streams_.size(); + } + + int64_t TotalBytesReceived() const { + DCHECK(download_file_); + return download_file_->TotalBytesReceived(); + } + + std::unique_ptr<StrictMock<MockDownloadDestinationObserver>> observer_; + base::WeakPtrFactory<DownloadDestinationObserver> observer_factory_; + + // DownloadFile instance we are testing. + std::unique_ptr<DownloadFileImpl> download_file_; + + // Stream for sending data into the download file. 
+ // Owned by download_file_; will be alive for lifetime of download_file_. + StrictMock<MockInputStream>* input_stream_; + + // Additional streams to test multiple stream write. + std::vector<StrictMock<MockInputStream>*> additional_streams_; + + // Sink callback data for stream. + mojo::SimpleWatcher::ReadyCallback sink_callback_; + + base::ScopedTempDir download_dir_; + + // Latest update sent to the observer. + int64_t bytes_; + int64_t bytes_per_sec_; + + private: + void SetRenameResult(const base::Closure& closure, + DownloadInterruptReason* reason_p, + base::FilePath* result_path_p, + DownloadInterruptReason reason, + const base::FilePath& result_path) { + if (reason_p) + *reason_p = reason; + if (result_path_p) + *result_path_p = result_path; + closure.Run(); + } + + base::test::ScopedTaskEnvironment scoped_task_environment_; + + // Keep track of what data should be saved to the disk file. + std::string expected_data_; +}; + +// DownloadFile::RenameAndAnnotate and DownloadFile::RenameAndUniquify have a +// considerable amount of functional overlap. In order to re-use test logic, we +// are going to introduce this value parameterized test fixture. It will take a +// DownloadFileRenameMethodType value which can be either of the two rename +// methods. +class DownloadFileTestWithRename + : public DownloadFileTest, + public ::testing::WithParamInterface<DownloadFileRenameMethodType> { + protected: + DownloadInterruptReason InvokeSelectedRenameMethod( + const base::FilePath& full_path, + base::FilePath* result_path_p) { + return InvokeRenameMethodAndWaitForCallback(GetParam(), full_path, + result_path_p); + } +}; + +// And now instantiate all DownloadFileTestWithRename tests using both +// DownloadFile rename methods. Each test of the form +// DownloadFileTestWithRename.<FooTest> will be instantiated once with +// RenameAndAnnotate as the value parameter and once with RenameAndUniquify as +// the value parameter. 
+INSTANTIATE_TEST_CASE_P(DownloadFile, + DownloadFileTestWithRename, + ::testing::Values(RENAME_AND_ANNOTATE, + RENAME_AND_UNIQUIFY)); + +const char DownloadFileTest::kTestData1[] = + "Let's write some data to the file!\n"; +const char DownloadFileTest::kTestData2[] = "Writing more data.\n"; +const char DownloadFileTest::kTestData3[] = "Final line."; +const char DownloadFileTest::kTestData4[] = "abcdefg"; +const char DownloadFileTest::kTestData5[] = "01234"; +const char* DownloadFileTest::kTestData6[] = {kTestData1, kTestData2}; +const char* DownloadFileTest::kTestData7[] = {kTestData4, kTestData5}; +const char* DownloadFileTest::kTestData8[] = {kTestData1, kTestData2, + kTestData4, kTestData5}; + +const char DownloadFileTest::kDataHash[] = + "CBF68BF10F8003DB86B31343AFAC8C7175BD03FB5FC905650F8C80AF087443A8"; +const char DownloadFileTest::kEmptyHash[] = + "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855"; + +const uint32_t DownloadFileTest::kDummyDownloadId = 23; +const int DownloadFileTest::kDummyChildId = 3; +const int DownloadFileTest::kDummyRequestId = 67; + +// Rename the file before any data is downloaded, after some has, after it all +// has, and after it's closed. +TEST_P(DownloadFileTestWithRename, RenameFileFinal) { + ASSERT_TRUE(CreateDownloadFile(0, true)); + base::FilePath initial_path(download_file_->FullPath()); + EXPECT_TRUE(base::PathExists(initial_path)); + base::FilePath path_1(initial_path.InsertBeforeExtensionASCII("_1")); + base::FilePath path_2(initial_path.InsertBeforeExtensionASCII("_2")); + base::FilePath path_3(initial_path.InsertBeforeExtensionASCII("_3")); + base::FilePath path_4(initial_path.InsertBeforeExtensionASCII("_4")); + base::FilePath output_path; + + // Rename the file before downloading any data. 
+ EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + InvokeSelectedRenameMethod(path_1, &output_path)); + base::FilePath renamed_path = download_file_->FullPath(); + EXPECT_EQ(path_1, renamed_path); + EXPECT_EQ(path_1, output_path); + + // Check the files. + EXPECT_FALSE(base::PathExists(initial_path)); + EXPECT_TRUE(base::PathExists(path_1)); + + // Download the data. + const char* chunks1[] = {kTestData1, kTestData2}; + AppendDataToFile(chunks1, 2); + + // Rename the file after downloading some data. + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + InvokeSelectedRenameMethod(path_2, &output_path)); + renamed_path = download_file_->FullPath(); + EXPECT_EQ(path_2, renamed_path); + EXPECT_EQ(path_2, output_path); + + // Check the files. + EXPECT_FALSE(base::PathExists(path_1)); + EXPECT_TRUE(base::PathExists(path_2)); + + const char* chunks2[] = {kTestData3}; + AppendDataToFile(chunks2, 1); + + // Rename the file after downloading all the data. + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + InvokeSelectedRenameMethod(path_3, &output_path)); + renamed_path = download_file_->FullPath(); + EXPECT_EQ(path_3, renamed_path); + EXPECT_EQ(path_3, output_path); + + // Check the files. + EXPECT_FALSE(base::PathExists(path_2)); + EXPECT_TRUE(base::PathExists(path_3)); + + FinishStream(DOWNLOAD_INTERRUPT_REASON_NONE, true, kDataHash); + base::RunLoop().RunUntilIdle(); + + // Rename the file after downloading all the data and closing the file. + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + InvokeSelectedRenameMethod(path_4, &output_path)); + renamed_path = download_file_->FullPath(); + EXPECT_EQ(path_4, renamed_path); + EXPECT_EQ(path_4, output_path); + + // Check the files. + EXPECT_FALSE(base::PathExists(path_3)); + EXPECT_TRUE(base::PathExists(path_4)); + + DestroyDownloadFile(0); +} + +// Test to make sure the rename overwrites when requested. This is separate from +// the above test because it only applies to RenameAndAnnotate(). +// RenameAndUniquify() doesn't overwrite by design. 
+TEST_F(DownloadFileTest, RenameOverwrites) { + ASSERT_TRUE(CreateDownloadFile(0, true)); + base::FilePath initial_path(download_file_->FullPath()); + EXPECT_TRUE(base::PathExists(initial_path)); + base::FilePath path_1(initial_path.InsertBeforeExtensionASCII("_1")); + + ASSERT_FALSE(base::PathExists(path_1)); + static const char file_data[] = "xyzzy"; + ASSERT_EQ(static_cast<int>(sizeof(file_data)), + base::WriteFile(path_1, file_data, sizeof(file_data))); + ASSERT_TRUE(base::PathExists(path_1)); + + base::FilePath new_path; + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + RenameAndAnnotate(path_1, &new_path)); + EXPECT_EQ(path_1.value(), new_path.value()); + + std::string file_contents; + ASSERT_TRUE(base::ReadFileToString(new_path, &file_contents)); + EXPECT_NE(std::string(file_data), file_contents); + + FinishStream(DOWNLOAD_INTERRUPT_REASON_NONE, true, kEmptyHash); + base::RunLoop().RunUntilIdle(); + DestroyDownloadFile(0); +} + +// Test to make sure the rename uniquifies if we aren't overwriting +// and there's a file where we're aiming. As above, not a +// DownloadFileTestWithRename test because this only applies to +// RenameAndUniquify(). 
+TEST_F(DownloadFileTest, RenameUniquifies) { + ASSERT_TRUE(CreateDownloadFile(0, true)); + base::FilePath initial_path(download_file_->FullPath()); + EXPECT_TRUE(base::PathExists(initial_path)); + base::FilePath path_1(initial_path.InsertBeforeExtensionASCII("_1")); + base::FilePath path_1_suffixed(path_1.InsertBeforeExtensionASCII(" (1)")); + + ASSERT_FALSE(base::PathExists(path_1)); + static const char file_data[] = "xyzzy"; + ASSERT_EQ(static_cast<int>(sizeof(file_data)), + base::WriteFile(path_1, file_data, sizeof(file_data))); + ASSERT_TRUE(base::PathExists(path_1)); + + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, RenameAndUniquify(path_1, nullptr)); + EXPECT_TRUE(base::PathExists(path_1_suffixed)); + + FinishStream(DOWNLOAD_INTERRUPT_REASON_NONE, true, kEmptyHash); + base::RunLoop().RunUntilIdle(); + DestroyDownloadFile(0); +} + +// Test that RenameAndUniquify doesn't try to uniquify in the case where the +// target filename is the same as the current filename. +TEST_F(DownloadFileTest, RenameRecognizesSelfConflict) { + ASSERT_TRUE(CreateDownloadFile(0, true)); + base::FilePath initial_path(download_file_->FullPath()); + EXPECT_TRUE(base::PathExists(initial_path)); + + base::FilePath new_path; + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + RenameAndUniquify(initial_path, &new_path)); + EXPECT_TRUE(base::PathExists(initial_path)); + + FinishStream(DOWNLOAD_INTERRUPT_REASON_NONE, true, kEmptyHash); + base::RunLoop().RunUntilIdle(); + DestroyDownloadFile(0); + EXPECT_EQ(initial_path.value(), new_path.value()); +} + +// Test to make sure we get the proper error on failure. +TEST_P(DownloadFileTestWithRename, RenameError) { + ASSERT_TRUE(CreateDownloadFile(0, true)); + base::FilePath initial_path(download_file_->FullPath()); + + // Create a subdirectory. 
+ base::FilePath target_dir( + initial_path.DirName().Append(FILE_PATH_LITERAL("TargetDir"))); + ASSERT_FALSE(base::DirectoryExists(target_dir)); + ASSERT_TRUE(base::CreateDirectory(target_dir)); + base::FilePath target_path(target_dir.Append(initial_path.BaseName())); + + // Targets + base::FilePath target_path_suffixed( + target_path.InsertBeforeExtensionASCII(" (1)")); + ASSERT_FALSE(base::PathExists(target_path)); + ASSERT_FALSE(base::PathExists(target_path_suffixed)); + + // Make the directory unwritable and try to rename within it. + { + base::FilePermissionRestorer restorer(target_dir); + ASSERT_TRUE(base::MakeFileUnwritable(target_dir)); + + // Expect nulling out of further processing. + EXPECT_CALL(*input_stream_, ClearDataReadyCallback()); + ExpectPermissionError(InvokeSelectedRenameMethod(target_path, nullptr)); + EXPECT_FALSE(base::PathExists(target_path_suffixed)); + } + + FinishStream(DOWNLOAD_INTERRUPT_REASON_NONE, true, kEmptyHash); + base::RunLoop().RunUntilIdle(); + DestroyDownloadFile(0); +} + +namespace { + +void TestRenameCompletionCallback(const base::Closure& closure, + bool* did_run_callback, + DownloadInterruptReason interrupt_reason, + const base::FilePath& new_path) { + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, interrupt_reason); + *did_run_callback = true; + closure.Run(); +} + +} // namespace + +// Test that the retry logic works. This test assumes that DownloadFileImpl will +// post tasks to the current message loop (acting as the download sequence) +// asynchronously to retry the renames. We will stuff RunLoop::QuitClosures() +// in between the retry tasks to stagger them and then allow the rename to +// succeed. +// +// Note that there is only one queue of tasks to run, and that is in the tests' +// base::MessageLoop::current(). Each RunLoop processes that queue until it sees +// a QuitClosure() targeted at itself, at which point it stops processing. 
+TEST_P(DownloadFileTestWithRename, RenameWithErrorRetry) { + ASSERT_TRUE(CreateDownloadFile(0, true)); + base::FilePath initial_path(download_file_->FullPath()); + + // Create a subdirectory. + base::FilePath target_dir( + initial_path.DirName().Append(FILE_PATH_LITERAL("TargetDir"))); + ASSERT_FALSE(base::DirectoryExists(target_dir)); + ASSERT_TRUE(base::CreateDirectory(target_dir)); + base::FilePath target_path(target_dir.Append(initial_path.BaseName())); + + bool did_run_callback = false; + + // Each RunLoop can be used the run the MessageLoop until the corresponding + // QuitClosure() is run. This one is used to produce the QuitClosure() that + // will be run when the entire rename operation is complete. + base::RunLoop succeeding_run; + { +// (Scope for the base::File or base::FilePermissionRestorer below.) +#if defined(OS_WIN) + // On Windows we test with an actual transient error, a sharing violation. + // The rename will fail because we are holding the file open for READ. On + // Posix this doesn't cause a failure. + base::File locked_file(initial_path, + base::File::FLAG_OPEN | base::File::FLAG_READ); + ASSERT_TRUE(locked_file.IsValid()); +#else + // Simulate a transient failure by revoking write permission for target_dir. + // The TestDownloadFileImpl class treats this error as transient even though + // DownloadFileImpl itself doesn't. + base::FilePermissionRestorer restore_permissions_for(target_dir); + ASSERT_TRUE(base::MakeFileUnwritable(target_dir)); +#endif + + // The Rename() should fail here and enqueue a retry task without invoking + // the completion callback. + InvokeRenameMethod( + GetParam(), target_path, + base::Bind(&TestRenameCompletionCallback, succeeding_run.QuitClosure(), + &did_run_callback)); + EXPECT_FALSE(did_run_callback); + + base::RunLoop first_failing_run; + // Queue the QuitClosure() on the MessageLoop now. Any tasks queued by the + // Rename() will be in front of the QuitClosure(). 
Running the message loop + // now causes the just the first retry task to be run. The rename still + // fails, so another retry task would get queued behind the QuitClosure(). + base::ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, first_failing_run.QuitClosure()); + first_failing_run.Run(); + EXPECT_FALSE(did_run_callback); + + // Running another loop should have the same effect as the above as long as + // kMaxRenameRetries is greater than 2. + base::RunLoop second_failing_run; + base::ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, second_failing_run.QuitClosure()); + second_failing_run.Run(); + EXPECT_FALSE(did_run_callback); + } + + // This time the QuitClosure from succeeding_run should get executed. + succeeding_run.Run(); + EXPECT_TRUE(did_run_callback); + + FinishStream(DOWNLOAD_INTERRUPT_REASON_NONE, true, kEmptyHash); + base::RunLoop().RunUntilIdle(); + DestroyDownloadFile(0); +} + +// Various tests of the StreamActive method. +TEST_F(DownloadFileTest, StreamEmptySuccess) { + ASSERT_TRUE(CreateDownloadFile(0, true)); + base::FilePath initial_path(download_file_->FullPath()); + EXPECT_TRUE(base::PathExists(initial_path)); + + // Test that calling the sink_callback_ on an empty stream shouldn't + // do anything. + AppendDataToFile(nullptr, 0); + + // Finish the download this way and make sure we see it on the observer. + FinishStream(DOWNLOAD_INTERRUPT_REASON_NONE, true, kEmptyHash); + base::RunLoop().RunUntilIdle(); + + DestroyDownloadFile(0); +} + +TEST_F(DownloadFileTest, StreamEmptyError) { + ASSERT_TRUE(CreateDownloadFile(0, true)); + base::FilePath initial_path(download_file_->FullPath()); + EXPECT_TRUE(base::PathExists(initial_path)); + + // Finish the download in error and make sure we see it on the + // observer. 
+ EXPECT_CALL( + *(observer_.get()), + MockDestinationError(DOWNLOAD_INTERRUPT_REASON_NETWORK_DISCONNECTED, 0, + kEmptyHash)) + .WillOnce(InvokeWithoutArgs( + this, &DownloadFileTest::ConfirmUpdateDownloadInfo)); + + // If this next EXPECT_CALL fails flakily, it's probably a real failure. + // We'll be getting a stream of UpdateDownload calls from the timer, and + // the last one may have the correct information even if the failure + // doesn't produce an update, as the timer update may have triggered at the + // same time. + EXPECT_CALL(*(observer_.get()), CurrentUpdateStatus(0, _)); + + FinishStream(DOWNLOAD_INTERRUPT_REASON_NETWORK_DISCONNECTED, false, + kEmptyHash); + + base::RunLoop().RunUntilIdle(); + + DestroyDownloadFile(0); +} + +TEST_F(DownloadFileTest, StreamNonEmptySuccess) { + ASSERT_TRUE(CreateDownloadFile(0, true)); + base::FilePath initial_path(download_file_->FullPath()); + EXPECT_TRUE(base::PathExists(initial_path)); + + const char* chunks1[] = {kTestData1, kTestData2}; + ::testing::Sequence s1; + SetupDataAppend(chunks1, 2, input_stream_, s1); + SetupFinishStream(DOWNLOAD_INTERRUPT_REASON_NONE, input_stream_, s1); + EXPECT_CALL(*(observer_.get()), MockDestinationCompleted(_, _)); + sink_callback_.Run(MOJO_RESULT_OK); + VerifyStreamAndSize(); + base::RunLoop().RunUntilIdle(); + DestroyDownloadFile(0); +} + +TEST_F(DownloadFileTest, StreamNonEmptyError) { + ASSERT_TRUE(CreateDownloadFile(0, true)); + base::FilePath initial_path(download_file_->FullPath()); + EXPECT_TRUE(base::PathExists(initial_path)); + + const char* chunks1[] = {kTestData1, kTestData2}; + ::testing::Sequence s1; + SetupDataAppend(chunks1, 2, input_stream_, s1); + SetupFinishStream(DOWNLOAD_INTERRUPT_REASON_NETWORK_DISCONNECTED, + input_stream_, s1); + + EXPECT_CALL(*(observer_.get()), + MockDestinationError( + DOWNLOAD_INTERRUPT_REASON_NETWORK_DISCONNECTED, _, _)) + .WillOnce(InvokeWithoutArgs( + this, &DownloadFileTest::ConfirmUpdateDownloadInfo)); + + // If this next 
EXPECT_CALL fails flakily, it's probably a real failure. + // We'll be getting a stream of UpdateDownload calls from the timer, and + // the last one may have the correct information even if the failure + // doesn't produce an update, as the timer update may have triggered at the + // same time. + EXPECT_CALL(*(observer_.get()), + CurrentUpdateStatus(strlen(kTestData1) + strlen(kTestData2), _)); + + sink_callback_.Run(MOJO_RESULT_OK); + base::RunLoop().RunUntilIdle(); + VerifyStreamAndSize(); + DestroyDownloadFile(0); +} + +// Tests for concurrent streams handling, used for parallel download. +// +// Activate both streams at the same time. +TEST_F(DownloadFileTest, MultipleStreamsWrite) { + int64_t stream_0_length = GetBuffersLength(kTestData6, 2); + int64_t stream_1_length = GetBuffersLength(kTestData7, 2); + + ASSERT_TRUE(CreateDownloadFile(0, stream_0_length, true, + DownloadItem::ReceivedSlices())); + + PrepareStream(&input_stream_, 0, false, true, kTestData6, 2); + PrepareStream(&additional_streams_[0], stream_0_length, true, true, + kTestData7, 2); + + EXPECT_CALL(*additional_streams_[0], RegisterDataReadyCallback(_)) + .RetiresOnSaturation(); + EXPECT_CALL(*(observer_.get()), MockDestinationCompleted(_, _)); + + // Activate the streams. + download_file_->AddInputStream( + std::unique_ptr<MockInputStream>(additional_streams_[0]), stream_0_length, + DownloadSaveInfo::kLengthFullContent); + sink_callback_.Run(MOJO_RESULT_OK); + base::RunLoop().RunUntilIdle(); + + SourceStreamTestData stream_data_0(0, stream_0_length, true); + SourceStreamTestData stream_data_1(stream_0_length, stream_1_length, true); + VerifySourceStreamsStates(stream_data_0); + VerifySourceStreamsStates(stream_data_1); + EXPECT_EQ(stream_0_length + stream_1_length, TotalBytesReceived()); + + DestroyDownloadFile(0); +} + +// 3 streams write to one sink, the second stream has a limited length. 
+TEST_F(DownloadFileTest, MutipleStreamsLimitedLength) { + int64_t stream_0_length = GetBuffersLength(kTestData6, 2); + + // The second stream has a limited length and should be partially written + // to disk. When we prepare the stream, we fill the stream with 2 full buffer. + int64_t stream_1_length = GetBuffersLength(kTestData7, 2) - 1; + + // The last stream can't have length limit, it's a half open request, e.g + // "Range:50-". + int64_t stream_2_length = GetBuffersLength(kTestData6, 2); + + ASSERT_TRUE(CreateDownloadFile(0, stream_0_length, true, + DownloadItem::ReceivedSlices())); + + PrepareStream(&input_stream_, 0, false, true, kTestData6, 2); + PrepareStream(&additional_streams_[0], stream_0_length, true, false, + kTestData7, 2); + PrepareStream(&additional_streams_[1], stream_0_length + stream_1_length, + true, true, kTestData6, 2); + + EXPECT_CALL(*additional_streams_[0], RegisterDataReadyCallback(_)) + .Times(1) + .RetiresOnSaturation(); + + EXPECT_CALL(*additional_streams_[0], ClearDataReadyCallback()) + .Times(1) + .RetiresOnSaturation(); + + EXPECT_CALL(*additional_streams_[1], RegisterDataReadyCallback(_)) + .RetiresOnSaturation(); + + EXPECT_CALL(*(observer_.get()), MockDestinationCompleted(_, _)); + + // Activate all the streams. 
+ download_file_->AddInputStream( + std::unique_ptr<MockInputStream>(additional_streams_[0]), stream_0_length, + stream_1_length); + download_file_->AddInputStream( + std::unique_ptr<MockInputStream>(additional_streams_[1]), + stream_0_length + stream_1_length, DownloadSaveInfo::kLengthFullContent); + sink_callback_.Run(MOJO_RESULT_OK); + base::RunLoop().RunUntilIdle(); + + SourceStreamTestData stream_data_0(0, stream_0_length, true); + SourceStreamTestData stream_data_1(stream_0_length, stream_1_length, true); + SourceStreamTestData stream_data_2(stream_0_length + stream_1_length, + stream_2_length, true); + + VerifySourceStreamsStates(stream_data_0); + VerifySourceStreamsStates(stream_data_1); + VerifySourceStreamsStates(stream_data_2); + + EXPECT_EQ(stream_0_length + stream_1_length + stream_2_length, + TotalBytesReceived()); + + download_file_->Cancel(); + DestroyDownloadFile(0, false); +} + +// Activate and deplete one stream, later add the second stream. +TEST_F(DownloadFileTest, MultipleStreamsFirstStreamWriteAllData) { + int64_t stream_0_length = GetBuffersLength(kTestData8, 4); + + ASSERT_TRUE(CreateDownloadFile(0, DownloadSaveInfo::kLengthFullContent, true, + DownloadItem::ReceivedSlices())); + + PrepareStream(&input_stream_, 0, false, true, kTestData8, 4); + + EXPECT_CALL(*(observer_.get()), MockDestinationCompleted(_, _)); + + sink_callback_.Run(MOJO_RESULT_OK); + base::RunLoop().RunUntilIdle(); + + // Add another stream, the file is already closed, so nothing should be + // called. 
+ EXPECT_FALSE(download_file_->InProgress()); + + additional_streams_[0] = new StrictMock<MockInputStream>(); + download_file_->AddInputStream( + std::unique_ptr<MockInputStream>(additional_streams_[0]), + stream_0_length - 1, DownloadSaveInfo::kLengthFullContent); + base::RunLoop().RunUntilIdle(); + + SourceStreamTestData stream_data_0(0, stream_0_length, true); + VerifySourceStreamsStates(stream_data_0); + EXPECT_EQ(stream_0_length, TotalBytesReceived()); + EXPECT_EQ(1u, source_streams_count()); + + DestroyDownloadFile(0); +} + +// While one stream is writing, kick off another stream with an offset that has +// been written by the first one. +TEST_F(DownloadFileTest, SecondStreamStartingOffsetAlreadyWritten) { + int64_t stream_0_length = GetBuffersLength(kTestData6, 2); + + ASSERT_TRUE(CreateDownloadFile(0, stream_0_length, true, + DownloadItem::ReceivedSlices())); + + Sequence seq; + SetupDataAppend(kTestData6, 2, input_stream_, seq, 0); + + EXPECT_CALL(*input_stream_, Read(_, _)) + .InSequence(seq) + .WillOnce(Return(InputStream::EMPTY)) + .RetiresOnSaturation(); + sink_callback_.Run(MOJO_RESULT_OK); + base::RunLoop().RunUntilIdle(); + + additional_streams_[0] = new StrictMock<MockInputStream>(); + EXPECT_CALL(*additional_streams_[0], RegisterDataReadyCallback(_)) + .WillRepeatedly(Invoke(this, &DownloadFileTest::RegisterCallback)) + .RetiresOnSaturation(); + EXPECT_CALL(*additional_streams_[0], ClearDataReadyCallback()) + .WillRepeatedly(Invoke(this, &DownloadFileTest::ClearCallback)) + .RetiresOnSaturation(); + EXPECT_CALL(*additional_streams_[0], Read(_, _)) + .WillOnce(Return(InputStream::EMPTY)) + .RetiresOnSaturation(); + + download_file_->AddInputStream( + std::unique_ptr<MockInputStream>(additional_streams_[0]), 0, + DownloadSaveInfo::kLengthFullContent); + + // The stream should get terminated and reset the callback. 
+ EXPECT_TRUE(sink_callback_.is_null()); + download_file_->Cancel(); + DestroyDownloadFile(0, false); +} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_interrupt_reasons_utils.cc b/chromium/components/download/internal/common/download_interrupt_reasons_utils.cc new file mode 100644 index 00000000000..f77d05e6052 --- /dev/null +++ b/chromium/components/download/internal/common/download_interrupt_reasons_utils.cc @@ -0,0 +1,156 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/public/common/download_interrupt_reasons_utils.h" + +#include "base/logging.h" + +namespace download { + +DownloadInterruptReason ConvertFileErrorToInterruptReason( + base::File::Error file_error) { + switch (file_error) { + case base::File::FILE_OK: + return DOWNLOAD_INTERRUPT_REASON_NONE; + + case base::File::FILE_ERROR_IN_USE: + return DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR; + + case base::File::FILE_ERROR_ACCESS_DENIED: + return DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED; + + case base::File::FILE_ERROR_TOO_MANY_OPENED: + return DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR; + + case base::File::FILE_ERROR_NO_MEMORY: + return DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR; + + case base::File::FILE_ERROR_NO_SPACE: + return DOWNLOAD_INTERRUPT_REASON_FILE_NO_SPACE; + + case base::File::FILE_ERROR_SECURITY: + return DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED; + + default: + return DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + } +} + +DownloadInterruptReason ConvertNetErrorToInterruptReason( + net::Error net_error, + DownloadInterruptSource source) { + switch (net_error) { + case net::OK: + return DOWNLOAD_INTERRUPT_REASON_NONE; + + // File errors. + + // The file is too large. 
+ case net::ERR_FILE_TOO_BIG: + return DOWNLOAD_INTERRUPT_REASON_FILE_TOO_LARGE; + + // Permission to access a resource, other than the network, was denied. + case net::ERR_ACCESS_DENIED: + return DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED; + + // There were not enough resources to complete the operation. + case net::ERR_INSUFFICIENT_RESOURCES: + return DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR; + + // Memory allocation failed. + case net::ERR_OUT_OF_MEMORY: + return DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR; + + // The path or file name is too long. + case net::ERR_FILE_PATH_TOO_LONG: + return DOWNLOAD_INTERRUPT_REASON_FILE_NAME_TOO_LONG; + + // Not enough room left on the disk. + case net::ERR_FILE_NO_SPACE: + return DOWNLOAD_INTERRUPT_REASON_FILE_NO_SPACE; + + // The file has a virus. + case net::ERR_FILE_VIRUS_INFECTED: + return DOWNLOAD_INTERRUPT_REASON_FILE_VIRUS_INFECTED; + + // The file was blocked by local policy. + case net::ERR_BLOCKED_BY_CLIENT: + return DOWNLOAD_INTERRUPT_REASON_FILE_BLOCKED; + + // Network errors. + + // The network operation timed out. + case net::ERR_TIMED_OUT: + return DOWNLOAD_INTERRUPT_REASON_NETWORK_TIMEOUT; + + // The network connection was lost or changed. + case net::ERR_NETWORK_CHANGED: + case net::ERR_INTERNET_DISCONNECTED: + return DOWNLOAD_INTERRUPT_REASON_NETWORK_DISCONNECTED; + + // The server has gone down. + case net::ERR_CONNECTION_FAILED: + return DOWNLOAD_INTERRUPT_REASON_NETWORK_SERVER_DOWN; + + // Server responses. + + // The server does not support range requests. + case net::ERR_REQUEST_RANGE_NOT_SATISFIABLE: + return DOWNLOAD_INTERRUPT_REASON_SERVER_NO_RANGE; + + case net::ERR_CONTENT_LENGTH_MISMATCH: + return DOWNLOAD_INTERRUPT_REASON_SERVER_CONTENT_LENGTH_MISMATCH; + + default: + break; + } + + // Handle errors that don't have mappings, depending on the source. 
+ switch (source) { + case DOWNLOAD_INTERRUPT_FROM_DISK: + return DOWNLOAD_INTERRUPT_REASON_FILE_FAILED; + case DOWNLOAD_INTERRUPT_FROM_NETWORK: + return DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED; + case DOWNLOAD_INTERRUPT_FROM_SERVER: + return DOWNLOAD_INTERRUPT_REASON_SERVER_FAILED; + default: + break; + } + + NOTREACHED(); + + return DOWNLOAD_INTERRUPT_REASON_NONE; +} + +DownloadInterruptReason ConvertMojoNetworkRequestStatusToInterruptReason( + mojom::NetworkRequestStatus status) { + switch (status) { + case mojom::NetworkRequestStatus::OK: + return DOWNLOAD_INTERRUPT_REASON_NONE; + case mojom::NetworkRequestStatus::NETWORK_TIMEOUT: + return DOWNLOAD_INTERRUPT_REASON_NETWORK_TIMEOUT; + case mojom::NetworkRequestStatus::NETWORK_DISCONNECTED: + return DOWNLOAD_INTERRUPT_REASON_NETWORK_DISCONNECTED; + case mojom::NetworkRequestStatus::NETWORK_SERVER_DOWN: + return DOWNLOAD_INTERRUPT_REASON_NETWORK_SERVER_DOWN; + case mojom::NetworkRequestStatus::SERVER_NO_RANGE: + return DOWNLOAD_INTERRUPT_REASON_SERVER_NO_RANGE; + case mojom::NetworkRequestStatus::SERVER_CONTENT_LENGTH_MISMATCH: + return DOWNLOAD_INTERRUPT_REASON_SERVER_CONTENT_LENGTH_MISMATCH; + case mojom::NetworkRequestStatus::SERVER_UNREACHABLE: + return DOWNLOAD_INTERRUPT_REASON_SERVER_UNREACHABLE; + case mojom::NetworkRequestStatus::SERVER_CERT_PROBLEM: + return DOWNLOAD_INTERRUPT_REASON_SERVER_CERT_PROBLEM; + case mojom::NetworkRequestStatus::USER_CANCELED: + return DOWNLOAD_INTERRUPT_REASON_USER_CANCELED; + case mojom::NetworkRequestStatus::NETWORK_FAILED: + return DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED; + default: + NOTREACHED(); + break; + } + return DOWNLOAD_INTERRUPT_REASON_NONE; +} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_item_impl.cc b/chromium/components/download/internal/common/download_item_impl.cc new file mode 100644 index 00000000000..7181a2ca5c8 --- /dev/null +++ b/chromium/components/download/internal/common/download_item_impl.cc @@ -0,0 
+1,2550 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// File method ordering: Methods in this file are in the same order as +// in download_item_impl.h, with the following exception: The public +// interface Start is placed in chronological order with the other +// (private) routines that together define a DownloadItem's state +// transitions as the download progresses. See "Download progression +// cascade" later in this file. + +// A regular DownloadItem (created for a download in this session of +// the browser) normally goes through the following states: +// * Created (when download starts) +// * Destination filename determined +// * Entered into the history database. +// * Made visible in the download shelf. +// * All the data is saved. Note that the actual data download occurs +// in parallel with the above steps, but until those steps are +// complete, the state of the data save will be ignored. +// * Download file is renamed to its final name, and possibly +// auto-opened. 
+ +#include "components/download/public/common/download_item_impl.h" + +#include <memory> +#include <utility> +#include <vector> + +#include "base/bind.h" +#include "base/files/file_util.h" +#include "base/format_macros.h" +#include "base/guid.h" +#include "base/logging.h" +#include "base/metrics/histogram_macros.h" +#include "base/optional.h" +#include "base/stl_util.h" +#include "base/strings/string_util.h" +#include "base/strings/stringprintf.h" +#include "base/strings/utf_string_conversions.h" +#include "base/task_runner_util.h" +#include "components/download/downloader/in_progress/download_entry.h" +#include "components/download/internal/common/download_job_impl.h" +#include "components/download/internal/common/parallel_download_utils.h" +#include "components/download/public/common/download_danger_type.h" +#include "components/download/public/common/download_file.h" +#include "components/download/public/common/download_interrupt_reasons.h" +#include "components/download/public/common/download_item_impl_delegate.h" +#include "components/download/public/common/download_job_factory.h" +#include "components/download/public/common/download_stats.h" +#include "components/download/public/common/download_task_runner.h" +#include "components/download/public/common/download_ukm_helper.h" +#include "components/download/public/common/download_url_parameters.h" +#include "net/http/http_response_headers.h" +#include "net/http/http_status_code.h" +#include "net/traffic_annotation/network_traffic_annotation.h" + +namespace download { + +namespace { + +bool DeleteDownloadedFile(const base::FilePath& path) { + DCHECK(GetDownloadTaskRunner()->RunsTasksInCurrentSequence()); + + // Make sure we only delete files. 
+ if (base::DirectoryExists(path)) + return true; + return base::DeleteFile(path, false); +} + +void DeleteDownloadedFileDone(base::WeakPtr<DownloadItemImpl> item, + const base::Callback<void(bool)>& callback, + bool success) { + if (success && item.get()) + item->OnDownloadedFileRemoved(); + callback.Run(success); +} + +// Wrapper around DownloadFile::Detach and DownloadFile::Cancel that +// takes ownership of the DownloadFile and hence implicitly destroys it +// at the end of the function. +base::FilePath DownloadFileDetach(std::unique_ptr<DownloadFile> download_file) { + DCHECK(GetDownloadTaskRunner()->RunsTasksInCurrentSequence()); + base::FilePath full_path = download_file->FullPath(); + download_file->Detach(); + return full_path; +} + +base::FilePath MakeCopyOfDownloadFile(DownloadFile* download_file) { + DCHECK(GetDownloadTaskRunner()->RunsTasksInCurrentSequence()); + base::FilePath temp_file_path; + if (!base::CreateTemporaryFile(&temp_file_path)) + return base::FilePath(); + + if (!base::CopyFile(download_file->FullPath(), temp_file_path)) { + DeleteDownloadedFile(temp_file_path); + return base::FilePath(); + } + + return temp_file_path; +} + +void DownloadFileCancel(std::unique_ptr<DownloadFile> download_file) { + DCHECK(GetDownloadTaskRunner()->RunsTasksInCurrentSequence()); + download_file->Cancel(); +} + +// Most of the cancellation pathways behave the same whether the cancellation +// was initiated by ther user (CANCELED) or initiated due to browser context +// shutdown (SHUTDOWN). 
+bool IsCancellation(DownloadInterruptReason reason) { + return reason == DOWNLOAD_INTERRUPT_REASON_USER_SHUTDOWN || + reason == DOWNLOAD_INTERRUPT_REASON_USER_CANCELED; +} + +std::string GetDownloadTypeNames(DownloadItem::DownloadType type) { + switch (type) { + case DownloadItem::TYPE_ACTIVE_DOWNLOAD: + return "NEW_DOWNLOAD"; + case DownloadItem::TYPE_HISTORY_IMPORT: + return "HISTORY_IMPORT"; + case DownloadItem::TYPE_SAVE_PAGE_AS: + return "SAVE_PAGE_AS"; + default: + NOTREACHED(); + return "INVALID_TYPE"; + } +} + +std::string GetDownloadDangerNames(DownloadDangerType type) { + switch (type) { + case DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS: + return "NOT_DANGEROUS"; + case DOWNLOAD_DANGER_TYPE_DANGEROUS_FILE: + return "DANGEROUS_FILE"; + case DOWNLOAD_DANGER_TYPE_DANGEROUS_URL: + return "DANGEROUS_URL"; + case DOWNLOAD_DANGER_TYPE_DANGEROUS_CONTENT: + return "DANGEROUS_CONTENT"; + case DOWNLOAD_DANGER_TYPE_MAYBE_DANGEROUS_CONTENT: + return "MAYBE_DANGEROUS_CONTENT"; + case DOWNLOAD_DANGER_TYPE_UNCOMMON_CONTENT: + return "UNCOMMON_CONTENT"; + case DOWNLOAD_DANGER_TYPE_USER_VALIDATED: + return "USER_VALIDATED"; + case DOWNLOAD_DANGER_TYPE_DANGEROUS_HOST: + return "DANGEROUS_HOST"; + case DOWNLOAD_DANGER_TYPE_POTENTIALLY_UNWANTED: + return "POTENTIALLY_UNWANTED"; + default: + NOTREACHED(); + return "UNKNOWN_DANGER_TYPE"; + } +} + +class DownloadItemActivatedData + : public base::trace_event::ConvertableToTraceFormat { + public: + DownloadItemActivatedData(DownloadItem::DownloadType download_type, + uint32_t download_id, + std::string original_url, + std::string final_url, + std::string file_name, + DownloadDangerType danger_type, + int64_t start_offset, + bool has_user_gesture) + : download_type_(download_type), + download_id_(download_id), + original_url_(original_url), + final_url_(final_url), + file_name_(file_name), + danger_type_(danger_type), + start_offset_(start_offset), + has_user_gesture_(has_user_gesture) {} + + ~DownloadItemActivatedData() override = 
default; + + void AppendAsTraceFormat(std::string* out) const override { + out->append("{"); + out->append(base::StringPrintf( + "\"type\":\"%s\",", GetDownloadTypeNames(download_type_).c_str())); + out->append(base::StringPrintf("\"id\":\"%d\",", download_id_)); + out->append( + base::StringPrintf("\"original_url\":\"%s\",", original_url_.c_str())); + out->append( + base::StringPrintf("\"final_url\":\"%s\",", final_url_.c_str())); + out->append( + base::StringPrintf("\"file_name\":\"%s\",", file_name_.c_str())); + out->append( + base::StringPrintf("\"danger_type\":\"%s\",", + GetDownloadDangerNames(danger_type_).c_str())); + out->append( + base::StringPrintf("\"start_offset\":\"%" PRId64 "\",", start_offset_)); + out->append(base::StringPrintf("\"has_user_gesture\":\"%s\"", + has_user_gesture_ ? "true" : "false")); + out->append("}"); + } + + private: + DownloadItem::DownloadType download_type_; + uint32_t download_id_; + std::string original_url_; + std::string final_url_; + std::string file_name_; + DownloadDangerType danger_type_; + int64_t start_offset_; + bool has_user_gesture_; + DISALLOW_COPY_AND_ASSIGN(DownloadItemActivatedData); +}; + +} // namespace + +// The maximum number of attempts we will make to resume automatically. 
+const int DownloadItemImpl::kMaxAutoResumeAttempts = 5; + +DownloadItemImpl::RequestInfo::RequestInfo( + const std::vector<GURL>& url_chain, + const GURL& referrer_url, + const GURL& site_url, + const GURL& tab_url, + const GURL& tab_referrer_url, + const std::string& suggested_filename, + const base::FilePath& forced_file_path, + ui::PageTransition transition_type, + bool has_user_gesture, + const std::string& remote_address, + base::Time start_time) + : url_chain(url_chain), + referrer_url(referrer_url), + site_url(site_url), + tab_url(tab_url), + tab_referrer_url(tab_referrer_url), + suggested_filename(suggested_filename), + forced_file_path(forced_file_path), + transition_type(transition_type), + has_user_gesture(has_user_gesture), + remote_address(remote_address), + start_time(start_time) {} + +DownloadItemImpl::RequestInfo::RequestInfo(const GURL& url) + : url_chain(std::vector<GURL>(1, url)), start_time(base::Time::Now()) {} + +DownloadItemImpl::RequestInfo::RequestInfo() = default; + +DownloadItemImpl::RequestInfo::RequestInfo( + const DownloadItemImpl::RequestInfo& other) = default; + +DownloadItemImpl::RequestInfo::~RequestInfo() = default; + +DownloadItemImpl::DestinationInfo::DestinationInfo( + const base::FilePath& target_path, + const base::FilePath& current_path, + int64_t received_bytes, + bool all_data_saved, + const std::string& hash, + base::Time end_time) + : target_path(target_path), + current_path(current_path), + received_bytes(received_bytes), + all_data_saved(all_data_saved), + hash(hash), + end_time(end_time) {} + +DownloadItemImpl::DestinationInfo::DestinationInfo( + TargetDisposition target_disposition) + : target_disposition(target_disposition) {} + +DownloadItemImpl::DestinationInfo::DestinationInfo() = default; + +DownloadItemImpl::DestinationInfo::DestinationInfo( + const DownloadItemImpl::DestinationInfo& other) = default; + +DownloadItemImpl::DestinationInfo::~DestinationInfo() = default; + +// Constructor for reading from the 
history service. +DownloadItemImpl::DownloadItemImpl( + DownloadItemImplDelegate* delegate, + const std::string& guid, + uint32_t download_id, + const base::FilePath& current_path, + const base::FilePath& target_path, + const std::vector<GURL>& url_chain, + const GURL& referrer_url, + const GURL& site_url, + const GURL& tab_url, + const GURL& tab_refererr_url, + const std::string& mime_type, + const std::string& original_mime_type, + base::Time start_time, + base::Time end_time, + const std::string& etag, + const std::string& last_modified, + int64_t received_bytes, + int64_t total_bytes, + const std::string& hash, + DownloadItem::DownloadState state, + DownloadDangerType danger_type, + DownloadInterruptReason interrupt_reason, + bool opened, + base::Time last_access_time, + bool transient, + const std::vector<DownloadItem::ReceivedSlice>& received_slices) + : request_info_(url_chain, + referrer_url, + site_url, + tab_url, + tab_refererr_url, + std::string(), + base::FilePath(), + ui::PAGE_TRANSITION_LINK, + false, + std::string(), + start_time), + guid_(guid), + download_id_(download_id), + mime_type_(mime_type), + original_mime_type_(original_mime_type), + total_bytes_(total_bytes), + last_reason_(interrupt_reason), + start_tick_(base::TimeTicks()), + state_(ExternalToInternalState(state)), + danger_type_(danger_type), + delegate_(delegate), + opened_(opened), + last_access_time_(last_access_time), + transient_(transient), + destination_info_(target_path, + current_path, + received_bytes, + state == COMPLETE, + hash, + end_time), + last_modified_time_(last_modified), + etag_(etag), + received_slices_(received_slices), + is_updating_observers_(false), + weak_ptr_factory_(this) { + delegate_->Attach(); + DCHECK(state_ == COMPLETE_INTERNAL || state_ == INTERRUPTED_INTERNAL || + state_ == CANCELLED_INTERNAL); + DCHECK(base::IsValidGUID(guid_)); + Init(false /* not actively downloading */, TYPE_HISTORY_IMPORT); +} + +// Constructing for a regular download: 
+DownloadItemImpl::DownloadItemImpl(DownloadItemImplDelegate* delegate, + uint32_t download_id, + const DownloadCreateInfo& info) + : request_info_(info.url_chain, + info.referrer_url, + info.site_url, + info.tab_url, + info.tab_referrer_url, + base::UTF16ToUTF8(info.save_info->suggested_name), + info.save_info->file_path, + info.transition_type ? info.transition_type.value() + : ui::PAGE_TRANSITION_LINK, + info.has_user_gesture, + info.remote_address, + info.start_time), + guid_(info.guid.empty() ? base::GenerateGUID() : info.guid), + download_id_(download_id), + response_headers_(info.response_headers), + content_disposition_(info.content_disposition), + mime_type_(info.mime_type), + original_mime_type_(info.original_mime_type), + total_bytes_(info.total_bytes), + last_reason_(info.result), + start_tick_(base::TimeTicks::Now()), + state_(INITIAL_INTERNAL), + delegate_(delegate), + is_temporary_(!info.transient && !info.save_info->file_path.empty()), + transient_(info.transient), + destination_info_(info.save_info->prompt_for_save_location + ? TARGET_DISPOSITION_PROMPT + : TARGET_DISPOSITION_OVERWRITE), + last_modified_time_(info.last_modified), + etag_(info.etag), + is_updating_observers_(false), + fetch_error_body_(info.fetch_error_body), + request_headers_(info.request_headers), + download_source_(info.download_source), + weak_ptr_factory_(this) { + delegate_->Attach(); + Init(true /* actively downloading */, TYPE_ACTIVE_DOWNLOAD); + + TRACE_EVENT_INSTANT0("download", "DownloadStarted", TRACE_EVENT_SCOPE_THREAD); +} + +// Constructing for the "Save Page As..." 
feature: +DownloadItemImpl::DownloadItemImpl( + DownloadItemImplDelegate* delegate, + uint32_t download_id, + const base::FilePath& path, + const GURL& url, + const std::string& mime_type, + std::unique_ptr<DownloadRequestHandleInterface> request_handle) + : request_info_(url), + guid_(base::GenerateGUID()), + download_id_(download_id), + mime_type_(mime_type), + original_mime_type_(mime_type), + start_tick_(base::TimeTicks::Now()), + state_(IN_PROGRESS_INTERNAL), + delegate_(delegate), + destination_info_(path, path, 0, false, std::string(), base::Time()), + is_updating_observers_(false), + weak_ptr_factory_(this) { + job_ = DownloadJobFactory::CreateJob(this, std::move(request_handle), + DownloadCreateInfo(), true, nullptr, + nullptr); + delegate_->Attach(); + Init(true /* actively downloading */, TYPE_SAVE_PAGE_AS); +} + +DownloadItemImpl::~DownloadItemImpl() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + // Should always have been nuked before now, at worst in + // DownloadManager shutdown. + DCHECK(!download_file_.get()); + CHECK(!is_updating_observers_); + + for (auto& observer : observers_) + observer.OnDownloadDestroyed(this); + delegate_->AssertStateConsistent(this); + delegate_->Detach(); +} + +void DownloadItemImpl::AddObserver(Observer* observer) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + observers_.AddObserver(observer); +} + +void DownloadItemImpl::RemoveObserver(Observer* observer) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + observers_.RemoveObserver(observer); +} + +void DownloadItemImpl::UpdateObservers() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DVLOG(20) << __func__ << "()"; + + // Nested updates should not be allowed. 
+ DCHECK(!is_updating_observers_); + + is_updating_observers_ = true; + for (auto& observer : observers_) + observer.OnDownloadUpdated(this); + is_updating_observers_ = false; +} + +void DownloadItemImpl::ValidateDangerousDownload() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(!IsDone()); + DCHECK(IsDangerous()); + + DVLOG(20) << __func__ << "() download=" << DebugString(true); + + if (IsDone() || !IsDangerous()) + return; + + RecordDangerousDownloadAccept(GetDangerType(), GetTargetFilePath()); + + danger_type_ = DOWNLOAD_DANGER_TYPE_USER_VALIDATED; + + TRACE_EVENT_INSTANT1("download", "DownloadItemSaftyStateUpdated", + TRACE_EVENT_SCOPE_THREAD, "danger_type", + GetDownloadDangerNames(danger_type_).c_str()); + + UpdateObservers(); // TODO(asanka): This is potentially unsafe. The download + // may not be in a consistent state or around at all after + // invoking observers. http://crbug.com/586610 + + MaybeCompleteDownload(); +} + +void DownloadItemImpl::StealDangerousDownload( + bool delete_file_afterward, + const AcquireFileCallback& callback) { + DVLOG(20) << __func__ << "() download = " << DebugString(true); + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(IsDangerous()); + DCHECK(AllDataSaved()); + + if (delete_file_afterward) { + if (download_file_) { + base::PostTaskAndReplyWithResult( + GetDownloadTaskRunner().get(), FROM_HERE, + base::Bind(&DownloadFileDetach, base::Passed(&download_file_)), + callback); + } else { + callback.Run(GetFullPath()); + } + destination_info_.current_path.clear(); + Remove(); + // Download item has now been deleted. + } else if (download_file_) { + base::PostTaskAndReplyWithResult( + GetDownloadTaskRunner().get(), FROM_HERE, + base::Bind(&MakeCopyOfDownloadFile, download_file_.get()), callback); + } else { + callback.Run(GetFullPath()); + } +} + +void DownloadItemImpl::Pause() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + // Ignore irrelevant states. 
+ if (IsPaused()) + return; + + switch (state_) { + case CANCELLED_INTERNAL: + case COMPLETE_INTERNAL: + case COMPLETING_INTERNAL: + case INITIAL_INTERNAL: + case INTERRUPTED_INTERNAL: + case INTERRUPTED_TARGET_PENDING_INTERNAL: + case RESUMING_INTERNAL: + // No active request. + // TODO(asanka): In the case of RESUMING_INTERNAL, consider setting + // |DownloadJob::is_paused_| even if there's no request currently + // associated with this DII. When a request is assigned (due to a + // resumption, for example) we can honor the |DownloadJob::is_paused_| + // setting. + return; + + case IN_PROGRESS_INTERNAL: + case TARGET_PENDING_INTERNAL: + job_->Pause(); + UpdateObservers(); + return; + + case MAX_DOWNLOAD_INTERNAL_STATE: + case TARGET_RESOLVED_INTERNAL: + NOTREACHED(); + } +} + +void DownloadItemImpl::Resume() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DVLOG(20) << __func__ << "() download = " << DebugString(true); + switch (state_) { + case CANCELLED_INTERNAL: // Nothing to resume. + case COMPLETE_INTERNAL: + case COMPLETING_INTERNAL: + case INITIAL_INTERNAL: + case INTERRUPTED_TARGET_PENDING_INTERNAL: + case RESUMING_INTERNAL: // Resumption in progress. + return; + + case TARGET_PENDING_INTERNAL: + case IN_PROGRESS_INTERNAL: + if (!IsPaused()) + return; + if (job_) + job_->Resume(true); + UpdateObservers(); + return; + + case INTERRUPTED_INTERNAL: + auto_resume_count_ = 0; // User input resets the counter. + ResumeInterruptedDownload(ResumptionRequestSource::USER); + UpdateObservers(); + return; + + case MAX_DOWNLOAD_INTERNAL_STATE: + case TARGET_RESOLVED_INTERNAL: + NOTREACHED(); + } +} + +void DownloadItemImpl::Cancel(bool user_cancel) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DVLOG(20) << __func__ << "() download = " << DebugString(true); + InterruptAndDiscardPartialState( + user_cancel ? 
DOWNLOAD_INTERRUPT_REASON_USER_CANCELED + : DOWNLOAD_INTERRUPT_REASON_USER_SHUTDOWN); + UpdateObservers(); +} + +void DownloadItemImpl::Remove() { + DVLOG(20) << __func__ << "() download = " << DebugString(true); + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + RecordDownloadDeletion(GetEndTime(), GetMimeType()); + + delegate_->AssertStateConsistent(this); + InterruptAndDiscardPartialState(DOWNLOAD_INTERRUPT_REASON_USER_CANCELED); + UpdateObservers(); + delegate_->AssertStateConsistent(this); + + NotifyRemoved(); + delegate_->DownloadRemoved(this); + // We have now been deleted. +} + +void DownloadItemImpl::OpenDownload() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + if (!IsDone()) { + // We don't honor the open_when_complete_ flag for temporary + // downloads. Don't set it because it shows up in the UI. + if (!IsTemporary()) + open_when_complete_ = !open_when_complete_; + return; + } + + if (state_ != COMPLETE_INTERNAL || file_externally_removed_) + return; + + // Ideally, we want to detect errors in opening and report them, but we + // don't generally have the proper interface for that to the external + // program that opens the file. So instead we spawn a check to update + // the UI if the file has been deleted in parallel with the open. 
+ delegate_->CheckForFileRemoval(this); + RecordOpen(GetEndTime(), !GetOpened()); + opened_ = true; + last_access_time_ = base::Time::Now(); + for (auto& observer : observers_) + observer.OnDownloadOpened(this); + delegate_->OpenDownload(this); +} + +void DownloadItemImpl::ShowDownloadInShell() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + delegate_->ShowDownloadInShell(this); +} + +uint32_t DownloadItemImpl::GetId() const { + return download_id_; +} + +const std::string& DownloadItemImpl::GetGuid() const { + return guid_; +} + +DownloadItem::DownloadState DownloadItemImpl::GetState() const { + return InternalToExternalState(state_); +} + +DownloadInterruptReason DownloadItemImpl::GetLastReason() const { + return last_reason_; +} + +bool DownloadItemImpl::IsPaused() const { + return job_ ? job_->is_paused() : false; +} + +bool DownloadItemImpl::IsTemporary() const { + return is_temporary_; +} + +bool DownloadItemImpl::CanResume() const { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + switch (state_) { + case INITIAL_INTERNAL: + case COMPLETING_INTERNAL: + case COMPLETE_INTERNAL: + case CANCELLED_INTERNAL: + case RESUMING_INTERNAL: + case INTERRUPTED_TARGET_PENDING_INTERNAL: + return false; + + case TARGET_PENDING_INTERNAL: + case TARGET_RESOLVED_INTERNAL: + case IN_PROGRESS_INTERNAL: + return IsPaused(); + + case INTERRUPTED_INTERNAL: { + ResumeMode resume_mode = GetResumeMode(); + // Only allow Resume() calls if the resumption mode requires a user + // action. 
+ return resume_mode == ResumeMode::USER_RESTART || + resume_mode == ResumeMode::USER_CONTINUE; + } + + case MAX_DOWNLOAD_INTERNAL_STATE: + NOTREACHED(); + } + return false; +} + +bool DownloadItemImpl::IsDone() const { + switch (state_) { + case INITIAL_INTERNAL: + case COMPLETING_INTERNAL: + case RESUMING_INTERNAL: + case TARGET_PENDING_INTERNAL: + case INTERRUPTED_TARGET_PENDING_INTERNAL: + case TARGET_RESOLVED_INTERNAL: + case IN_PROGRESS_INTERNAL: + return false; + + case COMPLETE_INTERNAL: + case CANCELLED_INTERNAL: + return true; + + case INTERRUPTED_INTERNAL: + return !CanResume(); + + case MAX_DOWNLOAD_INTERNAL_STATE: + NOTREACHED(); + } + return false; +} + +int64_t DownloadItemImpl::GetBytesWasted() const { + return bytes_wasted_; +} + +const GURL& DownloadItemImpl::GetURL() const { + return request_info_.url_chain.empty() ? GURL::EmptyGURL() + : request_info_.url_chain.back(); +} + +const std::vector<GURL>& DownloadItemImpl::GetUrlChain() const { + return request_info_.url_chain; +} + +const GURL& DownloadItemImpl::GetOriginalUrl() const { + // Be careful about taking the front() of possibly-empty vectors! + // http://crbug.com/190096 + return request_info_.url_chain.empty() ? 
GURL::EmptyGURL() + : request_info_.url_chain.front(); +} + +const GURL& DownloadItemImpl::GetReferrerUrl() const { + return request_info_.referrer_url; +} + +const GURL& DownloadItemImpl::GetSiteUrl() const { + return request_info_.site_url; +} + +const GURL& DownloadItemImpl::GetTabUrl() const { + return request_info_.tab_url; +} + +const GURL& DownloadItemImpl::GetTabReferrerUrl() const { + return request_info_.tab_referrer_url; +} + +std::string DownloadItemImpl::GetSuggestedFilename() const { + return request_info_.suggested_filename; +} + +const scoped_refptr<const net::HttpResponseHeaders>& +DownloadItemImpl::GetResponseHeaders() const { + return response_headers_; +} + +std::string DownloadItemImpl::GetContentDisposition() const { + return content_disposition_; +} + +std::string DownloadItemImpl::GetMimeType() const { + return mime_type_; +} + +std::string DownloadItemImpl::GetOriginalMimeType() const { + return original_mime_type_; +} + +std::string DownloadItemImpl::GetRemoteAddress() const { + return request_info_.remote_address; +} + +bool DownloadItemImpl::HasUserGesture() const { + return request_info_.has_user_gesture; +} + +ui::PageTransition DownloadItemImpl::GetTransitionType() const { + return request_info_.transition_type; +} + +const std::string& DownloadItemImpl::GetLastModifiedTime() const { + return last_modified_time_; +} + +const std::string& DownloadItemImpl::GetETag() const { + return etag_; +} + +bool DownloadItemImpl::IsSavePackageDownload() const { + return job_ && job_->IsSavePackageDownload(); +} + +const base::FilePath& DownloadItemImpl::GetFullPath() const { + return destination_info_.current_path; +} + +const base::FilePath& DownloadItemImpl::GetTargetFilePath() const { + return destination_info_.target_path; +} + +const base::FilePath& DownloadItemImpl::GetForcedFilePath() const { + // TODO(asanka): Get rid of GetForcedFilePath(). We should instead just + // require that clients respect GetTargetFilePath() if it is already set. 
+ return request_info_.forced_file_path; +} + +base::FilePath DownloadItemImpl::GetFileNameToReportUser() const { + if (!display_name_.empty()) + return display_name_; + return GetTargetFilePath().BaseName(); +} + +DownloadItem::TargetDisposition DownloadItemImpl::GetTargetDisposition() const { + return destination_info_.target_disposition; +} + +const std::string& DownloadItemImpl::GetHash() const { + return destination_info_.hash; +} + +bool DownloadItemImpl::GetFileExternallyRemoved() const { + return file_externally_removed_; +} + +void DownloadItemImpl::DeleteFile(const base::Callback<void(bool)>& callback) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + if (GetState() != DownloadItem::COMPLETE) { + // Pass a null WeakPtr so it doesn't call OnDownloadedFileRemoved. + base::SequencedTaskRunnerHandle::Get()->PostTask( + FROM_HERE, + base::BindOnce(&DeleteDownloadedFileDone, + base::WeakPtr<DownloadItemImpl>(), callback, false)); + return; + } + if (GetFullPath().empty() || file_externally_removed_) { + // Pass a null WeakPtr so it doesn't call OnDownloadedFileRemoved. 
+ base::SequencedTaskRunnerHandle::Get()->PostTask( + FROM_HERE, + base::BindOnce(&DeleteDownloadedFileDone, + base::WeakPtr<DownloadItemImpl>(), callback, true)); + return; + } + base::PostTaskAndReplyWithResult( + GetDownloadTaskRunner().get(), FROM_HERE, + base::Bind(&DeleteDownloadedFile, GetFullPath()), + base::Bind(&DeleteDownloadedFileDone, weak_ptr_factory_.GetWeakPtr(), + callback)); +} + +DownloadFile* DownloadItemImpl::GetDownloadFile() { + return download_file_.get(); +} + +bool DownloadItemImpl::IsDangerous() const { + return (danger_type_ == DOWNLOAD_DANGER_TYPE_DANGEROUS_FILE || + danger_type_ == DOWNLOAD_DANGER_TYPE_DANGEROUS_URL || + danger_type_ == DOWNLOAD_DANGER_TYPE_DANGEROUS_CONTENT || + danger_type_ == DOWNLOAD_DANGER_TYPE_UNCOMMON_CONTENT || + danger_type_ == DOWNLOAD_DANGER_TYPE_DANGEROUS_HOST || + danger_type_ == DOWNLOAD_DANGER_TYPE_POTENTIALLY_UNWANTED); +} + +DownloadDangerType DownloadItemImpl::GetDangerType() const { + return danger_type_; +} + +bool DownloadItemImpl::TimeRemaining(base::TimeDelta* remaining) const { + if (total_bytes_ <= 0) + return false; // We never received the content_length for this download. + + int64_t speed = CurrentSpeed(); + if (speed == 0) + return false; + + *remaining = + base::TimeDelta::FromSeconds((total_bytes_ - GetReceivedBytes()) / speed); + return true; +} + +int64_t DownloadItemImpl::CurrentSpeed() const { + if (IsPaused()) + return 0; + return bytes_per_sec_; +} + +int DownloadItemImpl::PercentComplete() const { + // If the delegate is delaying completion of the download, then we have no + // idea how long it will take. 
+ if (delegate_delayed_complete_ || total_bytes_ <= 0) + return -1; + + return static_cast<int>(GetReceivedBytes() * 100.0 / total_bytes_); +} + +bool DownloadItemImpl::AllDataSaved() const { + return destination_info_.all_data_saved; +} + +int64_t DownloadItemImpl::GetTotalBytes() const { + return total_bytes_; +} + +int64_t DownloadItemImpl::GetReceivedBytes() const { + return destination_info_.received_bytes; +} + +const std::vector<DownloadItem::ReceivedSlice>& +DownloadItemImpl::GetReceivedSlices() const { + return received_slices_; +} + +base::Time DownloadItemImpl::GetStartTime() const { + return request_info_.start_time; +} + +base::Time DownloadItemImpl::GetEndTime() const { + return destination_info_.end_time; +} + +bool DownloadItemImpl::CanShowInFolder() { + // A download can be shown in the folder if the downloaded file is in a known + // location. + return CanOpenDownload() && !GetFullPath().empty(); +} + +bool DownloadItemImpl::CanOpenDownload() { + // We can open the file or mark it for opening on completion if the download + // is expected to complete successfully. Exclude temporary downloads, since + // they aren't owned by the download system. 
+ const bool is_complete = GetState() == DownloadItem::COMPLETE; + return (!IsDone() || is_complete) && !IsTemporary() && + !file_externally_removed_ && + delegate_->IsMostRecentDownloadItemAtFilePath(this); +} + +bool DownloadItemImpl::ShouldOpenFileBasedOnExtension() { + return delegate_->ShouldOpenFileBasedOnExtension(GetTargetFilePath()); +} + +bool DownloadItemImpl::GetOpenWhenComplete() const { + return open_when_complete_; +} + +bool DownloadItemImpl::GetAutoOpened() { + return auto_opened_; +} + +bool DownloadItemImpl::GetOpened() const { + return opened_; +} + +base::Time DownloadItemImpl::GetLastAccessTime() const { + return last_access_time_; +} + +bool DownloadItemImpl::IsTransient() const { + return transient_; +} + +void DownloadItemImpl::OnContentCheckCompleted(DownloadDangerType danger_type, + DownloadInterruptReason reason) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(AllDataSaved()); + + // Danger type is only allowed to be set on an active download after all data + // has been saved. This excludes all other states. In particular, + // OnContentCheckCompleted() isn't allowed on an INTERRUPTED download since + // such an interruption would need to happen between OnAllDataSaved() and + // OnContentCheckCompleted() during which no disk or network activity + // should've taken place. 
+ DCHECK_EQ(state_, IN_PROGRESS_INTERNAL); + DVLOG(20) << __func__ << "() danger_type=" << danger_type + << " download=" << DebugString(true); + SetDangerType(danger_type); + if (reason != DOWNLOAD_INTERRUPT_REASON_NONE) { + InterruptAndDiscardPartialState(reason); + DCHECK_EQ(ResumeMode::INVALID, GetResumeMode()); + } + UpdateObservers(); +} + +void DownloadItemImpl::SetOpenWhenComplete(bool open) { + open_when_complete_ = open; +} + +void DownloadItemImpl::SetOpened(bool opened) { + opened_ = opened; +} + +void DownloadItemImpl::SetLastAccessTime(base::Time last_access_time) { + last_access_time_ = last_access_time; + UpdateObservers(); +} + +void DownloadItemImpl::SetDisplayName(const base::FilePath& name) { + display_name_ = name; +} + +std::string DownloadItemImpl::DebugString(bool verbose) const { + std::string description = base::StringPrintf( + "{ id = %d" + " state = %s", + download_id_, DebugDownloadStateString(state_)); + + // Construct a string of the URL chain. + std::string url_list("<none>"); + if (!request_info_.url_chain.empty()) { + std::vector<GURL>::const_iterator iter = request_info_.url_chain.begin(); + std::vector<GURL>::const_iterator last = request_info_.url_chain.end(); + url_list = (*iter).is_valid() ? (*iter).spec() : "<invalid>"; + ++iter; + for (; verbose && (iter != last); ++iter) { + url_list += " ->\n\t"; + const GURL& next_url = *iter; + url_list += next_url.is_valid() ? 
next_url.spec() : "<invalid>"; + } + } + + if (verbose) { + description += base::StringPrintf( + " total = %" PRId64 " received = %" PRId64 + " reason = %s" + " paused = %c" + " resume_mode = %s" + " auto_resume_count = %d" + " danger = %d" + " all_data_saved = %c" + " last_modified = '%s'" + " etag = '%s'" + " has_download_file = %s" + " url_chain = \n\t\"%s\"\n\t" + " current_path = \"%" PRFilePath + "\"\n\t" + " target_path = \"%" PRFilePath + "\"" + " referrer = \"%s\"" + " site_url = \"%s\"", + GetTotalBytes(), GetReceivedBytes(), + DownloadInterruptReasonToString(last_reason_).c_str(), + IsPaused() ? 'T' : 'F', DebugResumeModeString(GetResumeMode()), + auto_resume_count_, GetDangerType(), AllDataSaved() ? 'T' : 'F', + GetLastModifiedTime().c_str(), GetETag().c_str(), + download_file_.get() ? "true" : "false", url_list.c_str(), + GetFullPath().value().c_str(), GetTargetFilePath().value().c_str(), + GetReferrerUrl().spec().c_str(), GetSiteUrl().spec().c_str()); + } else { + description += base::StringPrintf(" url = \"%s\"", url_list.c_str()); + } + + description += " }"; + + return description; +} + +void DownloadItemImpl::SimulateErrorForTesting(DownloadInterruptReason reason) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + InterruptWithPartialState(GetReceivedBytes(), nullptr, reason); + UpdateObservers(); +} + +ResumeMode DownloadItemImpl::GetResumeMode() const { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + // Only support resumption for HTTP(S). + if (!GetURL().SchemeIsHTTPOrHTTPS()) + return ResumeMode::INVALID; + + // We can't continue without a handle on the intermediate file. + // We also can't continue if we don't have some verifier to make sure + // we're getting the same file. + bool restart_required = + (GetFullPath().empty() || (etag_.empty() && last_modified_time_.empty())); + + // We won't auto-restart if we've used up our attempts or the + // download has been paused by user action. 
+ bool user_action_required = + (auto_resume_count_ >= kMaxAutoResumeAttempts || IsPaused()); + + switch (last_reason_) { + case DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR: + case DOWNLOAD_INTERRUPT_REASON_NETWORK_TIMEOUT: + case DOWNLOAD_INTERRUPT_REASON_SERVER_CONTENT_LENGTH_MISMATCH: + break; + + case DOWNLOAD_INTERRUPT_REASON_SERVER_NO_RANGE: + // The server disagreed with the file offset that we sent. + + case DOWNLOAD_INTERRUPT_REASON_FILE_HASH_MISMATCH: + // The file on disk was found to not match the expected hash. Discard and + // start from beginning. + + case DOWNLOAD_INTERRUPT_REASON_FILE_TOO_SHORT: + // The [possibly persisted] file offset disagreed with the file on disk. + + // The intermediate stub is not usable and the server is responding. Hence + // retrying the request from the beginning is likely to work. + restart_required = true; + break; + + case DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED: + case DOWNLOAD_INTERRUPT_REASON_NETWORK_DISCONNECTED: + case DOWNLOAD_INTERRUPT_REASON_NETWORK_SERVER_DOWN: + case DOWNLOAD_INTERRUPT_REASON_SERVER_FAILED: + case DOWNLOAD_INTERRUPT_REASON_SERVER_UNREACHABLE: + case DOWNLOAD_INTERRUPT_REASON_USER_SHUTDOWN: + case DOWNLOAD_INTERRUPT_REASON_CRASH: + // It is not clear whether attempting a resumption is acceptable at this + // time or whether it would work at all. Hence allow the user to retry the + // download manually. + user_action_required = true; + break; + + case DOWNLOAD_INTERRUPT_REASON_FILE_NO_SPACE: + // There was no space. Require user interaction so that the user may, for + // example, choose a different location to store the file. Or they may + // free up some space on the targret device and retry. But try to reuse + // the partial stub. 
+ user_action_required = true; + break; + + case DOWNLOAD_INTERRUPT_REASON_FILE_FAILED: + case DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED: + case DOWNLOAD_INTERRUPT_REASON_FILE_NAME_TOO_LONG: + case DOWNLOAD_INTERRUPT_REASON_FILE_TOO_LARGE: + // Assume the partial stub is unusable. Also it may not be possible to + // restart immediately. + user_action_required = true; + restart_required = true; + break; + + case DOWNLOAD_INTERRUPT_REASON_NONE: + case DOWNLOAD_INTERRUPT_REASON_NETWORK_INVALID_REQUEST: + case DOWNLOAD_INTERRUPT_REASON_FILE_VIRUS_INFECTED: + case DOWNLOAD_INTERRUPT_REASON_SERVER_BAD_CONTENT: + case DOWNLOAD_INTERRUPT_REASON_USER_CANCELED: + case DOWNLOAD_INTERRUPT_REASON_FILE_BLOCKED: + case DOWNLOAD_INTERRUPT_REASON_FILE_SECURITY_CHECK_FAILED: + case DOWNLOAD_INTERRUPT_REASON_SERVER_UNAUTHORIZED: + case DOWNLOAD_INTERRUPT_REASON_SERVER_CERT_PROBLEM: + case DOWNLOAD_INTERRUPT_REASON_SERVER_FORBIDDEN: + case DOWNLOAD_INTERRUPT_REASON_FILE_SAME_AS_SOURCE: + return ResumeMode::INVALID; + } + + if (user_action_required && restart_required) + return ResumeMode::USER_RESTART; + + if (restart_required) + return ResumeMode::IMMEDIATE_RESTART; + + if (user_action_required) + return ResumeMode::USER_CONTINUE; + + return ResumeMode::IMMEDIATE_CONTINUE; +} + +void DownloadItemImpl::UpdateValidatorsOnResumption( + const DownloadCreateInfo& new_create_info) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK_EQ(RESUMING_INTERNAL, state_); + DCHECK(!new_create_info.url_chain.empty()); + + // We are going to tack on any new redirects to our list of redirects. + // When a download is resumed, the URL used for the resumption request is the + // one at the end of the previous redirect chain. Tacking additional redirects + // to the end of this chain ensures that: + // - If the download needs to be resumed again, the ETag/Last-Modified headers + // will be used with the last server that sent them to us. 
+ // - The redirect chain contains all the servers that were involved in this + // download since the initial request, in order. + std::vector<GURL>::const_iterator chain_iter = + new_create_info.url_chain.begin(); + if (*chain_iter == request_info_.url_chain.back()) + ++chain_iter; + + // Record some stats. If the precondition failed (the server returned + // HTTP_PRECONDITION_FAILED), then the download will automatically retried as + // a full request rather than a partial. Full restarts clobber validators. + int origin_state = 0; + bool is_partial = GetReceivedBytes() > 0; + if (chain_iter != new_create_info.url_chain.end()) + origin_state |= ORIGIN_STATE_ON_RESUMPTION_ADDITIONAL_REDIRECTS; + if (etag_ != new_create_info.etag || + last_modified_time_ != new_create_info.last_modified) { + received_slices_.clear(); + destination_info_.received_bytes = 0; + origin_state |= ORIGIN_STATE_ON_RESUMPTION_VALIDATORS_CHANGED; + } + if (content_disposition_ != new_create_info.content_disposition) + origin_state |= ORIGIN_STATE_ON_RESUMPTION_CONTENT_DISPOSITION_CHANGED; + RecordOriginStateOnResumption( + is_partial, static_cast<OriginStateOnResumption>(origin_state)); + + request_info_.url_chain.insert(request_info_.url_chain.end(), chain_iter, + new_create_info.url_chain.end()); + etag_ = new_create_info.etag; + last_modified_time_ = new_create_info.last_modified; + response_headers_ = new_create_info.response_headers; + content_disposition_ = new_create_info.content_disposition; + // It is possible that the previous download attempt failed right before the + // response is received. Need to reset the MIME type. + mime_type_ = new_create_info.mime_type; + + // Don't update observers. This method is expected to be called just before a + // DownloadFile is created and Start() is called. The observers will be + // notified when the download transitions to the IN_PROGRESS state. 
+} + +void DownloadItemImpl::NotifyRemoved() { + for (auto& observer : observers_) + observer.OnDownloadRemoved(this); +} + +void DownloadItemImpl::OnDownloadedFileRemoved() { + file_externally_removed_ = true; + DVLOG(20) << __func__ << "() download=" << DebugString(true); + UpdateObservers(); +} + +base::WeakPtr<DownloadDestinationObserver> +DownloadItemImpl::DestinationObserverAsWeakPtr() { + return weak_ptr_factory_.GetWeakPtr(); +} + +void DownloadItemImpl::SetTotalBytes(int64_t total_bytes) { + total_bytes_ = total_bytes; +} + +void DownloadItemImpl::OnAllDataSaved( + int64_t total_bytes, + std::unique_ptr<crypto::SecureHash> hash_state) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(!AllDataSaved()); + destination_info_.all_data_saved = true; + SetTotalBytes(total_bytes); + UpdateProgress(total_bytes, 0); + received_slices_.clear(); + SetHashState(std::move(hash_state)); + hash_state_.reset(); // No need to retain hash_state_ since we are done with + // the download and don't expect to receive any more + // data. + + if (received_bytes_at_length_mismatch_ > 0) { + if (total_bytes > received_bytes_at_length_mismatch_) { + RecordDownloadCountWithSource( + MORE_BYTES_RECEIVED_AFTER_CONTENT_LENGTH_MISMATCH_COUNT, + download_source_); + } else if (total_bytes == received_bytes_at_length_mismatch_) { + RecordDownloadCountWithSource( + NO_BYTES_RECEIVED_AFTER_CONTENT_LENGTH_MISMATCH_COUNT, + download_source_); + } else { + // This could happen if the content changes on the server. 
+ } + } + DVLOG(20) << __func__ << "() download=" << DebugString(true); + UpdateObservers(); +} + +void DownloadItemImpl::MarkAsComplete() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + DCHECK(AllDataSaved()); + destination_info_.end_time = base::Time::Now(); + TransitionTo(COMPLETE_INTERNAL); + UpdateObservers(); +} + +void DownloadItemImpl::DestinationUpdate( + int64_t bytes_so_far, + int64_t bytes_per_sec, + const std::vector<DownloadItem::ReceivedSlice>& received_slices) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + // If the download is in any other state we don't expect any + // DownloadDestinationObserver callbacks. An interruption or a cancellation + // results in a call to ReleaseDownloadFile which invalidates the weak + // reference held by the DownloadFile and hence cuts off any pending + // callbacks. + DCHECK(state_ == TARGET_PENDING_INTERNAL || state_ == IN_PROGRESS_INTERNAL || + state_ == INTERRUPTED_TARGET_PENDING_INTERNAL); + + // There must be no pending deferred_interrupt_reason_. + DCHECK(deferred_interrupt_reason_ == DOWNLOAD_INTERRUPT_REASON_NONE || + state_ == INTERRUPTED_TARGET_PENDING_INTERNAL); + + DVLOG(20) << __func__ << "() so_far=" << bytes_so_far + << " per_sec=" << bytes_per_sec + << " download=" << DebugString(true); + + UpdateProgress(bytes_so_far, bytes_per_sec); + received_slices_ = received_slices; + TRACE_EVENT_INSTANT1("download", "DownloadItemUpdated", + TRACE_EVENT_SCOPE_THREAD, "bytes_so_far", + GetReceivedBytes()); + + UpdateObservers(); +} + +void DownloadItemImpl::DestinationError( + DownloadInterruptReason reason, + int64_t bytes_so_far, + std::unique_ptr<crypto::SecureHash> secure_hash) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + // If the download is in any other state we don't expect any + // DownloadDestinationObserver callbacks. 
An interruption or a cancellation + // results in a call to ReleaseDownloadFile which invalidates the weak + // reference held by the DownloadFile and hence cuts off any pending + // callbacks. + DCHECK(state_ == TARGET_PENDING_INTERNAL || state_ == IN_PROGRESS_INTERNAL); + DVLOG(20) << __func__ + << "() reason:" << DownloadInterruptReasonToString(reason) + << " this:" << DebugString(true); + + InterruptWithPartialState(bytes_so_far, std::move(secure_hash), reason); + UpdateObservers(); +} + +void DownloadItemImpl::DestinationCompleted( + int64_t total_bytes, + std::unique_ptr<crypto::SecureHash> secure_hash) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + // If the download is in any other state we don't expect any + // DownloadDestinationObserver callbacks. An interruption or a cancellation + // results in a call to ReleaseDownloadFile which invalidates the weak + // reference held by the DownloadFile and hence cuts off any pending + // callbacks. + DCHECK(state_ == TARGET_PENDING_INTERNAL || state_ == IN_PROGRESS_INTERNAL || + state_ == INTERRUPTED_TARGET_PENDING_INTERNAL); + DVLOG(20) << __func__ << "() download=" << DebugString(true); + + OnAllDataSaved(total_bytes, std::move(secure_hash)); + MaybeCompleteDownload(); +} + +// **** Download progression cascade + +void DownloadItemImpl::Init(bool active, + DownloadItem::DownloadType download_type) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + std::string file_name; + if (download_type == TYPE_HISTORY_IMPORT) { + // target_path_ works for History and Save As versions. + file_name = GetTargetFilePath().AsUTF8Unsafe(); + } else { + // See if it's set programmatically. + file_name = GetForcedFilePath().AsUTF8Unsafe(); + // Possibly has a 'download' attribute for the anchor. + if (file_name.empty()) + file_name = GetSuggestedFilename(); + // From the URL file name. 
+ if (file_name.empty()) + file_name = GetURL().ExtractFileName(); + } + + auto active_data = std::make_unique<DownloadItemActivatedData>( + download_type, GetId(), GetOriginalUrl().spec(), GetURL().spec(), + file_name, GetDangerType(), GetReceivedBytes(), HasUserGesture()); + + if (active) { + TRACE_EVENT_ASYNC_BEGIN1("download", "DownloadItemActive", download_id_, + "download_item", std::move(active_data)); + } else { + TRACE_EVENT_INSTANT1("download", "DownloadItemActive", + TRACE_EVENT_SCOPE_THREAD, "download_item", + std::move(active_data)); + + // Read data from in-progress cache. + auto in_progress_entry = delegate_->GetInProgressEntry(this); + if (in_progress_entry) { + download_source_ = in_progress_entry->download_source; + fetch_error_body_ = in_progress_entry->fetch_error_body; + request_headers_ = in_progress_entry->request_headers; + } + } + + DVLOG(20) << __func__ << "() " << DebugString(true); +} + +// We're starting the download. +void DownloadItemImpl::Start( + std::unique_ptr<DownloadFile> file, + std::unique_ptr<DownloadRequestHandleInterface> req_handle, + const DownloadCreateInfo& new_create_info, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory, + net::URLRequestContextGetter* url_request_context_getter) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(!download_file_.get()); + DVLOG(20) << __func__ << "() this=" << DebugString(true); + RecordDownloadCountWithSource(START_COUNT, download_source_); + + download_file_ = std::move(file); + job_ = DownloadJobFactory::CreateJob( + this, std::move(req_handle), new_create_info, false, + std::move(shared_url_loader_factory), url_request_context_getter); + if (job_->IsParallelizable()) { + RecordParallelizableDownloadCount(START_COUNT, IsParallelDownloadEnabled()); + } + + deferred_interrupt_reason_ = DOWNLOAD_INTERRUPT_REASON_NONE; + + if (state_ == CANCELLED_INTERNAL) { + // The download was in the process of resuming when it was cancelled. Don't + // proceed. 
+ ReleaseDownloadFile(true); + job_->Cancel(true); + return; + } + + // The state could be one of the following: + // + // INITIAL_INTERNAL: A normal download attempt. + // + // RESUMING_INTERNAL: A resumption attempt. May or may not have been + // successful. + DCHECK(state_ == INITIAL_INTERNAL || state_ == RESUMING_INTERNAL); + + // If the state_ is INITIAL_INTERNAL, then the target path must be empty. + DCHECK(state_ != INITIAL_INTERNAL || GetTargetFilePath().empty()); + + // If a resumption attempted failed, or if the download was DOA, then the + // download should go back to being interrupted. + if (new_create_info.result != DOWNLOAD_INTERRUPT_REASON_NONE) { + DCHECK(!download_file_.get()); + + // Download requests that are interrupted by Start() should result in a + // DownloadCreateInfo with an intact DownloadSaveInfo. + DCHECK(new_create_info.save_info); + + int64_t offset = new_create_info.save_info->offset; + std::unique_ptr<crypto::SecureHash> hash_state = + new_create_info.save_info->hash_state + ? 
new_create_info.save_info->hash_state->Clone() + : nullptr; + + destination_info_.received_bytes = offset; + hash_state_ = std::move(hash_state); + destination_info_.hash.clear(); + deferred_interrupt_reason_ = new_create_info.result; + received_slices_.clear(); + TransitionTo(INTERRUPTED_TARGET_PENDING_INTERNAL); + DetermineDownloadTarget(); + return; + } + + if (state_ == INITIAL_INTERNAL) { + RecordDownloadCountWithSource(NEW_DOWNLOAD_COUNT, download_source_); + if (job_->IsParallelizable()) { + RecordParallelizableDownloadCount(NEW_DOWNLOAD_COUNT, + IsParallelDownloadEnabled()); + } + RecordDownloadMimeType(mime_type_); + DownloadContent file_type = DownloadContentFromMimeType(mime_type_, false); + auto in_progress_entry = delegate_->GetInProgressEntry(this); + if (in_progress_entry) { + DownloadUkmHelper::RecordDownloadStarted( + in_progress_entry->ukm_download_id, new_create_info.ukm_source_id, + file_type, download_source_); + } + + if (!delegate_->IsOffTheRecord()) { + RecordDownloadCountWithSource(NEW_DOWNLOAD_COUNT_NORMAL_PROFILE, + download_source_); + RecordDownloadMimeTypeForNormalProfile(mime_type_); + } + } + + // Successful download start. + DCHECK(download_file_); + DCHECK(job_); + + if (state_ == RESUMING_INTERNAL) + UpdateValidatorsOnResumption(new_create_info); + + // If the download is not parallel download during resumption, clear the + // |received_slices_|. 
+ if (!job_->IsParallelizable() && !received_slices_.empty()) { + destination_info_.received_bytes = + GetMaxContiguousDataBlockSizeFromBeginning(received_slices_); + received_slices_.clear(); + } + + TransitionTo(TARGET_PENDING_INTERNAL); + + job_->Start(download_file_.get(), + base::Bind(&DownloadItemImpl::OnDownloadFileInitialized, + weak_ptr_factory_.GetWeakPtr()), + GetReceivedSlices()); +} + +void DownloadItemImpl::OnDownloadFileInitialized(DownloadInterruptReason result, + int64_t bytes_wasted) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(state_ == TARGET_PENDING_INTERNAL || + state_ == INTERRUPTED_TARGET_PENDING_INTERNAL) + << "Unexpected state: " << DebugDownloadStateString(state_); + + DVLOG(20) << __func__ + << "() result:" << DownloadInterruptReasonToString(result); + + if (bytes_wasted > 0) { + bytes_wasted_ = bytes_wasted; + auto in_progress_entry = delegate_->GetInProgressEntry(this); + if (in_progress_entry.has_value()) { + DownloadEntry entry = in_progress_entry.value(); + bytes_wasted_ = entry.bytes_wasted + bytes_wasted; + delegate_->ReportBytesWasted(this); + } + } + + // Handle download interrupt reason. + if (result != DOWNLOAD_INTERRUPT_REASON_NONE) { + ReleaseDownloadFile(true); + InterruptAndDiscardPartialState(result); + } + + DetermineDownloadTarget(); +} + +void DownloadItemImpl::DetermineDownloadTarget() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DVLOG(20) << __func__ << "() " << DebugString(true); + + RecordDownloadCountWithSource(DETERMINE_DOWNLOAD_TARGET_COUNT, + download_source_); + delegate_->DetermineDownloadTarget( + this, base::Bind(&DownloadItemImpl::OnDownloadTargetDetermined, + weak_ptr_factory_.GetWeakPtr())); +} + +// Called by delegate_ when the download target path has been determined. 
+void DownloadItemImpl::OnDownloadTargetDetermined( + const base::FilePath& target_path, + TargetDisposition disposition, + DownloadDangerType danger_type, + const base::FilePath& intermediate_path, + DownloadInterruptReason interrupt_reason) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + if (state_ == CANCELLED_INTERNAL) + return; + + DCHECK(state_ == TARGET_PENDING_INTERNAL || + state_ == INTERRUPTED_TARGET_PENDING_INTERNAL); + DVLOG(20) << __func__ << "() target_path:" << target_path.value() + << " intermediate_path:" << intermediate_path.value() + << " disposition:" << disposition << " danger_type:" << danger_type + << " interrupt_reason:" + << DownloadInterruptReasonToString(interrupt_reason) + << " this:" << DebugString(true); + + RecordDownloadCountWithSource(DOWNLOAD_TARGET_DETERMINED_COUNT, + download_source_); + + if (IsCancellation(interrupt_reason) || target_path.empty()) { + Cancel(true); + return; + } + + // There were no other pending errors, and we just failed to determined the + // download target. The target path, if it is non-empty, should be considered + // suspect. The safe option here is to interrupt the download without doing an + // intermediate rename. In the case of a new download, we'll lose the partial + // data that may have been downloaded, but that should be a small loss. + if (state_ == TARGET_PENDING_INTERNAL && + interrupt_reason != DOWNLOAD_INTERRUPT_REASON_NONE) { + deferred_interrupt_reason_ = interrupt_reason; + TransitionTo(INTERRUPTED_TARGET_PENDING_INTERNAL); + OnTargetResolved(); + return; + } + + destination_info_.target_path = target_path; + destination_info_.target_disposition = disposition; + SetDangerType(danger_type); + + // This was an interrupted download that was looking for a filename. Resolve + // early without performing the intermediate rename. If there is a + // DownloadFile, then that should be renamed to the intermediate name before + // we can interrupt the download. 
Otherwise we may lose intermediate state. + if (state_ == INTERRUPTED_TARGET_PENDING_INTERNAL && !download_file_) { + OnTargetResolved(); + return; + } + + // We want the intermediate and target paths to refer to the same directory so + // that they are both on the same device and subject to same + // space/permission/availability constraints. + DCHECK(intermediate_path.DirName() == target_path.DirName()); + + // During resumption, we may choose to proceed with the same intermediate + // file. No rename is necessary if our intermediate file already has the + // correct name. + // + // The intermediate name may change from its original value during filename + // determination on resumption, for example if the reason for the interruption + // was the download target running out space, resulting in a user prompt. + if (intermediate_path == GetFullPath()) { + OnDownloadRenamedToIntermediateName(DOWNLOAD_INTERRUPT_REASON_NONE, + intermediate_path); + return; + } + + // Rename to intermediate name. + // TODO(asanka): Skip this rename if AllDataSaved() is true. This avoids a + // spurious rename when we can just rename to the final + // filename. Unnecessary renames may cause bugs like + // http://crbug.com/74187. + DCHECK(!IsSavePackageDownload()); + DownloadFile::RenameCompletionCallback callback = + base::Bind(&DownloadItemImpl::OnDownloadRenamedToIntermediateName, + weak_ptr_factory_.GetWeakPtr()); + GetDownloadTaskRunner()->PostTask( + FROM_HERE, + base::BindOnce(&DownloadFile::RenameAndUniquify, + // Safe because we control download file lifetime. 
+ base::Unretained(download_file_.get()), intermediate_path, + std::move(callback))); +} + +void DownloadItemImpl::OnDownloadRenamedToIntermediateName( + DownloadInterruptReason reason, + const base::FilePath& full_path) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(state_ == TARGET_PENDING_INTERNAL || + state_ == INTERRUPTED_TARGET_PENDING_INTERNAL); + DCHECK(download_file_); + DVLOG(20) << __func__ << "() download=" << DebugString(true); + + if (DOWNLOAD_INTERRUPT_REASON_NONE == reason) { + SetFullPath(full_path); + } else { + // TODO(asanka): Even though the rename failed, it may still be possible to + // recover the partial state from the 'before' name. + deferred_interrupt_reason_ = reason; + TransitionTo(INTERRUPTED_TARGET_PENDING_INTERNAL); + } + OnTargetResolved(); +} + +void DownloadItemImpl::OnTargetResolved() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DVLOG(20) << __func__ << "() download=" << DebugString(true); + DCHECK((state_ == TARGET_PENDING_INTERNAL && + deferred_interrupt_reason_ == DOWNLOAD_INTERRUPT_REASON_NONE) || + (state_ == INTERRUPTED_TARGET_PENDING_INTERNAL && + deferred_interrupt_reason_ != DOWNLOAD_INTERRUPT_REASON_NONE)) + << " deferred_interrupt_reason_:" + << DownloadInterruptReasonToString(deferred_interrupt_reason_) + << " this:" << DebugString(true); + + // This transition is here to ensure that the DownloadItemImpl state machine + // doesn't transition to INTERRUPTED or IN_PROGRESS from + // TARGET_PENDING_INTERNAL directly. Doing so without passing through + // OnTargetResolved() can result in an externally visible state where the + // download is interrupted but doesn't have a target path associated with it. + // + // While not terrible, this complicates the DownloadItem<->Observer + // relationship since an observer that needs a target path in order to respond + // properly to an interruption will need to wait for another OnDownloadUpdated + // notification. 
This requirement currently affects all of our UIs. + TransitionTo(TARGET_RESOLVED_INTERNAL); + + if (DOWNLOAD_INTERRUPT_REASON_NONE != deferred_interrupt_reason_) { + InterruptWithPartialState(GetReceivedBytes(), std::move(hash_state_), + deferred_interrupt_reason_); + deferred_interrupt_reason_ = DOWNLOAD_INTERRUPT_REASON_NONE; + UpdateObservers(); + return; + } + + TransitionTo(IN_PROGRESS_INTERNAL); + // TODO(asanka): Calling UpdateObservers() prior to MaybeCompleteDownload() is + // not safe. The download could be in an underminate state after invoking + // observers. http://crbug.com/586610 + UpdateObservers(); + MaybeCompleteDownload(); +} + +// When SavePackage downloads MHTML to GData (see +// SavePackageFilePickerChromeOS), GData calls MaybeCompleteDownload() like it +// does for non-SavePackage downloads, but SavePackage downloads never satisfy +// IsDownloadReadyForCompletion(). GDataDownloadObserver manually calls +// DownloadItem::UpdateObservers() when the upload completes so that +// SavePackage notices that the upload has completed and runs its normal +// Finish() pathway. MaybeCompleteDownload() is never the mechanism by which +// SavePackage completes downloads. SavePackage always uses its own Finish() to +// mark downloads complete. +void DownloadItemImpl::MaybeCompleteDownload() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(!IsSavePackageDownload()); + + if (!IsDownloadReadyForCompletion( + base::Bind(&DownloadItemImpl::MaybeCompleteDownload, + weak_ptr_factory_.GetWeakPtr()))) + return; + // Confirm we're in the proper set of states to be here; have all data, have a + // history handle, (validated or safe). + DCHECK_EQ(IN_PROGRESS_INTERNAL, state_); + DCHECK(!IsDangerous()); + DCHECK(AllDataSaved()); + + OnDownloadCompleting(); +} + +// Called by MaybeCompleteDownload() when it has determined that the download +// is ready for completion. 
+void DownloadItemImpl::OnDownloadCompleting() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + if (state_ != IN_PROGRESS_INTERNAL) + return; + + DVLOG(20) << __func__ << "() " << DebugString(true); + DCHECK(!GetTargetFilePath().empty()); + DCHECK(!IsDangerous()); + + DCHECK(download_file_.get()); + // Unilaterally rename; even if it already has the right name, + // we need theannotation. + DownloadFile::RenameCompletionCallback callback = + base::Bind(&DownloadItemImpl::OnDownloadRenamedToFinalName, + weak_ptr_factory_.GetWeakPtr()); + GetDownloadTaskRunner()->PostTask( + FROM_HERE, + base::BindOnce(&DownloadFile::RenameAndAnnotate, + base::Unretained(download_file_.get()), + GetTargetFilePath(), + delegate_->GetApplicationClientIdForFileScanning(), + GetURL(), GetReferrerUrl(), std::move(callback))); +} + +void DownloadItemImpl::OnDownloadRenamedToFinalName( + DownloadInterruptReason reason, + const base::FilePath& full_path) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(!IsSavePackageDownload()); + + // If a cancel or interrupt hit, we'll cancel the DownloadFile, which + // will result in deleting the file on the file thread. So we don't + // care about the name having been changed. + if (state_ != IN_PROGRESS_INTERNAL) + return; + + DVLOG(20) << __func__ << "() full_path = \"" << full_path.value() << "\" " + << DebugString(false); + + if (DOWNLOAD_INTERRUPT_REASON_NONE != reason) { + // Failure to perform the final rename is considered fatal. TODO(asanka): It + // may not be, in which case we should figure out whether we can recover the + // state. + InterruptAndDiscardPartialState(reason); + UpdateObservers(); + return; + } + + DCHECK(GetTargetFilePath() == full_path); + + if (full_path != GetFullPath()) { + // full_path is now the current and target file path. + DCHECK(!full_path.empty()); + SetFullPath(full_path); + } + + // Complete the download and release the DownloadFile. 
+ DCHECK(download_file_); + ReleaseDownloadFile(false); + + // We're not completely done with the download item yet, but at this + // point we're committed to complete the download. Cancels (or Interrupts, + // though it's not clear how they could happen) after this point will be + // ignored. + TransitionTo(COMPLETING_INTERNAL); + + if (delegate_->ShouldOpenDownload( + this, base::Bind(&DownloadItemImpl::DelayedDownloadOpened, + weak_ptr_factory_.GetWeakPtr()))) { + Completed(); + } else { + delegate_delayed_complete_ = true; + UpdateObservers(); + } +} + +void DownloadItemImpl::DelayedDownloadOpened(bool auto_opened) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + auto_opened_ = auto_opened; + Completed(); +} + +void DownloadItemImpl::Completed() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + DVLOG(20) << __func__ << "() " << DebugString(false); + + DCHECK(AllDataSaved()); + destination_info_.end_time = base::Time::Now(); + TransitionTo(COMPLETE_INTERNAL); + + bool is_parallelizable = job_ && job_->IsParallelizable(); + RecordDownloadCompleted(start_tick_, GetReceivedBytes(), is_parallelizable, + download_source_); + if (!delegate_->IsOffTheRecord()) { + RecordDownloadCountWithSource(COMPLETED_COUNT_NORMAL_PROFILE, + download_source_); + } + if (is_parallelizable) { + RecordParallelizableDownloadCount(COMPLETED_COUNT, + IsParallelDownloadEnabled()); + int64_t content_length = -1; + if (response_headers_->response_code() != net::HTTP_PARTIAL_CONTENT) { + content_length = response_headers_->GetContentLength(); + } else { + int64_t first_byte = -1; + int64_t last_byte = -1; + response_headers_->GetContentRangeFor206(&first_byte, &last_byte, + &content_length); + } + if (content_length > 0) + RecordParallelizableContentLength(content_length); + } + + if (auto_opened_) { + // If it was already handled by the delegate, do nothing. 
+ } else if (GetOpenWhenComplete() || ShouldOpenFileBasedOnExtension() || + IsTemporary()) { + // If the download is temporary, like in drag-and-drop, do not open it but + // we still need to set it auto-opened so that it can be removed from the + // download shelf. + if (!IsTemporary()) + OpenDownload(); + + auto_opened_ = true; + } + + base::TimeDelta time_since_start = GetEndTime() - GetStartTime(); + + // If all data is saved, the number of received bytes is resulting file size. + int resulting_file_size = GetReceivedBytes(); + + auto in_progress_entry = delegate_->GetInProgressEntry(this); + if (in_progress_entry) { + DownloadUkmHelper::RecordDownloadCompleted( + in_progress_entry->ukm_download_id, resulting_file_size, + time_since_start, in_progress_entry->bytes_wasted); + } + + // After all of the records are done, then update the observers. + UpdateObservers(); +} + +// **** End of Download progression cascade + +void DownloadItemImpl::InterruptAndDiscardPartialState( + DownloadInterruptReason reason) { + InterruptWithPartialState(0, nullptr, reason); +} + +void DownloadItemImpl::InterruptWithPartialState( + int64_t bytes_so_far, + std::unique_ptr<crypto::SecureHash> hash_state, + DownloadInterruptReason reason) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK_NE(DOWNLOAD_INTERRUPT_REASON_NONE, reason); + DVLOG(20) << __func__ + << "() reason:" << DownloadInterruptReasonToString(reason) + << " bytes_so_far:" << bytes_so_far + << " hash_state:" << (hash_state ? "Valid" : "Invalid") + << " this=" << DebugString(true); + + // Somewhat counter-intuitively, it is possible for us to receive an + // interrupt after we've already been interrupted. 
The generation of + // interrupts from the file thread Renames and the generation of + // interrupts from disk writes go through two different mechanisms (driven + // by rename requests from UI thread and by write requests from IO thread, + // respectively), and since we choose not to keep state on the File thread, + // this is the place where the races collide. It's also possible for + // interrupts to race with cancels. + switch (state_) { + case CANCELLED_INTERNAL: + // If the download is already cancelled, then there's no point in + // transitioning out to interrupted. + case COMPLETING_INTERNAL: + case COMPLETE_INTERNAL: + // Already complete. + return; + + case INITIAL_INTERNAL: + case MAX_DOWNLOAD_INTERNAL_STATE: + NOTREACHED(); + return; + + case TARGET_PENDING_INTERNAL: + case INTERRUPTED_TARGET_PENDING_INTERNAL: + // Postpone recognition of this error until after file name determination + // has completed and the intermediate file has been renamed to simplify + // resumption conditions. The target determination logic is much simpler + // if the state of the download remains constant until that stage + // completes. + // + // current_path_ may be empty because it is possible for + // DownloadItem to receive a DestinationError prior to the + // download file initialization complete callback. + if (!IsCancellation(reason)) { + UpdateProgress(bytes_so_far, 0); + SetHashState(std::move(hash_state)); + deferred_interrupt_reason_ = reason; + TransitionTo(INTERRUPTED_TARGET_PENDING_INTERNAL); + return; + } + // else - Fallthrough for cancellation handling which is equivalent to the + // IN_PROGRESS state. + FALLTHROUGH; + + case IN_PROGRESS_INTERNAL: + case TARGET_RESOLVED_INTERNAL: + // last_reason_ needs to be set for GetResumeMode() to work. 
+ last_reason_ = reason; + + if (download_file_) { + ResumeMode resume_mode = GetResumeMode(); + ReleaseDownloadFile(resume_mode != ResumeMode::IMMEDIATE_CONTINUE && + resume_mode != ResumeMode::USER_CONTINUE); + } + break; + + case RESUMING_INTERNAL: + case INTERRUPTED_INTERNAL: + DCHECK(!download_file_); + // The first non-cancel interrupt reason wins in cases where multiple + // things go wrong. + if (!IsCancellation(reason)) + return; + + last_reason_ = reason; + if (!GetFullPath().empty()) { + // There is no download file and this is transitioning from INTERRUPTED + // to CANCELLED. The intermediate file is no longer usable, and should + // be deleted. + GetDownloadTaskRunner()->PostTask( + FROM_HERE, base::BindOnce(base::IgnoreResult(&DeleteDownloadedFile), + GetFullPath())); + destination_info_.current_path.clear(); + } + break; + } + + // Reset all data saved, as even if we did save all the data we're going to go + // through another round of downloading when we resume. There's a potential + // problem here in the abstract, as if we did download all the data and then + // run into a continuable error, on resumption we won't download any more + // data. However, a) there are currently no continuable errors that can occur + // after we download all the data, and b) if there were, that would probably + // simply result in a null range request, which would generate a + // DestinationCompleted() notification from the DownloadFile, which would + // behave properly with setting all_data_saved_ to false here. 
+ destination_info_.all_data_saved = false; + + if (GetFullPath().empty()) { + hash_state_.reset(); + destination_info_.hash.clear(); + destination_info_.received_bytes = 0; + received_slices_.clear(); + } else { + UpdateProgress(bytes_so_far, 0); + SetHashState(std::move(hash_state)); + } + + if (job_) + job_->Cancel(false); + + if (IsCancellation(reason)) { + if (IsDangerous()) { + RecordDangerousDownloadDiscard( + reason == DOWNLOAD_INTERRUPT_REASON_USER_CANCELED + ? DOWNLOAD_DISCARD_DUE_TO_USER_ACTION + : DOWNLOAD_DISCARD_DUE_TO_SHUTDOWN, + GetDangerType(), GetTargetFilePath()); + } + + RecordDownloadCountWithSource(CANCELLED_COUNT, download_source_); + if (job_ && job_->IsParallelizable()) { + RecordParallelizableDownloadCount(CANCELLED_COUNT, + IsParallelDownloadEnabled()); + } + DCHECK_EQ(last_reason_, reason); + TransitionTo(CANCELLED_INTERNAL); + return; + } + + RecordDownloadInterrupted(reason, GetReceivedBytes(), total_bytes_, + job_ && job_->IsParallelizable(), + IsParallelDownloadEnabled(), download_source_); + + base::TimeDelta time_since_start = base::Time::Now() - GetStartTime(); + int resulting_file_size = GetReceivedBytes(); + auto in_progress_entry = delegate_->GetInProgressEntry(this); + base::Optional<int> change_in_file_size; + if (in_progress_entry) { + if (total_bytes_ >= 0) { + change_in_file_size = total_bytes_ - resulting_file_size; + } + + DownloadUkmHelper::RecordDownloadInterrupted( + in_progress_entry->ukm_download_id, change_in_file_size, reason, + resulting_file_size, time_since_start, in_progress_entry->bytes_wasted); + } + if (reason == DOWNLOAD_INTERRUPT_REASON_SERVER_CONTENT_LENGTH_MISMATCH) { + received_bytes_at_length_mismatch_ = GetReceivedBytes(); + } + + // TODO(asanka): This is not good. We can transition to interrupted from + // target-pending, which is something we don't want to do. Perhaps we should + // explicitly transition to target-resolved prior to switching to interrupted. 
+ DCHECK_EQ(last_reason_, reason); + TransitionTo(INTERRUPTED_INTERNAL); + delegate_->DownloadInterrupted(this); + AutoResumeIfValid(); +} + +void DownloadItemImpl::UpdateProgress(int64_t bytes_so_far, + int64_t bytes_per_sec) { + destination_info_.received_bytes = bytes_so_far; + bytes_per_sec_ = bytes_per_sec; + + // If we've received more data than we were expecting (bad server info?), + // revert to 'unknown size mode'. + if (bytes_so_far > total_bytes_) + total_bytes_ = 0; +} + +void DownloadItemImpl::SetHashState( + std::unique_ptr<crypto::SecureHash> hash_state) { + hash_state_ = std::move(hash_state); + if (!hash_state_) { + destination_info_.hash.clear(); + return; + } + + std::unique_ptr<crypto::SecureHash> clone_of_hash_state(hash_state_->Clone()); + std::vector<char> hash_value(clone_of_hash_state->GetHashLength()); + clone_of_hash_state->Finish(&hash_value.front(), hash_value.size()); + destination_info_.hash.assign(hash_value.begin(), hash_value.end()); +} + +void DownloadItemImpl::ReleaseDownloadFile(bool destroy_file) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DVLOG(20) << __func__ << "() destroy_file:" << destroy_file; + + if (destroy_file) { + GetDownloadTaskRunner()->PostTask( + FROM_HERE, + // Will be deleted at end of task execution. + base::BindOnce(&DownloadFileCancel, std::move(download_file_))); + // Avoid attempting to reuse the intermediate file by clearing out + // current_path_ and received slices. + destination_info_.current_path.clear(); + received_slices_.clear(); + } else { + GetDownloadTaskRunner()->PostTask( + FROM_HERE, base::BindOnce(base::IgnoreResult(&DownloadFileDetach), + // Will be deleted at end of task execution. + std::move(download_file_))); + } + // Don't accept any more messages from the DownloadFile, and null + // out any previous "all data received". This also breaks links to + // other entities we've given out weak pointers to. 
+ weak_ptr_factory_.InvalidateWeakPtrs(); +} + +bool DownloadItemImpl::IsDownloadReadyForCompletion( + const base::Closure& state_change_notification) { + // If the download hasn't progressed to the IN_PROGRESS state, then it's not + // ready for completion. + if (state_ != IN_PROGRESS_INTERNAL) + return false; + + // If we don't have all the data, the download is not ready for + // completion. + if (!AllDataSaved()) + return false; + + // If the download is dangerous, but not yet validated, it's not ready for + // completion. + if (IsDangerous()) + return false; + + // Check for consistency before invoking delegate. Since there are no pending + // target determination calls and the download is in progress, both the target + // and current paths should be non-empty and they should point to the same + // directory. + DCHECK(!GetTargetFilePath().empty()); + DCHECK(!GetFullPath().empty()); + DCHECK(GetTargetFilePath().DirName() == GetFullPath().DirName()); + + // Give the delegate a chance to hold up a stop sign. It'll call + // use back through the passed callback if it does and that state changes. + if (!delegate_->ShouldCompleteDownload(this, state_change_notification)) + return false; + + return true; +} + +void DownloadItemImpl::TransitionTo(DownloadInternalState new_state) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + if (state_ == new_state) + return; + + DownloadInternalState old_state = state_; + state_ = new_state; + + DCHECK(IsSavePackageDownload() + ? 
IsValidSavePackageStateTransition(old_state, new_state) + : IsValidStateTransition(old_state, new_state)) + << "Invalid state transition from:" << DebugDownloadStateString(old_state) + << " to:" << DebugDownloadStateString(new_state); + + switch (state_) { + case INITIAL_INTERNAL: + NOTREACHED(); + break; + + case TARGET_PENDING_INTERNAL: + case TARGET_RESOLVED_INTERNAL: + break; + + case INTERRUPTED_TARGET_PENDING_INTERNAL: + DCHECK_NE(DOWNLOAD_INTERRUPT_REASON_NONE, deferred_interrupt_reason_) + << "Interrupt reason must be set prior to transitioning into " + "TARGET_PENDING"; + break; + + case IN_PROGRESS_INTERNAL: + DCHECK(!GetFullPath().empty()) << "Current output path must be known."; + DCHECK(!GetTargetFilePath().empty()) << "Target path must be known."; + DCHECK(GetFullPath().DirName() == GetTargetFilePath().DirName()) + << "Current output directory must match target directory."; + DCHECK(download_file_) << "Output file must be owned by download item."; + DCHECK(job_) << "Must have active download job."; + DCHECK(!job_->is_paused()) + << "At the time a download enters IN_PROGRESS state, " + "it must not be paused."; + break; + + case COMPLETING_INTERNAL: + DCHECK(AllDataSaved()) << "All data must be saved prior to completion."; + DCHECK(!download_file_) + << "Download file must be released prior to completion."; + DCHECK(!GetTargetFilePath().empty()) << "Target path must be known."; + DCHECK(GetFullPath() == GetTargetFilePath()) + << "Current output path must match target path."; + + TRACE_EVENT_INSTANT2("download", "DownloadItemCompleting", + TRACE_EVENT_SCOPE_THREAD, "bytes_so_far", + GetReceivedBytes(), "final_hash", + destination_info_.hash); + break; + + case COMPLETE_INTERNAL: + TRACE_EVENT_INSTANT1("download", "DownloadItemFinished", + TRACE_EVENT_SCOPE_THREAD, "auto_opened", + auto_opened_ ? 
"yes" : "no"); + break; + + case INTERRUPTED_INTERNAL: + DCHECK(!download_file_) + << "Download file must be released prior to interruption."; + DCHECK_NE(last_reason_, DOWNLOAD_INTERRUPT_REASON_NONE); + TRACE_EVENT_INSTANT2("download", "DownloadItemInterrupted", + TRACE_EVENT_SCOPE_THREAD, "interrupt_reason", + DownloadInterruptReasonToString(last_reason_), + "bytes_so_far", GetReceivedBytes()); + break; + + case RESUMING_INTERNAL: + TRACE_EVENT_INSTANT2("download", "DownloadItemResumed", + TRACE_EVENT_SCOPE_THREAD, "interrupt_reason", + DownloadInterruptReasonToString(last_reason_), + "bytes_so_far", GetReceivedBytes()); + break; + + case CANCELLED_INTERNAL: + TRACE_EVENT_INSTANT1("download", "DownloadItemCancelled", + TRACE_EVENT_SCOPE_THREAD, "bytes_so_far", + GetReceivedBytes()); + break; + + case MAX_DOWNLOAD_INTERNAL_STATE: + NOTREACHED(); + break; + } + + DVLOG(20) << __func__ << "() from:" << DebugDownloadStateString(old_state) + << " to:" << DebugDownloadStateString(state_) + << " this = " << DebugString(true); + bool is_done = + (state_ == COMPLETE_INTERNAL || state_ == INTERRUPTED_INTERNAL || + state_ == RESUMING_INTERNAL || state_ == CANCELLED_INTERNAL); + bool was_done = + (old_state == COMPLETE_INTERNAL || old_state == INTERRUPTED_INTERNAL || + old_state == RESUMING_INTERNAL || old_state == CANCELLED_INTERNAL); + + // Termination + if (is_done && !was_done) + TRACE_EVENT_ASYNC_END0("download", "DownloadItemActive", download_id_); + + // Resumption + if (was_done && !is_done) { + std::string file_name(GetTargetFilePath().BaseName().AsUTF8Unsafe()); + TRACE_EVENT_NESTABLE_ASYNC_BEGIN1( + "download", "DownloadItemActive", download_id_, "download_item", + std::make_unique<DownloadItemActivatedData>( + TYPE_ACTIVE_DOWNLOAD, GetId(), GetOriginalUrl().spec(), + GetURL().spec(), file_name, GetDangerType(), GetReceivedBytes(), + HasUserGesture())); + } +} + +void DownloadItemImpl::SetDangerType(DownloadDangerType danger_type) { + if (danger_type != 
danger_type_) { + TRACE_EVENT_INSTANT1("download", "DownloadItemSaftyStateUpdated", + TRACE_EVENT_SCOPE_THREAD, "danger_type", + GetDownloadDangerNames(danger_type).c_str()); + } + // Only record the Malicious UMA stat if it's going from {not malicious} -> + // {malicious}. + if ((danger_type_ == DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS || + danger_type_ == DOWNLOAD_DANGER_TYPE_DANGEROUS_FILE || + danger_type_ == DOWNLOAD_DANGER_TYPE_UNCOMMON_CONTENT || + danger_type_ == DOWNLOAD_DANGER_TYPE_MAYBE_DANGEROUS_CONTENT) && + (danger_type == DOWNLOAD_DANGER_TYPE_DANGEROUS_HOST || + danger_type == DOWNLOAD_DANGER_TYPE_DANGEROUS_URL || + danger_type == DOWNLOAD_DANGER_TYPE_DANGEROUS_CONTENT || + danger_type == DOWNLOAD_DANGER_TYPE_POTENTIALLY_UNWANTED)) { + RecordMaliciousDownloadClassified(danger_type); + } + danger_type_ = danger_type; +} + +void DownloadItemImpl::SetFullPath(const base::FilePath& new_path) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DVLOG(20) << __func__ << "() new_path = \"" << new_path.value() << "\" " + << DebugString(true); + DCHECK(!new_path.empty()); + + TRACE_EVENT_INSTANT2("download", "DownloadItemRenamed", + TRACE_EVENT_SCOPE_THREAD, "old_filename", + destination_info_.current_path.AsUTF8Unsafe(), + "new_filename", new_path.AsUTF8Unsafe()); + + destination_info_.current_path = new_path; +} + +void DownloadItemImpl::AutoResumeIfValid() { + DVLOG(20) << __func__ << "() " << DebugString(true); + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + ResumeMode mode = GetResumeMode(); + + if (mode != ResumeMode::IMMEDIATE_RESTART && + mode != ResumeMode::IMMEDIATE_CONTINUE) { + return; + } + + auto_resume_count_++; + + ResumeInterruptedDownload(ResumptionRequestSource::AUTOMATIC); +} + +void DownloadItemImpl::ResumeInterruptedDownload( + ResumptionRequestSource source) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + // If we're not interrupted, ignore the request; our caller is drunk. 
+ if (state_ != INTERRUPTED_INTERNAL) + return; + + // We are starting a new request. Shake off all pending operations. + DCHECK(!download_file_); + weak_ptr_factory_.InvalidateWeakPtrs(); + + // Reset the appropriate state if restarting. + ResumeMode mode = GetResumeMode(); + if (mode == ResumeMode::IMMEDIATE_RESTART || + mode == ResumeMode::USER_RESTART) { + DCHECK(GetFullPath().empty()); + destination_info_.received_bytes = 0; + last_modified_time_.clear(); + etag_.clear(); + destination_info_.hash.clear(); + hash_state_.reset(); + received_slices_.clear(); + } + + net::NetworkTrafficAnnotationTag traffic_annotation = + net::DefineNetworkTrafficAnnotation("download_manager_resume", R"( + semantics { + sender: "Download Manager" + description: + "When user resumes downloading a file, a network request is made " + "to fetch it." + trigger: + "User resumes a download." + data: "None." + destination: WEBSITE + } + policy { + cookies_allowed: YES + cookies_store: "user" + setting: + "This feature cannot be disabled in settings, but it is activated " + "by direct user action." + chrome_policy { + DownloadRestrictions { + DownloadRestrictions: 3 + } + } + })"); + // Avoid using the WebContents even if it's still around. Resumption requests + // are consistently routed through the no-renderer code paths so that the + // request will not be dropped if the WebContents (and by extension, the + // associated renderer) goes away before a response is received. 
+ std::unique_ptr<DownloadUrlParameters> download_params( + new DownloadUrlParameters(GetURL(), nullptr, traffic_annotation)); + download_params->set_file_path(GetFullPath()); + if (received_slices_.size() > 0) { + std::vector<DownloadItem::ReceivedSlice> slices_to_download = + FindSlicesToDownload(received_slices_); + download_params->set_offset(slices_to_download[0].offset); + } else { + download_params->set_offset(GetReceivedBytes()); + } + download_params->set_last_modified(GetLastModifiedTime()); + download_params->set_etag(GetETag()); + download_params->set_hash_of_partial_file(GetHash()); + download_params->set_hash_state(std::move(hash_state_)); + + // TODO(xingliu): Read |fetch_error_body| and |request_headers_| from the + // cache, and don't copy them into DownloadItemImpl. + download_params->set_fetch_error_body(fetch_error_body_); + for (const auto& header : request_headers_) { + download_params->add_request_header(header.first, header.second); + } + + auto entry = delegate_->GetInProgressEntry(this); + if (entry) + download_params->set_request_origin(entry.value().request_origin); + + // Note that resumed downloads disallow redirects. Hence the referrer URL + // (which is the contents of the Referer header for the last download request) + // will only be sent to the URL returned by GetURL(). + download_params->set_referrer(GetReferrerUrl()); + download_params->set_referrer_policy(net::URLRequest::NEVER_CLEAR_REFERRER); + + TransitionTo(RESUMING_INTERNAL); + RecordDownloadCountWithSource(source == ResumptionRequestSource::USER + ? 
MANUAL_RESUMPTION_COUNT + : AUTO_RESUMPTION_COUNT, + download_source_); + + base::TimeDelta time_since_start = base::Time::Now() - GetStartTime(); + auto in_progress_entry = delegate_->GetInProgressEntry(this); + if (in_progress_entry) { + DownloadUkmHelper::RecordDownloadResumed(in_progress_entry->ukm_download_id, + GetResumeMode(), time_since_start); + } + + delegate_->ResumeInterruptedDownload(std::move(download_params), GetId(), + request_info_.site_url); + + if (job_) + job_->Resume(false); +} + +// static +DownloadItem::DownloadState DownloadItemImpl::InternalToExternalState( + DownloadInternalState internal_state) { + switch (internal_state) { + case INITIAL_INTERNAL: + case TARGET_PENDING_INTERNAL: + case TARGET_RESOLVED_INTERNAL: + case INTERRUPTED_TARGET_PENDING_INTERNAL: + // TODO(asanka): Introduce an externally visible state to distinguish + // between the above states and IN_PROGRESS_INTERNAL. The latter (the + // state where the download is active and has a known target) is the state + // that most external users are interested in. 
+ case IN_PROGRESS_INTERNAL: + return IN_PROGRESS; + case COMPLETING_INTERNAL: + return IN_PROGRESS; + case COMPLETE_INTERNAL: + return COMPLETE; + case CANCELLED_INTERNAL: + return CANCELLED; + case INTERRUPTED_INTERNAL: + return INTERRUPTED; + case RESUMING_INTERNAL: + return IN_PROGRESS; + case MAX_DOWNLOAD_INTERNAL_STATE: + break; + } + NOTREACHED(); + return MAX_DOWNLOAD_STATE; +} + +// static +DownloadItemImpl::DownloadInternalState +DownloadItemImpl::ExternalToInternalState(DownloadState external_state) { + switch (external_state) { + case IN_PROGRESS: + return IN_PROGRESS_INTERNAL; + case COMPLETE: + return COMPLETE_INTERNAL; + case CANCELLED: + return CANCELLED_INTERNAL; + case INTERRUPTED: + return INTERRUPTED_INTERNAL; + default: + NOTREACHED(); + } + return MAX_DOWNLOAD_INTERNAL_STATE; +} + +// static +bool DownloadItemImpl::IsValidSavePackageStateTransition( + DownloadInternalState from, + DownloadInternalState to) { +#if DCHECK_IS_ON() + switch (from) { + case INITIAL_INTERNAL: + case TARGET_PENDING_INTERNAL: + case INTERRUPTED_TARGET_PENDING_INTERNAL: + case TARGET_RESOLVED_INTERNAL: + case COMPLETING_INTERNAL: + case COMPLETE_INTERNAL: + case INTERRUPTED_INTERNAL: + case RESUMING_INTERNAL: + case CANCELLED_INTERNAL: + return false; + + case IN_PROGRESS_INTERNAL: + return to == CANCELLED_INTERNAL || to == COMPLETE_INTERNAL; + + case MAX_DOWNLOAD_INTERNAL_STATE: + NOTREACHED(); + } + return false; +#else + return true; +#endif +} + +// static +bool DownloadItemImpl::IsValidStateTransition(DownloadInternalState from, + DownloadInternalState to) { +#if DCHECK_IS_ON() + switch (from) { + case INITIAL_INTERNAL: + return to == TARGET_PENDING_INTERNAL || + to == INTERRUPTED_TARGET_PENDING_INTERNAL; + + case TARGET_PENDING_INTERNAL: + return to == INTERRUPTED_TARGET_PENDING_INTERNAL || + to == TARGET_RESOLVED_INTERNAL || to == CANCELLED_INTERNAL; + + case INTERRUPTED_TARGET_PENDING_INTERNAL: + return to == TARGET_RESOLVED_INTERNAL || to == 
CANCELLED_INTERNAL; + + case TARGET_RESOLVED_INTERNAL: + return to == IN_PROGRESS_INTERNAL || to == INTERRUPTED_INTERNAL || + to == CANCELLED_INTERNAL; + + case IN_PROGRESS_INTERNAL: + return to == COMPLETING_INTERNAL || to == CANCELLED_INTERNAL || + to == INTERRUPTED_INTERNAL; + + case COMPLETING_INTERNAL: + return to == COMPLETE_INTERNAL; + + case COMPLETE_INTERNAL: + return false; + + case INTERRUPTED_INTERNAL: + return to == RESUMING_INTERNAL || to == CANCELLED_INTERNAL; + + case RESUMING_INTERNAL: + return to == TARGET_PENDING_INTERNAL || + to == INTERRUPTED_TARGET_PENDING_INTERNAL || + to == TARGET_RESOLVED_INTERNAL || to == CANCELLED_INTERNAL; + + case CANCELLED_INTERNAL: + return false; + + case MAX_DOWNLOAD_INTERNAL_STATE: + NOTREACHED(); + } + return false; +#else + return true; +#endif // DCHECK_IS_ON() +} + +const char* DownloadItemImpl::DebugDownloadStateString( + DownloadInternalState state) { + switch (state) { + case INITIAL_INTERNAL: + return "INITIAL"; + case TARGET_PENDING_INTERNAL: + return "TARGET_PENDING"; + case INTERRUPTED_TARGET_PENDING_INTERNAL: + return "INTERRUPTED_TARGET_PENDING"; + case TARGET_RESOLVED_INTERNAL: + return "TARGET_RESOLVED"; + case IN_PROGRESS_INTERNAL: + return "IN_PROGRESS"; + case COMPLETING_INTERNAL: + return "COMPLETING"; + case COMPLETE_INTERNAL: + return "COMPLETE"; + case CANCELLED_INTERNAL: + return "CANCELLED"; + case INTERRUPTED_INTERNAL: + return "INTERRUPTED"; + case RESUMING_INTERNAL: + return "RESUMING"; + case MAX_DOWNLOAD_INTERNAL_STATE: + break; + } + NOTREACHED() << "Unknown download state " << state; + return "unknown"; +} + +const char* DownloadItemImpl::DebugResumeModeString(ResumeMode mode) { + switch (mode) { + case ResumeMode::INVALID: + return "INVALID"; + case ResumeMode::IMMEDIATE_CONTINUE: + return "IMMEDIATE_CONTINUE"; + case ResumeMode::IMMEDIATE_RESTART: + return "IMMEDIATE_RESTART"; + case ResumeMode::USER_CONTINUE: + return "USER_CONTINUE"; + case ResumeMode::USER_RESTART: + return 
"USER_RESTART"; + } + NOTREACHED() << "Unknown resume mode " << static_cast<int>(mode); + return "unknown"; +} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_item_impl_delegate.cc b/chromium/components/download/internal/common/download_item_impl_delegate.cc new file mode 100644 index 00000000000..363d564d996 --- /dev/null +++ b/chromium/components/download/internal/common/download_item_impl_delegate.cc @@ -0,0 +1,102 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/public/common/download_item_impl_delegate.h" + +#include "base/logging.h" +#include "components/download/downloader/in_progress/download_entry.h" +#include "components/download/public/common/download_danger_type.h" +#include "components/download/public/common/download_item_impl.h" + +namespace download { + +// Infrastructure in DownloadItemImplDelegate to assert invariant that +// delegate always outlives all attached DownloadItemImpls. +DownloadItemImplDelegate::DownloadItemImplDelegate() : count_(0) {} + +DownloadItemImplDelegate::~DownloadItemImplDelegate() { + DCHECK_EQ(0, count_); +} + +void DownloadItemImplDelegate::Attach() { + ++count_; +} + +void DownloadItemImplDelegate::Detach() { + DCHECK_LT(0, count_); + --count_; +} + +void DownloadItemImplDelegate::DetermineDownloadTarget( + DownloadItemImpl* download, + const DownloadTargetCallback& callback) { + // TODO(rdsmith/asanka): Do something useful if forced file path is null. 
+ base::FilePath target_path(download->GetForcedFilePath()); + callback.Run(target_path, DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, target_path, + DOWNLOAD_INTERRUPT_REASON_NONE); +} + +bool DownloadItemImplDelegate::ShouldCompleteDownload( + DownloadItemImpl* download, + const base::Closure& complete_callback) { + return true; +} + +bool DownloadItemImplDelegate::ShouldOpenDownload( + DownloadItemImpl* download, + const ShouldOpenDownloadCallback& callback) { + return false; +} + +bool DownloadItemImplDelegate::ShouldOpenFileBasedOnExtension( + const base::FilePath& path) { + return false; +} + +void DownloadItemImplDelegate::CheckForFileRemoval( + DownloadItemImpl* download_item) {} + +std::string DownloadItemImplDelegate::GetApplicationClientIdForFileScanning() + const { + return std::string(); +} + +void DownloadItemImplDelegate::ResumeInterruptedDownload( + std::unique_ptr<DownloadUrlParameters> params, + uint32_t id, + const GURL& site_url) {} + +void DownloadItemImplDelegate::UpdatePersistence(DownloadItemImpl* download) {} + +void DownloadItemImplDelegate::OpenDownload(DownloadItemImpl* download) {} + +bool DownloadItemImplDelegate::IsMostRecentDownloadItemAtFilePath( + DownloadItemImpl* download) { + return true; +} + +void DownloadItemImplDelegate::ShowDownloadInShell(DownloadItemImpl* download) { +} + +void DownloadItemImplDelegate::DownloadRemoved(DownloadItemImpl* download) {} + +void DownloadItemImplDelegate::AssertStateConsistent( + DownloadItemImpl* download) const {} + +void DownloadItemImplDelegate::DownloadInterrupted(DownloadItemImpl* download) { +} + +base::Optional<DownloadEntry> DownloadItemImplDelegate::GetInProgressEntry( + DownloadItemImpl* download) { + return base::Optional<DownloadEntry>(); +} + +bool DownloadItemImplDelegate::IsOffTheRecord() const { + return false; +} + +void DownloadItemImplDelegate::ReportBytesWasted(DownloadItemImpl* download) {} + +} // namespace download diff --git 
a/chromium/components/download/internal/common/download_item_impl_unittest.cc b/chromium/components/download/internal/common/download_item_impl_unittest.cc new file mode 100644 index 00000000000..8515eb822f6 --- /dev/null +++ b/chromium/components/download/internal/common/download_item_impl_unittest.cc @@ -0,0 +1,2425 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/public/common/download_item_impl.h" + +#include <stdint.h> + +#include <iterator> +#include <map> +#include <memory> +#include <utility> +#include <vector> + +#include "base/bind.h" +#include "base/callback.h" +#include "base/callback_helpers.h" +#include "base/containers/circular_deque.h" +#include "base/containers/queue.h" +#include "base/files/file_util.h" +#include "base/memory/ptr_util.h" +#include "base/test/histogram_tester.h" +#include "base/test/scoped_task_environment.h" +#include "base/threading/thread.h" +#include "components/download/public/common/download_create_info.h" +#include "components/download/public/common/download_destination_observer.h" +#include "components/download/public/common/download_file_factory.h" +#include "components/download/public/common/download_interrupt_reasons.h" +#include "components/download/public/common/download_item_impl_delegate.h" +#include "components/download/public/common/download_request_handle_interface.h" +#include "components/download/public/common/download_url_parameters.h" +#include "components/download/public/common/mock_download_file.h" +#include "components/download/public/common/mock_download_item.h" +#include "crypto/secure_hash.h" +#include "net/http/http_response_headers.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" + +using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::InvokeWithoutArgs; +using ::testing::NiceMock; 
+using ::testing::Property; +using ::testing::Return; +using ::testing::ReturnRefOfCopy; +using ::testing::SaveArg; +using ::testing::StrictMock; +using ::testing::WithArg; +using ::testing::_; + +const int kDownloadChunkSize = 1000; +const int kDownloadSpeed = 1000; +const base::FilePath::CharType kDummyTargetPath[] = + FILE_PATH_LITERAL("/testpath"); +const base::FilePath::CharType kDummyIntermediatePath[] = + FILE_PATH_LITERAL("/testpathx"); + +namespace download { + +namespace { + +template <typename T> +base::HistogramBase::Sample ToHistogramSample(T t) { + return static_cast<base::HistogramBase::Sample>(t); +} + +class MockDelegate : public DownloadItemImplDelegate { + public: + MockDelegate() : DownloadItemImplDelegate() { SetDefaultExpectations(); } + + MOCK_METHOD2(DetermineDownloadTarget, + void(DownloadItemImpl*, + const DownloadItemImplDelegate::DownloadTargetCallback&)); + MOCK_METHOD2(ShouldCompleteDownload, + bool(DownloadItemImpl*, const base::Closure&)); + MOCK_METHOD2(ShouldOpenDownload, + bool(DownloadItemImpl*, const ShouldOpenDownloadCallback&)); + MOCK_METHOD1(ShouldOpenFileBasedOnExtension, bool(const base::FilePath&)); + MOCK_METHOD1(CheckForFileRemoval, void(DownloadItemImpl*)); + + void ResumeInterruptedDownload(std::unique_ptr<DownloadUrlParameters> params, + uint32_t id, + const GURL& site_url) override { + MockResumeInterruptedDownload(params.get(), id); + } + MOCK_METHOD2(MockResumeInterruptedDownload, + void(DownloadUrlParameters* params, uint32_t id)); + + MOCK_METHOD1(DownloadOpened, void(DownloadItemImpl*)); + MOCK_METHOD1(DownloadRemoved, void(DownloadItemImpl*)); + MOCK_CONST_METHOD1(AssertStateConsistent, void(DownloadItemImpl*)); + + void VerifyAndClearExpectations() { + ::testing::Mock::VerifyAndClearExpectations(this); + SetDefaultExpectations(); + } + + private: + void SetDefaultExpectations() { + EXPECT_CALL(*this, AssertStateConsistent(_)).WillRepeatedly(Return()); + EXPECT_CALL(*this, ShouldOpenFileBasedOnExtension(_)) + 
.WillRepeatedly(Return(false)); + EXPECT_CALL(*this, ShouldOpenDownload(_, _)).WillRepeatedly(Return(true)); + } +}; + +class MockRequestHandle : public DownloadRequestHandleInterface { + public: + MOCK_METHOD0(PauseRequest, void()); + MOCK_METHOD0(ResumeRequest, void()); + MOCK_METHOD1(CancelRequest, void(bool)); +}; + +class TestDownloadItemObserver : public DownloadItem::Observer { + public: + explicit TestDownloadItemObserver(DownloadItem* item) + : item_(item), + last_state_(item->GetState()), + removed_(false), + destroyed_(false), + updated_(false), + interrupt_count_(0), + resume_count_(0) { + item_->AddObserver(this); + } + + ~TestDownloadItemObserver() override { + if (item_) + item_->RemoveObserver(this); + } + + bool download_removed() const { return removed_; } + bool download_destroyed() const { return destroyed_; } + int interrupt_count() const { return interrupt_count_; } + int resume_count() const { return resume_count_; } + + bool CheckAndResetDownloadUpdated() { + bool was_updated = updated_; + updated_ = false; + return was_updated; + } + + private: + void OnDownloadRemoved(DownloadItem* download) override { + SCOPED_TRACE(::testing::Message() << " " << __FUNCTION__ << " download = " + << download->DebugString(false)); + removed_ = true; + } + + void OnDownloadUpdated(DownloadItem* download) override { + DVLOG(20) << " " << __FUNCTION__ + << " download = " << download->DebugString(false); + updated_ = true; + DownloadItem::DownloadState new_state = download->GetState(); + if (last_state_ == DownloadItem::IN_PROGRESS && + new_state == DownloadItem::INTERRUPTED) { + interrupt_count_++; + } + if (last_state_ == DownloadItem::INTERRUPTED && + new_state == DownloadItem::IN_PROGRESS) { + resume_count_++; + } + last_state_ = new_state; + } + + void OnDownloadOpened(DownloadItem* download) override { + DVLOG(20) << " " << __FUNCTION__ + << " download = " << download->DebugString(false); + } + + void OnDownloadDestroyed(DownloadItem* download) override { 
+ DVLOG(20) << " " << __FUNCTION__ + << " download = " << download->DebugString(false); + destroyed_ = true; + item_->RemoveObserver(this); + item_ = nullptr; + } + + DownloadItem* item_; + DownloadItem::DownloadState last_state_; + bool removed_; + bool destroyed_; + bool updated_; + int interrupt_count_; + int resume_count_; +}; + +// Schedules a task to invoke the RenameCompletionCallback with |new_path| on +// the |task_runner|. Should only be used as the action for +// MockDownloadFile::RenameAndUniquify as follows: +// EXPECT_CALL(download_file, RenameAndUniquify(_,_)) +// .WillOnce(ScheduleRenameAndUniquifyCallback( +// DOWNLOAD_INTERRUPT_REASON_NONE, new_path, task_runner)); +ACTION_P3(ScheduleRenameAndUniquifyCallback, + interrupt_reason, + new_path, + task_runner) { + task_runner->PostTask(FROM_HERE, + base::BindOnce(arg1, interrupt_reason, new_path)); +} + +// Schedules a task to invoke the RenameCompletionCallback with |new_path| on +// the |task_runner|. Should only be used as the action for +// MockDownloadFile::RenameAndAnnotate as follows: +// EXPECT_CALL(download_file, RenameAndAnnotate(_,_,_,_,_)) +// .WillOnce(ScheduleRenameAndAnnotateCallback( +// DOWNLOAD_INTERRUPT_REASON_NONE, new_path, task_runner)); +ACTION_P3(ScheduleRenameAndAnnotateCallback, + interrupt_reason, + new_path, + task_runner) { + task_runner->PostTask(FROM_HERE, + base::BindOnce(arg4, interrupt_reason, new_path)); +} + +// Schedules a task to invoke a callback that's bound to the specified +// parameter. +// E.g.: +// +// EXPECT_CALL(foo, Bar(1, _)) +// .WithArg<1>(ScheduleCallbackWithParams(0, 0, task_runner)); +// +// .. will invoke the second argument to Bar with 0 as the parameter. 
+ACTION_P3(ScheduleCallbackWithParams, param1, param2, task_runner) { + task_runner->PostTask(FROM_HERE, + base::BindOnce(std::move(arg0), param1, param2)); +} + +const char kTestData1[] = {'M', 'a', 'r', 'y', ' ', 'h', 'a', 'd', + ' ', 'a', ' ', 'l', 'i', 't', 't', 'l', + 'e', ' ', 'l', 'a', 'm', 'b', '.'}; + +// SHA256 hash of TestData1 +const uint8_t kHashOfTestData1[] = { + 0xd2, 0xfc, 0x16, 0xa1, 0xf5, 0x1a, 0x65, 0x3a, 0xa0, 0x19, 0x64, + 0xef, 0x9c, 0x92, 0x33, 0x36, 0xe1, 0x06, 0x53, 0xfe, 0xc1, 0x95, + 0xf4, 0x93, 0x45, 0x8b, 0x3b, 0x21, 0x89, 0x0e, 0x1b, 0x97}; + +} // namespace + +class DownloadItemTest : public testing::Test { + public: + DownloadItemTest() + : task_environment_( + base::test::ScopedTaskEnvironment::MainThreadType::UI, + base::test::ScopedTaskEnvironment::ExecutionMode::QUEUED), + next_download_id_(DownloadItem::kInvalidId + 1) { + create_info_.reset(new DownloadCreateInfo()); + create_info_->save_info = + std::unique_ptr<DownloadSaveInfo>(new DownloadSaveInfo()); + create_info_->save_info->prompt_for_save_location = false; + create_info_->url_chain.push_back(GURL("http://example.com/download")); + create_info_->etag = "SomethingToSatisfyResumption"; + } + + DownloadItemImpl* CreateDownloadItemWithCreateInfo( + std::unique_ptr<DownloadCreateInfo> info) { + DownloadItemImpl* download = new DownloadItemImpl( + mock_delegate(), next_download_id_++, *(info.get())); + allocated_downloads_[download] = base::WrapUnique(download); + return download; + } + + // Creates a new net::HttpResponseHeaders object for the |response_code|. + scoped_refptr<const net::HttpResponseHeaders> CreateResponseHeaders( + int response_code) { + return base::MakeRefCounted<net::HttpResponseHeaders>( + "HTTP/1.1 " + std::to_string(response_code)); + } + + // This class keeps ownership of the created download item; it will + // be torn down at the end of the test unless DestroyDownloadItem is + // called. 
+ DownloadItemImpl* CreateDownloadItem() { + create_info_->download_id = ++next_download_id_; + DownloadItemImpl* download = new DownloadItemImpl( + mock_delegate(), create_info_->download_id, *create_info_); + allocated_downloads_[download] = base::WrapUnique(download); + return download; + } + + // Add DownloadFile to DownloadItem. + MockDownloadFile* CallDownloadItemStart( + DownloadItemImpl* item, + DownloadItemImplDelegate::DownloadTargetCallback* callback) { + MockDownloadFile* mock_download_file = nullptr; + std::unique_ptr<DownloadFile> download_file; + EXPECT_CALL(*mock_delegate(), DetermineDownloadTarget(item, _)) + .WillOnce(SaveArg<1>(callback)); + + // Only create a DownloadFile if the request was successful. + if (create_info_->result == DOWNLOAD_INTERRUPT_REASON_NONE) { + mock_download_file = new StrictMock<MockDownloadFile>; + download_file.reset(mock_download_file); + EXPECT_CALL(*mock_download_file, Initialize(_, _, _, _)) + .WillOnce( + ScheduleCallbackWithParams(DOWNLOAD_INTERRUPT_REASON_NONE, 0, + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*mock_download_file, FullPath()) + .WillRepeatedly(ReturnRefOfCopy(base::FilePath())); + } + + std::unique_ptr<MockRequestHandle> request_handle = + std::make_unique<NiceMock<MockRequestHandle>>(); + item->Start(std::move(download_file), std::move(request_handle), + *create_info_, nullptr, nullptr); + task_environment_.RunUntilIdle(); + + // So that we don't have a function writing to a stack variable + // lying around if the above failed. + mock_delegate()->VerifyAndClearExpectations(); + EXPECT_CALL(*mock_delegate(), AssertStateConsistent(_)) + .WillRepeatedly(Return()); + EXPECT_CALL(*mock_delegate(), ShouldOpenFileBasedOnExtension(_)) + .WillRepeatedly(Return(false)); + EXPECT_CALL(*mock_delegate(), ShouldOpenDownload(_, _)) + .WillRepeatedly(Return(true)); + + return mock_download_file; + } + + // Perform the intermediate rename for |item|. 
The target path for the + // download will be set to kDummyTargetPath. Returns the MockDownloadFile* + // that was added to the DownloadItem. + MockDownloadFile* DoIntermediateRename(DownloadItemImpl* item, + DownloadDangerType danger_type) { + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + EXPECT_TRUE(item->GetTargetFilePath().empty()); + DownloadItemImplDelegate::DownloadTargetCallback callback; + MockDownloadFile* download_file = CallDownloadItemStart(item, &callback); + base::FilePath target_path(kDummyTargetPath); + base::FilePath intermediate_path(kDummyIntermediatePath); + EXPECT_CALL(*download_file, RenameAndUniquify(intermediate_path, _)) + .WillOnce(ScheduleRenameAndUniquifyCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, intermediate_path, + base::ThreadTaskRunnerHandle::Get())); + callback.Run(target_path, DownloadItem::TARGET_DISPOSITION_OVERWRITE, + danger_type, intermediate_path, + DOWNLOAD_INTERRUPT_REASON_NONE); + task_environment_.RunUntilIdle(); + return download_file; + } + + void DoDestinationComplete(DownloadItemImpl* item, + MockDownloadFile* download_file) { + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(_, _)) + .WillOnce(Return(true)); + base::FilePath final_path(kDummyTargetPath); + EXPECT_CALL(*download_file, RenameAndAnnotate(_, _, _, _, _)) + .WillOnce(ScheduleRenameAndAnnotateCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, final_path, + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*download_file, FullPath()) + .WillRepeatedly(ReturnRefOfCopy(base::FilePath(kDummyTargetPath))); + EXPECT_CALL(*download_file, Detach()); + + item->DestinationObserverAsWeakPtr()->DestinationCompleted( + 0, std::unique_ptr<crypto::SecureHash>()); + task_environment_.RunUntilIdle(); + } + + // Cleanup a download item (specifically get rid of the DownloadFile on it). + // The item must be in the expected state. 
+ void CleanupItem(DownloadItemImpl* item, + MockDownloadFile* download_file, + DownloadItem::DownloadState expected_state) { + EXPECT_EQ(expected_state, item->GetState()); + + if (expected_state == DownloadItem::IN_PROGRESS) { + if (download_file) + EXPECT_CALL(*download_file, Cancel()); + item->Cancel(true); + task_environment_.RunUntilIdle(); + } + } + + // Destroy a previously created download item. + void DestroyDownloadItem(DownloadItem* item) { + allocated_downloads_.erase(item); + } + + MockDelegate* mock_delegate() { return &mock_delegate_; } + + void OnDownloadFileAcquired(base::FilePath* return_path, + const base::FilePath& path) { + *return_path = path; + } + + DownloadCreateInfo* create_info() { return create_info_.get(); } + + base::test::ScopedTaskEnvironment task_environment_; + + private: + StrictMock<MockDelegate> mock_delegate_; + std::map<DownloadItem*, std::unique_ptr<DownloadItem>> allocated_downloads_; + std::unique_ptr<DownloadCreateInfo> create_info_; + uint32_t next_download_id_ = DownloadItem::kInvalidId + 1; +}; + +// Tests to ensure calls that change a DownloadItem generate an update +// to observers. 
State changing functions not tested: +// void OpenDownload(); +// void ShowDownloadInShell(); +// void CompleteDelayedDownload(); +// set_* mutators + +TEST_F(DownloadItemTest, NotificationAfterUpdate) { + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + ASSERT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + TestDownloadItemObserver observer(item); + + item->DestinationUpdate(kDownloadChunkSize, kDownloadSpeed, + std::vector<DownloadItem::ReceivedSlice>()); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); + EXPECT_EQ(kDownloadSpeed, item->CurrentSpeed()); + CleanupItem(item, file, DownloadItem::IN_PROGRESS); +} + +TEST_F(DownloadItemTest, NotificationAfterCancel) { + DownloadItemImpl* user_cancel = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback target_callback; + MockDownloadFile* download_file = + CallDownloadItemStart(user_cancel, &target_callback); + EXPECT_CALL(*download_file, Cancel()); + + TestDownloadItemObserver observer1(user_cancel); + user_cancel->Cancel(true); + ASSERT_TRUE(observer1.CheckAndResetDownloadUpdated()); + + DownloadItemImpl* system_cancel = CreateDownloadItem(); + download_file = CallDownloadItemStart(system_cancel, &target_callback); + EXPECT_CALL(*download_file, Cancel()); + + TestDownloadItemObserver observer2(system_cancel); + system_cancel->Cancel(false); + ASSERT_TRUE(observer2.CheckAndResetDownloadUpdated()); +} + +TEST_F(DownloadItemTest, NotificationAfterComplete) { + DownloadItemImpl* item = CreateDownloadItem(); + TestDownloadItemObserver observer(item); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); + DoDestinationComplete(item, download_file); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); +} + +TEST_F(DownloadItemTest, NotificationAfterDownloadedFileRemoved) { + DownloadItemImpl* 
item = CreateDownloadItem(); + TestDownloadItemObserver observer(item); + + item->OnDownloadedFileRemoved(); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); +} + +TEST_F(DownloadItemTest, NotificationAfterInterrupted) { + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + EXPECT_CALL(*download_file, Cancel()); + TestDownloadItemObserver observer(item); + + EXPECT_CALL(*mock_delegate(), MockResumeInterruptedDownload(_, _)).Times(0); + + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED, 0, + std::unique_ptr<crypto::SecureHash>()); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); +} + +TEST_F(DownloadItemTest, NotificationAfterDestroyed) { + DownloadItemImpl* item = CreateDownloadItem(); + TestDownloadItemObserver observer(item); + + DestroyDownloadItem(item); + ASSERT_TRUE(observer.download_destroyed()); +} + +TEST_F(DownloadItemTest, NotificationAfterRemove) { + DownloadItemImpl* item = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback target_callback; + MockDownloadFile* download_file = + CallDownloadItemStart(item, &target_callback); + EXPECT_CALL(*download_file, Cancel()); + EXPECT_CALL(*mock_delegate(), DownloadRemoved(_)); + TestDownloadItemObserver observer(item); + + item->Remove(); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); + ASSERT_TRUE(observer.download_removed()); +} + +TEST_F(DownloadItemTest, NotificationAfterOnContentCheckCompleted) { + // Setting to NOT_DANGEROUS does not trigger a notification. 
+ DownloadItemImpl* safe_item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(safe_item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + TestDownloadItemObserver safe_observer(safe_item); + + safe_item->OnAllDataSaved(0, std::unique_ptr<crypto::SecureHash>()); + EXPECT_TRUE(safe_observer.CheckAndResetDownloadUpdated()); + safe_item->OnContentCheckCompleted(DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, + DOWNLOAD_INTERRUPT_REASON_NONE); + EXPECT_TRUE(safe_observer.CheckAndResetDownloadUpdated()); + CleanupItem(safe_item, download_file, DownloadItem::IN_PROGRESS); + + // Setting to unsafe url or unsafe file should trigger a notification. + DownloadItemImpl* unsafeurl_item = CreateDownloadItem(); + download_file = + DoIntermediateRename(unsafeurl_item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + TestDownloadItemObserver unsafeurl_observer(unsafeurl_item); + + unsafeurl_item->OnAllDataSaved(0, std::unique_ptr<crypto::SecureHash>()); + EXPECT_TRUE(unsafeurl_observer.CheckAndResetDownloadUpdated()); + unsafeurl_item->OnContentCheckCompleted(DOWNLOAD_DANGER_TYPE_DANGEROUS_URL, + DOWNLOAD_INTERRUPT_REASON_NONE); + EXPECT_TRUE(unsafeurl_observer.CheckAndResetDownloadUpdated()); + + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(_, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*download_file, RenameAndAnnotate(_, _, _, _, _)); + unsafeurl_item->ValidateDangerousDownload(); + EXPECT_TRUE(unsafeurl_observer.CheckAndResetDownloadUpdated()); + CleanupItem(unsafeurl_item, download_file, DownloadItem::IN_PROGRESS); + + DownloadItemImpl* unsafefile_item = CreateDownloadItem(); + download_file = + DoIntermediateRename(unsafefile_item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + TestDownloadItemObserver unsafefile_observer(unsafefile_item); + + unsafefile_item->OnAllDataSaved(0, std::unique_ptr<crypto::SecureHash>()); + EXPECT_TRUE(unsafefile_observer.CheckAndResetDownloadUpdated()); + unsafefile_item->OnContentCheckCompleted(DOWNLOAD_DANGER_TYPE_DANGEROUS_FILE, + 
DOWNLOAD_INTERRUPT_REASON_NONE); + EXPECT_TRUE(unsafefile_observer.CheckAndResetDownloadUpdated()); + + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(_, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*download_file, RenameAndAnnotate(_, _, _, _, _)); + unsafefile_item->ValidateDangerousDownload(); + EXPECT_TRUE(unsafefile_observer.CheckAndResetDownloadUpdated()); + CleanupItem(unsafefile_item, download_file, DownloadItem::IN_PROGRESS); +} + +// DownloadItemImpl::OnDownloadTargetDetermined will schedule a task to run +// DownloadFile::Rename(). Once the rename +// completes, DownloadItemImpl receives a notification with the new file +// name. Check that observers are updated when the new filename is available and +// not before. +TEST_F(DownloadItemTest, NotificationAfterOnDownloadTargetDetermined) { + DownloadItemImpl* item = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback callback; + MockDownloadFile* download_file = CallDownloadItemStart(item, &callback); + TestDownloadItemObserver observer(item); + base::FilePath target_path(kDummyTargetPath); + base::FilePath intermediate_path(target_path.InsertBeforeExtensionASCII("x")); + base::FilePath new_intermediate_path( + target_path.InsertBeforeExtensionASCII("y")); + EXPECT_CALL(*download_file, RenameAndUniquify(intermediate_path, _)) + .WillOnce(ScheduleRenameAndUniquifyCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, new_intermediate_path, + base::ThreadTaskRunnerHandle::Get())); + + // Currently, a notification would be generated if the danger type is anything + // other than NOT_DANGEROUS. 
+ callback.Run(target_path, DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, intermediate_path, + DOWNLOAD_INTERRUPT_REASON_NONE); + EXPECT_FALSE(observer.CheckAndResetDownloadUpdated()); + task_environment_.RunUntilIdle(); + EXPECT_TRUE(observer.CheckAndResetDownloadUpdated()); + EXPECT_EQ(new_intermediate_path, item->GetFullPath()); + + CleanupItem(item, download_file, DownloadItem::IN_PROGRESS); +} + +TEST_F(DownloadItemTest, NotificationAfterTogglePause) { + DownloadItemImpl* item = CreateDownloadItem(); + TestDownloadItemObserver observer(item); + MockDownloadFile* mock_download_file(new MockDownloadFile); + std::unique_ptr<DownloadFile> download_file(mock_download_file); + std::unique_ptr<DownloadRequestHandleInterface> request_handle( + new NiceMock<MockRequestHandle>); + + EXPECT_CALL(*mock_download_file, Initialize(_, _, _, _)); + EXPECT_CALL(*mock_delegate(), DetermineDownloadTarget(_, _)); + item->Start(std::move(download_file), std::move(request_handle), + *create_info(), nullptr, nullptr); + + item->Pause(); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); + + ASSERT_TRUE(item->IsPaused()); + + item->Resume(); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); + + task_environment_.RunUntilIdle(); + + CleanupItem(item, mock_download_file, DownloadItem::IN_PROGRESS); +} + +// Test that a download is resumed automatically after a continuable interrupt. +TEST_F(DownloadItemTest, AutomaticResumption_Continue) { + DownloadItemImpl* item = CreateDownloadItem(); + TestDownloadItemObserver observer(item); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + // Interrupt the download using a continuable interrupt after writing a single + // byte. An intermediate file with data shouldn't be discarding after a + // continuable interrupt. + + // The DownloadFile should be detached without discarding. 
+ EXPECT_CALL(*download_file, FullPath()) + .WillOnce(ReturnRefOfCopy(base::FilePath())); + EXPECT_CALL(*download_file, Detach()); + + // Resumption attempt should pass the intermediate file along. + EXPECT_CALL(*mock_delegate(), + MockResumeInterruptedDownload( + AllOf(Property(&DownloadUrlParameters::file_path, + Property(&base::FilePath::value, + kDummyIntermediatePath)), + Property(&DownloadUrlParameters::offset, 1)), + _)); + + base::HistogramTester histogram_tester; + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR, 1, + std::unique_ptr<crypto::SecureHash>()); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); + // Since the download is resumed automatically, the interrupt count doesn't + // increase. + ASSERT_EQ(0, observer.interrupt_count()); + + // Test expectations verify that ResumeInterruptedDownload() is called (by way + // of MockResumeInterruptedDownload) after the download is interrupted. But + // the mock doesn't follow through with the resumption. + // ResumeInterruptedDownload() being called is sufficient for verifying that + // the automatic resumption was triggered. + task_environment_.RunUntilIdle(); + + // Interrupt reason is recorded in auto resumption even when download is not + // finally interrupted. + histogram_tester.ExpectBucketCount( + "Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR), + 1); + + // The download item is currently in RESUMING_INTERNAL state, which maps to + // IN_PROGRESS. + CleanupItem(item, nullptr, DownloadItem::IN_PROGRESS); +} + +// Automatic resumption should restart and discard the intermediate file if the +// interrupt reason requires it. 
+TEST_F(DownloadItemTest, AutomaticResumption_Restart) { + DownloadItemImpl* item = CreateDownloadItem(); + TestDownloadItemObserver observer(item); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + // Interrupt the download, using a restartable interrupt. + EXPECT_CALL(*download_file, Cancel()); + EXPECT_EQ(kDummyIntermediatePath, item->GetFullPath().value()); + + // Resumption attempt should have discarded intermediate file. + EXPECT_CALL(*mock_delegate(), + MockResumeInterruptedDownload( + Property(&DownloadUrlParameters::file_path, + Property(&base::FilePath::empty, true)), + _)); + + base::HistogramTester histogram_tester; + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_SERVER_NO_RANGE, 1, + std::unique_ptr<crypto::SecureHash>()); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); + + // Since the download is resumed automatically, the interrupt count doesn't + // increase. + ASSERT_EQ(0, observer.interrupt_count()); + + task_environment_.RunUntilIdle(); + // Auto resumption will record interrupt reason even if download is not + // finally interrupted. + histogram_tester.ExpectBucketCount( + "Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_SERVER_NO_RANGE), + 1); + CleanupItem(item, nullptr, DownloadItem::IN_PROGRESS); +} + +// Test that automatic resumption doesn't happen after an interrupt that +// requires user action to resolve. +TEST_F(DownloadItemTest, AutomaticResumption_NeedsUserAction) { + DownloadItemImpl* item = CreateDownloadItem(); + TestDownloadItemObserver observer(item); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + // Interrupt the download, using a restartable interrupt. 
+ EXPECT_CALL(*download_file, Cancel()); + base::HistogramTester histogram_tester; + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED, 1, + std::unique_ptr<crypto::SecureHash>()); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); + // Should not try to auto-resume. + ASSERT_EQ(1, observer.interrupt_count()); + ASSERT_EQ(0, observer.resume_count()); + + task_environment_.RunUntilIdle(); + histogram_tester.ExpectBucketCount("Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED), + 1); + CleanupItem(item, nullptr, DownloadItem::INTERRUPTED); +} + +// Test that a download is resumed automatically after a content length mismatch +// error. +TEST_F(DownloadItemTest, AutomaticResumption_ContentLengthMismatch) { + DownloadItemImpl* item = CreateDownloadItem(); + TestDownloadItemObserver observer(item); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + // Interrupt the download with content length mismatch error. The intermediate + // file with data shouldn't be discarded. + + // The DownloadFile should be detached without discarding. + EXPECT_CALL(*download_file, FullPath()) + .WillOnce(ReturnRefOfCopy(base::FilePath())); + EXPECT_CALL(*download_file, Detach()); + + // Resumption attempt should pass the intermediate file along. 
+ EXPECT_CALL(*mock_delegate(), + MockResumeInterruptedDownload( + AllOf(Property(&DownloadUrlParameters::file_path, + Property(&base::FilePath::value, + kDummyIntermediatePath)), + Property(&DownloadUrlParameters::offset, 1)), + _)); + + base::HistogramTester histogram_tester; + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_SERVER_CONTENT_LENGTH_MISMATCH, 1, + std::unique_ptr<crypto::SecureHash>()); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); + // Since the download is resumed automatically, the observer shouldn't notice + // the interruption. + ASSERT_EQ(0, observer.interrupt_count()); + ASSERT_EQ(0, observer.resume_count()); + + task_environment_.RunUntilIdle(); + histogram_tester.ExpectBucketCount( + "Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_SERVER_CONTENT_LENGTH_MISMATCH), + 1); + CleanupItem(item, nullptr, DownloadItem::IN_PROGRESS); +} + +// Check we do correct cleanup for RESUME_MODE_INVALID interrupts. +TEST_F(DownloadItemTest, UnresumableInterrupt) { + DownloadItemImpl* item = CreateDownloadItem(); + TestDownloadItemObserver observer(item); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + // Fail final rename with unresumable reason. + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(item, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*download_file, + RenameAndAnnotate(base::FilePath(kDummyTargetPath), _, _, _, _)) + .WillOnce(ScheduleRenameAndAnnotateCallback( + DOWNLOAD_INTERRUPT_REASON_FILE_BLOCKED, base::FilePath(), + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*download_file, Cancel()); + + // Complete download to trigger final rename. 
+ base::HistogramTester histogram_tester; + item->DestinationObserverAsWeakPtr()->DestinationCompleted( + 0, std::unique_ptr<crypto::SecureHash>()); + + task_environment_.RunUntilIdle(); + histogram_tester.ExpectBucketCount( + "Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_FILE_BLOCKED), + 1); + ASSERT_TRUE(observer.CheckAndResetDownloadUpdated()); + // Should not try to auto-resume. + ASSERT_EQ(1, observer.interrupt_count()); + ASSERT_EQ(0, observer.resume_count()); + + CleanupItem(item, nullptr, DownloadItem::INTERRUPTED); +} + +TEST_F(DownloadItemTest, AutomaticResumption_AttemptLimit) { + base::HistogramTester histogram_tester; + DownloadItemImpl* item = CreateDownloadItem(); + base::WeakPtr<DownloadDestinationObserver> as_observer( + item->DestinationObserverAsWeakPtr()); + TestDownloadItemObserver observer(item); + MockDownloadFile* mock_download_file_ref = nullptr; + std::unique_ptr<MockDownloadFile> mock_download_file; + std::unique_ptr<MockRequestHandle> mock_request_handle; + DownloadItemImplDelegate::DownloadTargetCallback callback; + + EXPECT_CALL(*mock_delegate(), DetermineDownloadTarget(item, _)) + .WillRepeatedly(SaveArg<1>(&callback)); + + // All attempts at resumption should pass along the intermediate file. 
+ EXPECT_CALL(*mock_delegate(), + MockResumeInterruptedDownload( + AllOf(Property(&DownloadUrlParameters::file_path, + Property(&base::FilePath::value, + kDummyIntermediatePath)), + Property(&DownloadUrlParameters::offset, 1)), + _)) + .Times(DownloadItemImpl::kMaxAutoResumeAttempts); + for (int i = 0; i < (DownloadItemImpl::kMaxAutoResumeAttempts + 1); ++i) { + SCOPED_TRACE(::testing::Message() << "Iteration " << i); + + mock_download_file = std::make_unique<NiceMock<MockDownloadFile>>(); + mock_download_file_ref = mock_download_file.get(); + mock_request_handle = std::make_unique<NiceMock<MockRequestHandle>>(); + + ON_CALL(*mock_download_file_ref, FullPath()) + .WillByDefault(ReturnRefOfCopy(base::FilePath())); + + // Copied key parts of DoIntermediateRename & CallDownloadItemStart + // to allow for holding onto the request handle. + item->Start(std::move(mock_download_file), std::move(mock_request_handle), + *create_info(), nullptr, nullptr); + task_environment_.RunUntilIdle(); + + base::FilePath target_path(kDummyTargetPath); + base::FilePath intermediate_path(kDummyIntermediatePath); + + // Target of RenameAndUniquify is always the intermediate path. + ON_CALL(*mock_download_file_ref, RenameAndUniquify(_, _)) + .WillByDefault(ScheduleRenameAndUniquifyCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, intermediate_path, + base::ThreadTaskRunnerHandle::Get())); + + // RenameAndUniquify is only called the first time. In all the subsequent + // iterations, the intermediate file already has the correct name, hence no + // rename is necessary. + EXPECT_CALL(*mock_download_file_ref, RenameAndUniquify(_, _)).Times(i == 0); + + ASSERT_FALSE(callback.is_null()); + base::ResetAndReturn(&callback).Run( + target_path, DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, intermediate_path, + DOWNLOAD_INTERRUPT_REASON_NONE); + task_environment_.RunUntilIdle(); + + // Use a continuable interrupt. 
+ EXPECT_CALL(*mock_download_file_ref, Cancel()).Times(0); + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR, 1, + std::unique_ptr<crypto::SecureHash>()); + + task_environment_.RunUntilIdle(); + ::testing::Mock::VerifyAndClearExpectations(mock_download_file_ref); + } + + histogram_tester.ExpectBucketCount( + "Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR), + DownloadItemImpl::kMaxAutoResumeAttempts + 1); + EXPECT_EQ(DownloadItem::INTERRUPTED, item->GetState()); + EXPECT_EQ(1, observer.interrupt_count()); + CleanupItem(item, nullptr, DownloadItem::INTERRUPTED); +} + +// If the download attempts to resume and the resumption request fails, the +// subsequent Start() call shouldn't update the origin state (URL redirect +// chains, Content-Disposition, download URL, etc..) +TEST_F(DownloadItemTest, FailedResumptionDoesntUpdateOriginState) { + constexpr int kFirstResponseCode = 200; + const char kContentDisposition[] = "attachment; filename=foo"; + const char kFirstETag[] = "ABC"; + const char kFirstLastModified[] = "Yesterday"; + const char kFirstURL[] = "http://www.example.com/download"; + const char kMimeType[] = "text/css"; + create_info()->response_headers = CreateResponseHeaders(kFirstResponseCode); + create_info()->content_disposition = kContentDisposition; + create_info()->etag = kFirstETag; + create_info()->last_modified = kFirstLastModified; + create_info()->url_chain.push_back(GURL(kFirstURL)); + create_info()->mime_type = kMimeType; + + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + ASSERT_TRUE(item->GetResponseHeaders()); + EXPECT_EQ(kFirstResponseCode, item->GetResponseHeaders()->response_code()); + EXPECT_EQ(kContentDisposition, item->GetContentDisposition()); + EXPECT_EQ(kFirstETag, item->GetETag()); + 
EXPECT_EQ(kFirstLastModified, item->GetLastModifiedTime()); + EXPECT_EQ(kFirstURL, item->GetURL().spec()); + EXPECT_EQ(kMimeType, item->GetMimeType()); + + EXPECT_CALL(*mock_delegate(), + MockResumeInterruptedDownload( + AllOf(Property(&DownloadUrlParameters::file_path, + Property(&base::FilePath::value, + kDummyIntermediatePath)), + Property(&DownloadUrlParameters::offset, 1)), + _)); + EXPECT_CALL(*download_file, Detach()); + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR, 1, + std::unique_ptr<crypto::SecureHash>()); + task_environment_.RunUntilIdle(); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + + // Now change the create info. The changes should not cause the + // DownloadItem to be updated. + constexpr int kSecondResponseCode = 418; + const char kSecondContentDisposition[] = "attachment; filename=bar"; + const char kSecondETag[] = "123"; + const char kSecondLastModified[] = "Today"; + const char kSecondURL[] = "http://example.com/another-download"; + const char kSecondMimeType[] = "text/html"; + create_info()->response_headers = CreateResponseHeaders(kSecondResponseCode); + create_info()->content_disposition = kSecondContentDisposition; + create_info()->etag = kSecondETag; + create_info()->last_modified = kSecondLastModified; + create_info()->url_chain.clear(); + create_info()->url_chain.push_back(GURL(kSecondURL)); + create_info()->mime_type = kSecondMimeType; + create_info()->result = DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED; + create_info()->save_info->file_path = base::FilePath(kDummyIntermediatePath); + create_info()->save_info->offset = 1; + + // Calling Start() with a response indicating failure shouldn't cause a target + // update, nor should it result in discarding the intermediate file. 
+ DownloadItemImplDelegate::DownloadTargetCallback target_callback; + download_file = CallDownloadItemStart(item, &target_callback); + ASSERT_FALSE(target_callback.is_null()); + target_callback.Run(base::FilePath(kDummyTargetPath), + DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, + base::FilePath(kDummyIntermediatePath), + DOWNLOAD_INTERRUPT_REASON_NONE); + task_environment_.RunUntilIdle(); + + ASSERT_TRUE(item->GetResponseHeaders()); + EXPECT_EQ(kFirstResponseCode, item->GetResponseHeaders()->response_code()); + EXPECT_EQ(kContentDisposition, item->GetContentDisposition()); + EXPECT_EQ(kFirstETag, item->GetETag()); + EXPECT_EQ(kFirstLastModified, item->GetLastModifiedTime()); + EXPECT_EQ(kFirstURL, item->GetURL().spec()); + EXPECT_EQ(kMimeType, item->GetMimeType()); + EXPECT_EQ(DownloadItem::INTERRUPTED, item->GetState()); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED, item->GetLastReason()); + EXPECT_EQ(kDummyIntermediatePath, item->GetFullPath().value()); + EXPECT_EQ(1, item->GetReceivedBytes()); +} + +// If the download resumption request succeeds, the origin state should be +// updated. 
+TEST_F(DownloadItemTest, SucceededResumptionUpdatesOriginState) { + constexpr int kFirstResponseCode = 200; + const char kContentDisposition[] = "attachment; filename=foo"; + const char kFirstETag[] = "ABC"; + const char kFirstLastModified[] = "Yesterday"; + const char kFirstURL[] = "http://www.example.com/download"; + const char kMimeType[] = "text/css"; + create_info()->response_headers = CreateResponseHeaders(kFirstResponseCode); + create_info()->content_disposition = kContentDisposition; + create_info()->etag = kFirstETag; + create_info()->last_modified = kFirstLastModified; + create_info()->url_chain.push_back(GURL(kFirstURL)); + create_info()->mime_type = kMimeType; + + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + EXPECT_CALL(*mock_delegate(), MockResumeInterruptedDownload(_, _)); + EXPECT_CALL(*download_file, Detach()); + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR, 0, + std::unique_ptr<crypto::SecureHash>()); + task_environment_.RunUntilIdle(); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + + // Now change the create info. The changes should not cause the + // DownloadItem to be updated. 
+ constexpr int kSecondResponseCode = 201; + const char kSecondContentDisposition[] = "attachment; filename=bar"; + const char kSecondETag[] = "123"; + const char kSecondLastModified[] = "Today"; + const char kSecondURL[] = "http://example.com/another-download"; + const char kSecondMimeType[] = "text/html"; + create_info()->response_headers = CreateResponseHeaders(kSecondResponseCode); + create_info()->content_disposition = kSecondContentDisposition; + create_info()->etag = kSecondETag; + create_info()->last_modified = kSecondLastModified; + create_info()->url_chain.clear(); + create_info()->url_chain.push_back(GURL(kSecondURL)); + create_info()->mime_type = kSecondMimeType; + + DownloadItemImplDelegate::DownloadTargetCallback target_callback; + download_file = CallDownloadItemStart(item, &target_callback); + + ASSERT_TRUE(item->GetResponseHeaders()); + EXPECT_EQ(kSecondResponseCode, item->GetResponseHeaders()->response_code()); + EXPECT_EQ(kSecondContentDisposition, item->GetContentDisposition()); + EXPECT_EQ(kSecondETag, item->GetETag()); + EXPECT_EQ(kSecondLastModified, item->GetLastModifiedTime()); + EXPECT_EQ(kSecondURL, item->GetURL().spec()); + EXPECT_EQ(kSecondMimeType, item->GetMimeType()); + + CleanupItem(item, download_file, DownloadItem::IN_PROGRESS); +} + +// Ensure when strong validators changed on resumption, the received +// slices should be cleared. 
+TEST_F(DownloadItemTest, ClearReceivedSliceIfEtagChanged) { + const char kFirstETag[] = "ABC"; + const char kSecondETag[] = "123"; + const DownloadItem::ReceivedSlices kReceivedSlice = { + DownloadItem::ReceivedSlice(0, 10)}; + create_info()->etag = kFirstETag; + + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + EXPECT_CALL(*mock_delegate(), MockResumeInterruptedDownload(_, _)); + EXPECT_CALL(*download_file, Detach()); + + item->DestinationObserverAsWeakPtr()->DestinationUpdate(10, 100, + kReceivedSlice); + EXPECT_EQ(kReceivedSlice, item->GetReceivedSlices()); + EXPECT_EQ(10, item->GetReceivedBytes()); + + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR, 0, + std::unique_ptr<crypto::SecureHash>()); + EXPECT_EQ(kReceivedSlice, item->GetReceivedSlices()); + + task_environment_.RunUntilIdle(); + + // Change the strong validator and resume the download, the received slices + // should be cleared. + create_info()->etag = kSecondETag; + DownloadItemImplDelegate::DownloadTargetCallback target_callback; + download_file = CallDownloadItemStart(item, &target_callback); + EXPECT_TRUE(item->GetReceivedSlices().empty()); + EXPECT_EQ(0, item->GetReceivedBytes()); + + CleanupItem(item, download_file, DownloadItem::IN_PROGRESS); +} + +// Test that resumption uses the final URL in a URL chain when resuming. 
+TEST_F(DownloadItemTest, ResumeUsesFinalURL) { + create_info()->save_info->prompt_for_save_location = false; + create_info()->url_chain.clear(); + create_info()->url_chain.push_back(GURL("http://example.com/a")); + create_info()->url_chain.push_back(GURL("http://example.com/b")); + create_info()->url_chain.push_back(GURL("http://example.com/c")); + + DownloadItemImpl* item = CreateDownloadItem(); + TestDownloadItemObserver observer(item); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + // Interrupt the download, using a continuable interrupt. + EXPECT_CALL(*download_file, FullPath()) + .WillOnce(ReturnRefOfCopy(base::FilePath())); + EXPECT_CALL(*download_file, Detach()); + EXPECT_CALL(*mock_delegate(), MockResumeInterruptedDownload( + Property(&DownloadUrlParameters::url, + GURL("http://example.com/c")), + _)) + .Times(1); + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_FILE_TRANSIENT_ERROR, 1, + std::unique_ptr<crypto::SecureHash>()); + + // Test expectations verify that ResumeInterruptedDownload() is called (by way + // of MockResumeInterruptedDownload) after the download is interrupted. But + // the mock doesn't follow through with the resumption. + // ResumeInterruptedDownload() being called is sufficient for verifying that + // the resumption was triggered. + task_environment_.RunUntilIdle(); + + // The download is currently in RESUMING_INTERNAL, which maps to IN_PROGRESS. 
+ CleanupItem(item, nullptr, DownloadItem::IN_PROGRESS); +} + +TEST_F(DownloadItemTest, DisplayName) { + DownloadItemImpl* item = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback callback; + MockDownloadFile* download_file = CallDownloadItemStart(item, &callback); + base::FilePath target_path( + base::FilePath(kDummyTargetPath).AppendASCII("foo.bar")); + base::FilePath intermediate_path(target_path.InsertBeforeExtensionASCII("x")); + EXPECT_EQ(FILE_PATH_LITERAL(""), item->GetFileNameToReportUser().value()); + EXPECT_CALL(*download_file, RenameAndUniquify(_, _)) + .WillOnce(ScheduleRenameAndUniquifyCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, intermediate_path, + base::ThreadTaskRunnerHandle::Get())); + callback.Run(target_path, DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, intermediate_path, + DOWNLOAD_INTERRUPT_REASON_NONE); + task_environment_.RunUntilIdle(); + EXPECT_EQ(FILE_PATH_LITERAL("foo.bar"), + item->GetFileNameToReportUser().value()); + item->SetDisplayName(base::FilePath(FILE_PATH_LITERAL("new.name"))); + EXPECT_EQ(FILE_PATH_LITERAL("new.name"), + item->GetFileNameToReportUser().value()); + CleanupItem(item, download_file, DownloadItem::IN_PROGRESS); +} + +// Test to make sure that Start method calls DF initialize properly. 
+TEST_F(DownloadItemTest, Start) { + MockDownloadFile* mock_download_file(new MockDownloadFile); + std::unique_ptr<DownloadFile> download_file(mock_download_file); + DownloadItemImpl* item = CreateDownloadItem(); + EXPECT_CALL(*mock_download_file, Initialize(_, _, _, _)); + std::unique_ptr<DownloadRequestHandleInterface> request_handle( + new NiceMock<MockRequestHandle>); + EXPECT_CALL(*mock_delegate(), DetermineDownloadTarget(item, _)); + item->Start(std::move(download_file), std::move(request_handle), + *create_info(), nullptr, nullptr); + task_environment_.RunUntilIdle(); + + CleanupItem(item, mock_download_file, DownloadItem::IN_PROGRESS); +} + +// Download file and the request should be cancelled as a result of download +// file initialization failing. +TEST_F(DownloadItemTest, InitDownloadFileFails) { + DownloadItemImpl* item = CreateDownloadItem(); + std::unique_ptr<MockDownloadFile> file = std::make_unique<MockDownloadFile>(); + std::unique_ptr<MockRequestHandle> request_handle = + std::make_unique<MockRequestHandle>(); + + base::HistogramTester histogram_tester; + EXPECT_CALL(*file, Cancel()); + EXPECT_CALL(*request_handle, CancelRequest(_)); + EXPECT_CALL(*file, Initialize(_, _, _, _)) + .WillOnce(ScheduleCallbackWithParams( + DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED, 0, + base::ThreadTaskRunnerHandle::Get())); + + DownloadItemImplDelegate::DownloadTargetCallback download_target_callback; + EXPECT_CALL(*mock_delegate(), DetermineDownloadTarget(item, _)) + .WillOnce(SaveArg<1>(&download_target_callback)); + + item->Start(std::move(file), std::move(request_handle), *create_info(), + nullptr, nullptr); + task_environment_.RunUntilIdle(); + + download_target_callback.Run(base::FilePath(kDummyTargetPath), + DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, + base::FilePath(kDummyIntermediatePath), + DOWNLOAD_INTERRUPT_REASON_NONE); + task_environment_.RunUntilIdle(); + + EXPECT_EQ(DownloadItem::INTERRUPTED, 
item->GetState()); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED, + item->GetLastReason()); + EXPECT_FALSE(item->GetTargetFilePath().empty()); + EXPECT_TRUE(item->GetFullPath().empty()); + histogram_tester.ExpectBucketCount( + "Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED), + 1); +} + +// Handling of downloads initiated via a failed request. In this case, Start() +// will get called with a DownloadCreateInfo with a non-zero interrupt_reason. +TEST_F(DownloadItemTest, StartFailedDownload) { + base::HistogramTester histogram_tester; + create_info()->result = DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED; + DownloadItemImpl* item = CreateDownloadItem(); + + // DownloadFile and DownloadRequestHandleInterface objects aren't created for + // failed downloads. + std::unique_ptr<DownloadFile> null_download_file; + std::unique_ptr<DownloadRequestHandleInterface> null_request_handle; + DownloadItemImplDelegate::DownloadTargetCallback download_target_callback; + EXPECT_CALL(*mock_delegate(), DetermineDownloadTarget(item, _)) + .WillOnce(SaveArg<1>(&download_target_callback)); + item->Start(std::move(null_download_file), std::move(null_request_handle), + *create_info(), nullptr, nullptr); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + task_environment_.RunUntilIdle(); + + // The DownloadItemImpl should attempt to determine a target path even if the + // download was interrupted. + ASSERT_FALSE(download_target_callback.is_null()); + ASSERT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + base::FilePath target_path(FILE_PATH_LITERAL("foo")); + download_target_callback.Run(target_path, + DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, target_path, + DOWNLOAD_INTERRUPT_REASON_NONE); + task_environment_.RunUntilIdle(); + + // Interrupt reason carried in create info should be recorded. 
+ histogram_tester.ExpectBucketCount( + "Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED), + 1); + EXPECT_EQ(target_path, item->GetTargetFilePath()); + CleanupItem(item, nullptr, DownloadItem::INTERRUPTED); +} + +// Test that the delegate is invoked after the download file is renamed. +TEST_F(DownloadItemTest, CallbackAfterRename) { + DownloadItemImpl* item = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback callback; + MockDownloadFile* download_file = CallDownloadItemStart(item, &callback); + base::FilePath final_path( + base::FilePath(kDummyTargetPath).AppendASCII("foo.bar")); + base::FilePath intermediate_path(final_path.InsertBeforeExtensionASCII("x")); + base::FilePath new_intermediate_path( + final_path.InsertBeforeExtensionASCII("y")); + EXPECT_CALL(*download_file, RenameAndUniquify(intermediate_path, _)) + .WillOnce(ScheduleRenameAndUniquifyCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, new_intermediate_path, + base::ThreadTaskRunnerHandle::Get())); + + callback.Run(final_path, DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, intermediate_path, + DOWNLOAD_INTERRUPT_REASON_NONE); + task_environment_.RunUntilIdle(); + // All the callbacks should have happened by now. 
+ ::testing::Mock::VerifyAndClearExpectations(download_file); + mock_delegate()->VerifyAndClearExpectations(); + + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(item, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*download_file, RenameAndAnnotate(final_path, _, _, _, _)) + .WillOnce(ScheduleRenameAndAnnotateCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, final_path, + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*download_file, FullPath()) + .WillOnce(ReturnRefOfCopy(base::FilePath())); + EXPECT_CALL(*download_file, Detach()); + item->DestinationObserverAsWeakPtr()->DestinationCompleted( + 0, std::unique_ptr<crypto::SecureHash>()); + task_environment_.RunUntilIdle(); + ::testing::Mock::VerifyAndClearExpectations(download_file); + mock_delegate()->VerifyAndClearExpectations(); +} + +// Test that the delegate is invoked after the download file is renamed and the +// download item is in an interrupted state. +TEST_F(DownloadItemTest, CallbackAfterInterruptedRename) { + DownloadItemImpl* item = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback callback; + base::HistogramTester histogram_tester; + MockDownloadFile* download_file = CallDownloadItemStart(item, &callback); + base::FilePath final_path( + base::FilePath(kDummyTargetPath).AppendASCII("foo.bar")); + base::FilePath intermediate_path(final_path.InsertBeforeExtensionASCII("x")); + base::FilePath new_intermediate_path( + final_path.InsertBeforeExtensionASCII("y")); + EXPECT_CALL(*download_file, RenameAndUniquify(intermediate_path, _)) + .WillOnce(ScheduleRenameAndUniquifyCallback( + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED, new_intermediate_path, + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*download_file, Cancel()).Times(1); + + callback.Run(final_path, DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, intermediate_path, + DOWNLOAD_INTERRUPT_REASON_NONE); + task_environment_.RunUntilIdle(); + // All the callbacks should have happened by now. 
+ ::testing::Mock::VerifyAndClearExpectations(download_file); + mock_delegate()->VerifyAndClearExpectations(); + histogram_tester.ExpectBucketCount("Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED), + 1); +} + +TEST_F(DownloadItemTest, Interrupted) { + base::HistogramTester histogram_tester; + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + const DownloadInterruptReason reason( + DOWNLOAD_INTERRUPT_REASON_FILE_ACCESS_DENIED); + + // Confirm interrupt sets state properly. + EXPECT_CALL(*download_file, Cancel()); + item->DestinationObserverAsWeakPtr()->DestinationError( + reason, 0, std::unique_ptr<crypto::SecureHash>()); + task_environment_.RunUntilIdle(); + EXPECT_EQ(DownloadItem::INTERRUPTED, item->GetState()); + EXPECT_EQ(reason, item->GetLastReason()); + + // Cancel should kill it. + item->Cancel(true); + EXPECT_EQ(DownloadItem::CANCELLED, item->GetState()); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_USER_CANCELED, item->GetLastReason()); + + histogram_tester.ExpectBucketCount( + "Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>(reason), 1); +} + +// Destination errors that occur before the intermediate rename shouldn't cause +// the download to be marked as interrupted until after the intermediate rename. 
+TEST_F(DownloadItemTest, InterruptedBeforeIntermediateRename_Restart) { + base::HistogramTester histogram_tester; + DownloadItemImpl* item = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback callback; + MockDownloadFile* download_file = CallDownloadItemStart(item, &callback); + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED, 0, + std::unique_ptr<crypto::SecureHash>()); + ASSERT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + + base::FilePath final_path( + base::FilePath(kDummyTargetPath).AppendASCII("foo.bar")); + base::FilePath intermediate_path(final_path.InsertBeforeExtensionASCII("x")); + base::FilePath new_intermediate_path( + final_path.InsertBeforeExtensionASCII("y")); + EXPECT_CALL(*download_file, RenameAndUniquify(intermediate_path, _)) + .WillOnce(ScheduleRenameAndUniquifyCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, new_intermediate_path, + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*download_file, Cancel()).Times(1); + + callback.Run(final_path, DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, intermediate_path, + DOWNLOAD_INTERRUPT_REASON_NONE); + task_environment_.RunUntilIdle(); + // All the callbacks should have happened by now. + ::testing::Mock::VerifyAndClearExpectations(download_file); + mock_delegate()->VerifyAndClearExpectations(); + EXPECT_EQ(DownloadItem::INTERRUPTED, item->GetState()); + EXPECT_TRUE(item->GetFullPath().empty()); + EXPECT_EQ(final_path, item->GetTargetFilePath()); + histogram_tester.ExpectBucketCount("Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED), + 1); +} + +// As above. But if the download can be resumed by continuing, then the +// intermediate path should be retained when the download is interrupted after +// the intermediate rename succeeds. 
+TEST_F(DownloadItemTest, InterruptedBeforeIntermediateRename_Continue) { + base::HistogramTester histogram_tester; + DownloadItemImpl* item = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback callback; + MockDownloadFile* download_file = CallDownloadItemStart(item, &callback); + + // Write some data and interrupt with NETWORK_FAILED. The download shouldn't + // transition to INTERRUPTED until the destination callback has been invoked. + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED, 1, + std::unique_ptr<crypto::SecureHash>()); + ASSERT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + + base::FilePath final_path( + base::FilePath(kDummyTargetPath).AppendASCII("foo.bar")); + base::FilePath intermediate_path(final_path.InsertBeforeExtensionASCII("x")); + base::FilePath new_intermediate_path( + final_path.InsertBeforeExtensionASCII("y")); + EXPECT_CALL(*download_file, RenameAndUniquify(intermediate_path, _)) + .WillOnce(ScheduleRenameAndUniquifyCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, new_intermediate_path, + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*download_file, FullPath()) + .WillOnce(ReturnRefOfCopy(base::FilePath(new_intermediate_path))); + EXPECT_CALL(*download_file, Detach()); + + callback.Run(final_path, DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, intermediate_path, + DOWNLOAD_INTERRUPT_REASON_NONE); + task_environment_.RunUntilIdle(); + // All the callbacks should have happened by now. 
+ ::testing::Mock::VerifyAndClearExpectations(download_file); + mock_delegate()->VerifyAndClearExpectations(); + EXPECT_EQ(DownloadItem::INTERRUPTED, item->GetState()); + EXPECT_EQ(new_intermediate_path, item->GetFullPath()); + EXPECT_EQ(final_path, item->GetTargetFilePath()); + histogram_tester.ExpectBucketCount( + "Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED), + 1); +} + +// As above. If the intermediate rename fails, then the interrupt reason should +// be set to the file error and the intermediate path should be empty. +TEST_F(DownloadItemTest, InterruptedBeforeIntermediateRename_Failed) { + base::HistogramTester histogram_tester; + DownloadItemImpl* item = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback callback; + MockDownloadFile* download_file = CallDownloadItemStart(item, &callback); + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED, 0, + std::unique_ptr<crypto::SecureHash>()); + ASSERT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + + base::FilePath final_path( + base::FilePath(kDummyTargetPath).AppendASCII("foo.bar")); + base::FilePath intermediate_path(final_path.InsertBeforeExtensionASCII("x")); + base::FilePath new_intermediate_path( + final_path.InsertBeforeExtensionASCII("y")); + EXPECT_CALL(*download_file, RenameAndUniquify(intermediate_path, _)) + .WillOnce(ScheduleRenameAndUniquifyCallback( + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED, new_intermediate_path, + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*download_file, Cancel()).Times(1); + + callback.Run(final_path, DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, intermediate_path, + DOWNLOAD_INTERRUPT_REASON_NONE); + task_environment_.RunUntilIdle(); + // All the callbacks should have happened by now. 
+ ::testing::Mock::VerifyAndClearExpectations(download_file); + mock_delegate()->VerifyAndClearExpectations(); + EXPECT_EQ(DownloadItem::INTERRUPTED, item->GetState()); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_FILE_FAILED, item->GetLastReason()); + EXPECT_TRUE(item->GetFullPath().empty()); + EXPECT_EQ(final_path, item->GetTargetFilePath()); + + // Rename error will overwrite the previous network interrupt reason. + // TODO(xingliu): See if we should report both interrupted reasons or the + // first one, see https://crbug.com/769040. + histogram_tester.ExpectBucketCount("Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED), + 1); + histogram_tester.ExpectTotalCount("Download.InterruptedReason", 1); +} + +TEST_F(DownloadItemTest, Canceled) { + DownloadItemImpl* item = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback target_callback; + MockDownloadFile* download_file = + CallDownloadItemStart(item, &target_callback); + + // Confirm cancel sets state properly. 
+ EXPECT_CALL(*download_file, Cancel()); + item->Cancel(true); + EXPECT_EQ(DownloadItem::CANCELLED, item->GetState()); +} + +TEST_F(DownloadItemTest, DownloadTargetDetermined_Cancel) { + DownloadItemImpl* item = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback callback; + MockDownloadFile* download_file = CallDownloadItemStart(item, &callback); + + EXPECT_CALL(*download_file, Cancel()); + callback.Run(base::FilePath(FILE_PATH_LITERAL("foo")), + DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, + base::FilePath(FILE_PATH_LITERAL("bar")), + DOWNLOAD_INTERRUPT_REASON_USER_CANCELED); + EXPECT_EQ(DownloadItem::CANCELLED, item->GetState()); +} + +TEST_F(DownloadItemTest, DownloadTargetDetermined_CancelWithEmptyName) { + DownloadItemImpl* item = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback callback; + MockDownloadFile* download_file = CallDownloadItemStart(item, &callback); + + EXPECT_CALL(*download_file, Cancel()); + callback.Run(base::FilePath(), DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, base::FilePath(), + DOWNLOAD_INTERRUPT_REASON_NONE); + EXPECT_EQ(DownloadItem::CANCELLED, item->GetState()); +} + +TEST_F(DownloadItemTest, DownloadTargetDetermined_Conflict) { + DownloadItemImpl* item = CreateDownloadItem(); + DownloadItemImplDelegate::DownloadTargetCallback callback; + MockDownloadFile* download_file = CallDownloadItemStart(item, &callback); + base::FilePath target_path(FILE_PATH_LITERAL("/foo/bar")); + + EXPECT_CALL(*download_file, Cancel()); + callback.Run(target_path, DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, target_path, + DOWNLOAD_INTERRUPT_REASON_FILE_SAME_AS_SOURCE); + EXPECT_EQ(DownloadItem::INTERRUPTED, item->GetState()); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_FILE_SAME_AS_SOURCE, + item->GetLastReason()); +} + +TEST_F(DownloadItemTest, FileRemoved) { + DownloadItemImpl* item = CreateDownloadItem(); 
+ + EXPECT_FALSE(item->GetFileExternallyRemoved()); + item->OnDownloadedFileRemoved(); + EXPECT_TRUE(item->GetFileExternallyRemoved()); +} + +TEST_F(DownloadItemTest, DestinationUpdate) { + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + base::WeakPtr<DownloadDestinationObserver> as_observer( + item->DestinationObserverAsWeakPtr()); + TestDownloadItemObserver observer(item); + + EXPECT_EQ(0l, item->CurrentSpeed()); + EXPECT_EQ(0l, item->GetReceivedBytes()); + EXPECT_EQ(0l, item->GetTotalBytes()); + EXPECT_FALSE(observer.CheckAndResetDownloadUpdated()); + item->SetTotalBytes(100l); + EXPECT_EQ(100l, item->GetTotalBytes()); + + std::vector<DownloadItem::ReceivedSlice> received_slices; + received_slices.emplace_back(0, 10); + as_observer->DestinationUpdate(10, 20, received_slices); + EXPECT_EQ(20l, item->CurrentSpeed()); + EXPECT_EQ(10l, item->GetReceivedBytes()); + EXPECT_EQ(100l, item->GetTotalBytes()); + EXPECT_EQ(received_slices, item->GetReceivedSlices()); + EXPECT_TRUE(observer.CheckAndResetDownloadUpdated()); + + received_slices.emplace_back(200, 100); + as_observer->DestinationUpdate(200, 20, received_slices); + EXPECT_EQ(20l, item->CurrentSpeed()); + EXPECT_EQ(200l, item->GetReceivedBytes()); + EXPECT_EQ(0l, item->GetTotalBytes()); + EXPECT_EQ(received_slices, item->GetReceivedSlices()); + EXPECT_TRUE(observer.CheckAndResetDownloadUpdated()); + + CleanupItem(item, file, DownloadItem::IN_PROGRESS); +} + +TEST_F(DownloadItemTest, DestinationError_NoRestartRequired) { + base::HistogramTester histogram_tester; + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + base::WeakPtr<DownloadDestinationObserver> as_observer( + item->DestinationObserverAsWeakPtr()); + TestDownloadItemObserver observer(item); + + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + 
EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, item->GetLastReason()); + EXPECT_FALSE(observer.CheckAndResetDownloadUpdated()); + + std::unique_ptr<crypto::SecureHash> hash( + crypto::SecureHash::Create(crypto::SecureHash::SHA256)); + hash->Update(kTestData1, sizeof(kTestData1)); + + EXPECT_CALL(*download_file, Detach()); + as_observer->DestinationError(DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED, 1, + std::move(hash)); + mock_delegate()->VerifyAndClearExpectations(); + EXPECT_TRUE(observer.CheckAndResetDownloadUpdated()); + EXPECT_EQ(DownloadItem::INTERRUPTED, item->GetState()); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED, item->GetLastReason()); + EXPECT_EQ( + std::string(std::begin(kHashOfTestData1), std::end(kHashOfTestData1)), + item->GetHash()); + histogram_tester.ExpectBucketCount( + "Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED), + 1); +} + +TEST_F(DownloadItemTest, DestinationError_RestartRequired) { + base::HistogramTester histogram_tester; + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + base::WeakPtr<DownloadDestinationObserver> as_observer( + item->DestinationObserverAsWeakPtr()); + TestDownloadItemObserver observer(item); + + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, item->GetLastReason()); + EXPECT_FALSE(observer.CheckAndResetDownloadUpdated()); + + std::unique_ptr<crypto::SecureHash> hash( + crypto::SecureHash::Create(crypto::SecureHash::SHA256)); + hash->Update(kTestData1, sizeof(kTestData1)); + + EXPECT_CALL(*download_file, Cancel()); + as_observer->DestinationError(DOWNLOAD_INTERRUPT_REASON_FILE_FAILED, 1, + std::move(hash)); + mock_delegate()->VerifyAndClearExpectations(); + EXPECT_TRUE(observer.CheckAndResetDownloadUpdated()); + EXPECT_EQ(DownloadItem::INTERRUPTED, item->GetState()); + 
EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_FILE_FAILED, item->GetLastReason()); + EXPECT_EQ(std::string(), item->GetHash()); + histogram_tester.ExpectBucketCount("Download.InterruptedReason", + ToHistogramSample<DownloadInterruptReason>( + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED), + 1); +} + +TEST_F(DownloadItemTest, DestinationCompleted) { + base::HistogramTester histogram_tester; + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + base::WeakPtr<DownloadDestinationObserver> as_observer( + item->DestinationObserverAsWeakPtr()); + TestDownloadItemObserver observer(item); + + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + EXPECT_EQ("", item->GetHash()); + EXPECT_FALSE(item->AllDataSaved()); + EXPECT_FALSE(observer.CheckAndResetDownloadUpdated()); + + as_observer->DestinationUpdate(10, 20, + std::vector<DownloadItem::ReceivedSlice>()); + EXPECT_TRUE(observer.CheckAndResetDownloadUpdated()); + EXPECT_FALSE(observer.CheckAndResetDownloadUpdated()); // Confirm reset. + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + EXPECT_EQ("", item->GetHash()); + EXPECT_FALSE(item->AllDataSaved()); + + std::unique_ptr<crypto::SecureHash> hash( + crypto::SecureHash::Create(crypto::SecureHash::SHA256)); + hash->Update(kTestData1, sizeof(kTestData1)); + + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(_, _)); + as_observer->DestinationCompleted(10, std::move(hash)); + mock_delegate()->VerifyAndClearExpectations(); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + EXPECT_TRUE(observer.CheckAndResetDownloadUpdated()); + EXPECT_EQ( + std::string(std::begin(kHashOfTestData1), std::end(kHashOfTestData1)), + item->GetHash()); + EXPECT_TRUE(item->AllDataSaved()); + + // Even though the DownloadItem receives a DestinationCompleted() + // event, target determination hasn't completed, hence the download item is + // stuck in TARGET_PENDING. 
+ CleanupItem(item, download_file, DownloadItem::IN_PROGRESS); + + histogram_tester.ExpectTotalCount("Download.InterruptedReason", 0); +} + +TEST_F(DownloadItemTest, EnabledActionsForNormalDownload) { + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + // InProgress + ASSERT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + ASSERT_FALSE(item->GetTargetFilePath().empty()); + EXPECT_TRUE(item->CanShowInFolder()); + EXPECT_TRUE(item->CanOpenDownload()); + + // Complete + EXPECT_CALL(*download_file, RenameAndAnnotate(_, _, _, _, _)) + .WillOnce(ScheduleRenameAndAnnotateCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, base::FilePath(kDummyTargetPath), + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(item, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*download_file, FullPath()) + .WillOnce(ReturnRefOfCopy(base::FilePath())); + EXPECT_CALL(*download_file, Detach()); + item->DestinationObserverAsWeakPtr()->DestinationCompleted( + 0, std::unique_ptr<crypto::SecureHash>()); + task_environment_.RunUntilIdle(); + + ASSERT_EQ(DownloadItem::COMPLETE, item->GetState()); + EXPECT_TRUE(item->CanShowInFolder()); + EXPECT_TRUE(item->CanOpenDownload()); +} + +TEST_F(DownloadItemTest, EnabledActionsForTemporaryDownload) { + // A download created with a non-empty FilePath is considered a temporary + // download. 
+ create_info()->save_info->file_path = base::FilePath(kDummyTargetPath); + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + // InProgress Temporary + ASSERT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + ASSERT_FALSE(item->GetTargetFilePath().empty()); + ASSERT_TRUE(item->IsTemporary()); + EXPECT_FALSE(item->CanShowInFolder()); + EXPECT_FALSE(item->CanOpenDownload()); + + // Complete Temporary + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(item, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*download_file, RenameAndAnnotate(_, _, _, _, _)) + .WillOnce(ScheduleRenameAndAnnotateCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, base::FilePath(kDummyTargetPath), + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*download_file, FullPath()) + .WillOnce(ReturnRefOfCopy(base::FilePath())); + EXPECT_CALL(*download_file, Detach()); + item->DestinationObserverAsWeakPtr()->DestinationCompleted( + 0, std::unique_ptr<crypto::SecureHash>()); + task_environment_.RunUntilIdle(); + + ASSERT_EQ(DownloadItem::COMPLETE, item->GetState()); + EXPECT_FALSE(item->CanShowInFolder()); + EXPECT_FALSE(item->CanOpenDownload()); +} + +TEST_F(DownloadItemTest, EnabledActionsForInterruptedDownload) { + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + EXPECT_CALL(*download_file, Cancel()); + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED, 0, + std::unique_ptr<crypto::SecureHash>()); + task_environment_.RunUntilIdle(); + + ASSERT_EQ(DownloadItem::INTERRUPTED, item->GetState()); + ASSERT_FALSE(item->GetTargetFilePath().empty()); + EXPECT_FALSE(item->CanShowInFolder()); + EXPECT_TRUE(item->CanOpenDownload()); +} + +TEST_F(DownloadItemTest, EnabledActionsForCancelledDownload) { + DownloadItemImpl* item = CreateDownloadItem(); + 
MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + EXPECT_CALL(*download_file, Cancel()); + item->Cancel(true); + task_environment_.RunUntilIdle(); + + ASSERT_EQ(DownloadItem::CANCELLED, item->GetState()); + EXPECT_FALSE(item->CanShowInFolder()); + EXPECT_FALSE(item->CanOpenDownload()); +} + +// Test various aspects of the delegate completion blocker. + +// Just allowing completion. +TEST_F(DownloadItemTest, CompleteDelegate_ReturnTrue) { + // Test to confirm that if we have a callback that returns true, + // we complete immediately. + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + // Drive the delegate interaction. + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(item, _)) + .WillOnce(Return(true)); + item->DestinationObserverAsWeakPtr()->DestinationCompleted( + 0, std::unique_ptr<crypto::SecureHash>()); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + EXPECT_FALSE(item->IsDangerous()); + + // Make sure the download can complete. + EXPECT_CALL(*download_file, + RenameAndAnnotate(base::FilePath(kDummyTargetPath), _, _, _, _)) + .WillOnce(ScheduleRenameAndAnnotateCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, base::FilePath(kDummyTargetPath), + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*mock_delegate(), ShouldOpenDownload(item, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*download_file, FullPath()) + .WillOnce(ReturnRefOfCopy(base::FilePath())); + EXPECT_CALL(*download_file, Detach()); + task_environment_.RunUntilIdle(); + EXPECT_EQ(DownloadItem::COMPLETE, item->GetState()); +} + +// Just delaying completion. +TEST_F(DownloadItemTest, CompleteDelegate_BlockOnce) { + // Test to confirm that if we have a callback that returns true, + // we complete immediately. 
+ + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + base::Closure delegate_callback; + base::Closure copy_delegate_callback; + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(item, _)) + .WillOnce(DoAll(SaveArg<1>(&delegate_callback), Return(false))) + .WillOnce(Return(true)); + item->DestinationObserverAsWeakPtr()->DestinationCompleted( + 0, std::unique_ptr<crypto::SecureHash>()); + ASSERT_FALSE(delegate_callback.is_null()); + copy_delegate_callback = delegate_callback; + delegate_callback.Reset(); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + std::move(copy_delegate_callback).Run(); + ASSERT_TRUE(delegate_callback.is_null()); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + EXPECT_FALSE(item->IsDangerous()); + + // Make sure the download can complete. + EXPECT_CALL(*download_file, + RenameAndAnnotate(base::FilePath(kDummyTargetPath), _, _, _, _)) + .WillOnce(ScheduleRenameAndAnnotateCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, base::FilePath(kDummyTargetPath), + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*mock_delegate(), ShouldOpenDownload(item, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*download_file, FullPath()) + .WillOnce(ReturnRefOfCopy(base::FilePath())); + EXPECT_CALL(*download_file, Detach()); + task_environment_.RunUntilIdle(); + EXPECT_EQ(DownloadItem::COMPLETE, item->GetState()); +} + +// Delay and set danger. +TEST_F(DownloadItemTest, CompleteDelegate_SetDanger) { + // Test to confirm that if we have a callback that returns true, + // we complete immediately. + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + // Drive the delegate interaction. 
+ base::Closure delegate_callback; + base::Closure copy_delegate_callback; + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(item, _)) + .WillOnce(DoAll(SaveArg<1>(&delegate_callback), Return(false))) + .WillOnce(Return(true)); + item->DestinationObserverAsWeakPtr()->DestinationCompleted( + 0, std::unique_ptr<crypto::SecureHash>()); + ASSERT_FALSE(delegate_callback.is_null()); + copy_delegate_callback = delegate_callback; + delegate_callback.Reset(); + EXPECT_FALSE(item->IsDangerous()); + item->OnContentCheckCompleted(DOWNLOAD_DANGER_TYPE_DANGEROUS_FILE, + DOWNLOAD_INTERRUPT_REASON_NONE); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + std::move(copy_delegate_callback).Run(); + ASSERT_TRUE(delegate_callback.is_null()); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + EXPECT_TRUE(item->IsDangerous()); + + // Make sure the download doesn't complete until we've validated it. + EXPECT_CALL(*download_file, + RenameAndAnnotate(base::FilePath(kDummyTargetPath), _, _, _, _)) + .WillOnce(ScheduleRenameAndAnnotateCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, base::FilePath(kDummyTargetPath), + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*mock_delegate(), ShouldOpenDownload(item, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*download_file, FullPath()) + .WillOnce(ReturnRefOfCopy(base::FilePath())); + EXPECT_CALL(*download_file, Detach()); + task_environment_.RunUntilIdle(); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + EXPECT_TRUE(item->IsDangerous()); + + item->ValidateDangerousDownload(); + EXPECT_EQ(DOWNLOAD_DANGER_TYPE_USER_VALIDATED, item->GetDangerType()); + task_environment_.RunUntilIdle(); + EXPECT_EQ(DownloadItem::COMPLETE, item->GetState()); +} + +// Just delaying completion twice. +TEST_F(DownloadItemTest, CompleteDelegate_BlockTwice) { + // Test to confirm that if we have a callback that returns true, + // we complete immediately. 
+ DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); + + // Drive the delegate interaction. + base::Closure delegate_callback; + base::Closure copy_delegate_callback; + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(item, _)) + .WillOnce(DoAll(SaveArg<1>(&delegate_callback), Return(false))) + .WillOnce(DoAll(SaveArg<1>(&delegate_callback), Return(false))) + .WillOnce(Return(true)); + item->DestinationObserverAsWeakPtr()->DestinationCompleted( + 0, std::unique_ptr<crypto::SecureHash>()); + ASSERT_FALSE(delegate_callback.is_null()); + copy_delegate_callback = delegate_callback; + delegate_callback.Reset(); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + copy_delegate_callback.Run(); + ASSERT_FALSE(delegate_callback.is_null()); + copy_delegate_callback = delegate_callback; + delegate_callback.Reset(); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + std::move(copy_delegate_callback).Run(); + ASSERT_TRUE(delegate_callback.is_null()); + EXPECT_EQ(DownloadItem::IN_PROGRESS, item->GetState()); + EXPECT_FALSE(item->IsDangerous()); + + // Make sure the download can complete. 
+ EXPECT_CALL(*download_file, + RenameAndAnnotate(base::FilePath(kDummyTargetPath), _, _, _, _)) + .WillOnce(ScheduleRenameAndAnnotateCallback( + DOWNLOAD_INTERRUPT_REASON_NONE, base::FilePath(kDummyTargetPath), + base::ThreadTaskRunnerHandle::Get())); + EXPECT_CALL(*mock_delegate(), ShouldOpenDownload(item, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*download_file, FullPath()) + .WillOnce(ReturnRefOfCopy(base::FilePath())); + EXPECT_CALL(*download_file, Detach()); + task_environment_.RunUntilIdle(); + EXPECT_EQ(DownloadItem::COMPLETE, item->GetState()); +} + +TEST_F(DownloadItemTest, StealDangerousDownloadAndDiscard) { + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_DANGEROUS_FILE); + ASSERT_TRUE(item->IsDangerous()); + base::FilePath full_path(FILE_PATH_LITERAL("foo.txt")); + base::FilePath returned_path; + + EXPECT_CALL(*download_file, FullPath()).WillOnce(ReturnRefOfCopy(full_path)); + EXPECT_CALL(*download_file, Detach()); + EXPECT_CALL(*mock_delegate(), DownloadRemoved(_)); + base::WeakPtrFactory<DownloadItemTest> weak_ptr_factory(this); + item->OnAllDataSaved(0, std::unique_ptr<crypto::SecureHash>()); + item->StealDangerousDownload( + true, // delete_file_after_feedback + base::Bind(&DownloadItemTest::OnDownloadFileAcquired, + weak_ptr_factory.GetWeakPtr(), + base::Unretained(&returned_path))); + task_environment_.RunUntilIdle(); + EXPECT_EQ(full_path, returned_path); +} + +TEST_F(DownloadItemTest, StealDangerousDownloadAndKeep) { + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_DANGEROUS_FILE); + ASSERT_TRUE(item->IsDangerous()); + base::FilePath full_path(FILE_PATH_LITERAL("foo.txt")); + base::FilePath returned_path; + EXPECT_CALL(*download_file, FullPath()).WillOnce(ReturnRefOfCopy(full_path)); + base::WeakPtrFactory<DownloadItemTest> weak_ptr_factory(this); + item->OnAllDataSaved(0, 
std::unique_ptr<crypto::SecureHash>()); + item->StealDangerousDownload( + false, // delete_file_after_feedback + base::Bind(&DownloadItemTest::OnDownloadFileAcquired, + weak_ptr_factory.GetWeakPtr(), + base::Unretained(&returned_path))); + task_environment_.RunUntilIdle(); + EXPECT_NE(full_path, returned_path); + CleanupItem(item, download_file, DownloadItem::IN_PROGRESS); +} + +TEST_F(DownloadItemTest, StealInterruptedContinuableDangerousDownload) { + base::FilePath returned_path; + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_DANGEROUS_FILE); + base::FilePath full_path = item->GetFullPath(); + EXPECT_FALSE(full_path.empty()); + EXPECT_CALL(*download_file, FullPath()).WillOnce(ReturnRefOfCopy(full_path)); + EXPECT_CALL(*download_file, Detach()); + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED, 1, + std::unique_ptr<crypto::SecureHash>()); + ASSERT_TRUE(item->IsDangerous()); + + EXPECT_CALL(*mock_delegate(), DownloadRemoved(_)); + base::WeakPtrFactory<DownloadItemTest> weak_ptr_factory(this); + item->OnAllDataSaved(0, std::unique_ptr<crypto::SecureHash>()); + item->StealDangerousDownload( + true, base::Bind(&DownloadItemTest::OnDownloadFileAcquired, + weak_ptr_factory.GetWeakPtr(), + base::Unretained(&returned_path))); + task_environment_.RunUntilIdle(); + EXPECT_EQ(full_path, returned_path); +} + +TEST_F(DownloadItemTest, StealInterruptedNonContinuableDangerousDownload) { + base::FilePath returned_path; + DownloadItemImpl* item = CreateDownloadItem(); + MockDownloadFile* download_file = + DoIntermediateRename(item, DOWNLOAD_DANGER_TYPE_DANGEROUS_FILE); + EXPECT_CALL(*download_file, Cancel()); + item->DestinationObserverAsWeakPtr()->DestinationError( + DOWNLOAD_INTERRUPT_REASON_FILE_FAILED, 1, + std::unique_ptr<crypto::SecureHash>()); + ASSERT_TRUE(item->IsDangerous()); + + EXPECT_CALL(*mock_delegate(), DownloadRemoved(_)); + 
base::WeakPtrFactory<DownloadItemTest> weak_ptr_factory(this); + item->OnAllDataSaved(0, std::unique_ptr<crypto::SecureHash>()); + item->StealDangerousDownload( + true, base::Bind(&DownloadItemTest::OnDownloadFileAcquired, + weak_ptr_factory.GetWeakPtr(), + base::Unretained(&returned_path))); + task_environment_.RunUntilIdle(); + EXPECT_TRUE(returned_path.empty()); +} + +namespace { + +// The DownloadItemDestinationUpdateRaceTest fixture (defined below) is used to +// test for race conditions between download destination events received via the +// DownloadDestinationObserver interface, and the target determination logic. +// +// The general control flow for DownloadItemImpl looks like this: +// +// * Start() called, which in turn calls DownloadFile::Initialize(). +// +// Even though OnDownloadFileInitialized hasn't been called, there could now +// be destination observer calls queued prior to the task that calls +// OnDownloadFileInitialized. Let's call this point in the workflow "A". +// +// * DownloadItemImpl::OnDownloadFileInitialized() called. +// +// * Assuming the result is successful, DII now invokes the delegate's +// DetermineDownloadTarget method. +// +// At this point DonwnloadFile acts as the source of +// DownloadDestinationObserver events, and may invoke callbacks. Let's call +// this point in the workflow "B". +// +// * DII::OnDownloadTargetDetermined() invoked after delegate is done with +// target determination. +// +// * DII attempts to rename the DownloadFile to its intermediate name. +// +// More DownloadDestinationObserver events can happen here. Let's call this +// point in the workflow "C". +// +// * DII::OnDownloadRenamedToIntermediateName() invoked. Assuming all went well, +// DII is now in IN_PROGRESS state. +// +// More DownloadDestinationObserver events can happen here. Let's call this +// point in the workflow "D". 
+// +// The DownloadItemDestinationUpdateRaceTest works by generating various +// combinations of DownloadDestinationObserver events that might occur at the +// points "A", "B", "C", and "D" above. Each test in this suite cranks a +// DownloadItemImpl through the states listed above and invokes the events +// assigned to each position. + +// This type of callback represents a call to a DownloadDestinationObserver +// method that's missing the DownloadDestinationObserver object. Currying this +// way allows us to bind a call prior to constructing the object on which the +// method would be invoked. This is necessary since we are going to construct +// various permutations of observer calls that will then be applied to a +// DownloadItem in a state as yet undetermined. +using CurriedObservation = + base::Callback<void(base::WeakPtr<DownloadDestinationObserver>)>; + +// A list of observations that are to be made during some event in the +// DownloadItemImpl control flow. Ordering of the observations is significant. +using ObservationList = base::circular_deque<CurriedObservation>; + +// An ordered list of events. +// +// An "event" in this context refers to some stage in the DownloadItemImpl's +// workflow described as "A", "B", "C", or "D" above. An EventList is expected +// to always contains kEventCount events. +using EventList = base::circular_deque<ObservationList>; + +// Number of events in an EventList. This is always 4 for now as described +// above. +const int kEventCount = 4; + +// The following functions help us with currying the calls to +// DownloadDestinationObserver. If std::bind was allowed along with +// std::placeholders, it is possible to avoid these functions, but currently +// Chromium doesn't allow using std::bind for good reasons. 
+void DestinationUpdateInvoker( + int64_t bytes_so_far, + int64_t bytes_per_sec, + base::WeakPtr<DownloadDestinationObserver> observer) { + DVLOG(20) << "DestinationUpdate(bytes_so_far:" << bytes_so_far + << ", bytes_per_sec:" << bytes_per_sec + << ") observer:" << !!observer; + if (observer) { + observer->DestinationUpdate(bytes_so_far, bytes_per_sec, + std::vector<DownloadItem::ReceivedSlice>()); + } +} + +void DestinationErrorInvoker( + DownloadInterruptReason reason, + int64_t bytes_so_far, + base::WeakPtr<DownloadDestinationObserver> observer) { + DVLOG(20) << "DestinationError(reason:" + << DownloadInterruptReasonToString(reason) + << ", bytes_so_far:" << bytes_so_far << ") observer:" << !!observer; + if (observer) + observer->DestinationError(reason, bytes_so_far, + std::unique_ptr<crypto::SecureHash>()); +} + +void DestinationCompletedInvoker( + int64_t total_bytes, + base::WeakPtr<DownloadDestinationObserver> observer) { + DVLOG(20) << "DestinationComplete(total_bytes:" << total_bytes + << ") observer:" << !!observer; + if (observer) + observer->DestinationCompleted(total_bytes, + std::unique_ptr<crypto::SecureHash>()); +} + +// Given a set of observations (via the range |begin|..|end|), constructs a list +// of EventLists such that: +// +// * There are exactly |event_count| ObservationSets in each EventList. +// +// * Each ObservationList in each EventList contains a subrange (possibly empty) +// of observations from the input range, in the same order as the input range. +// +// * The ordering of the ObservationList in each EventList is such that all +// observations in one ObservationList occur earlier than all observations in +// an ObservationList that follows it. +// +// * The list of EventLists together describe all the possible ways in which the +// list of observations can be distributed into |event_count| events. 
+std::vector<EventList> DistributeObservationsIntoEvents( + const std::vector<CurriedObservation>::iterator begin, + const std::vector<CurriedObservation>::iterator end, + int event_count) { + std::vector<EventList> all_event_lists; + for (auto partition = begin;; ++partition) { + ObservationList first_group_of_observations(begin, partition); + if (event_count > 1) { + std::vector<EventList> list_of_subsequent_events = + DistributeObservationsIntoEvents(partition, end, event_count - 1); + for (const auto& subsequent_events : list_of_subsequent_events) { + EventList event_list; + event_list = subsequent_events; + event_list.push_front(first_group_of_observations); + all_event_lists.push_back(event_list); + } + } else { + EventList event_list; + event_list.push_front(first_group_of_observations); + all_event_lists.push_back(event_list); + } + if (partition == end) + break; + } + return all_event_lists; +} + +// For the purpose of this tests, we are only concerned with 3 events: +// +// 1. Immediately after the DownloadFile is initialized. +// 2. Immediately after the DownloadTargetCallback is invoked. +// 3. Immediately after the intermediate file is renamed. +// +// We are going to take a couple of sets of DownloadDestinationObserver events +// and distribute them into the three events described above. And then we are +// going to invoke the observations while a DownloadItemImpl is carefully +// stepped through its stages. 
+ +std::vector<EventList> GenerateSuccessfulEventLists() { + std::vector<CurriedObservation> all_observations; + all_observations.push_back(base::Bind(&DestinationUpdateInvoker, 100, 100)); + all_observations.push_back(base::Bind(&DestinationUpdateInvoker, 200, 100)); + all_observations.push_back(base::Bind(&DestinationCompletedInvoker, 200)); + return DistributeObservationsIntoEvents(all_observations.begin(), + all_observations.end(), kEventCount); +} + +std::vector<EventList> GenerateFailingEventLists() { + std::vector<CurriedObservation> all_observations; + all_observations.push_back(base::Bind(&DestinationUpdateInvoker, 100, 100)); + all_observations.push_back(base::Bind( + &DestinationErrorInvoker, DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED, 100)); + return DistributeObservationsIntoEvents(all_observations.begin(), + all_observations.end(), kEventCount); +} + +class DownloadItemDestinationUpdateRaceTest + : public DownloadItemTest, + public ::testing::WithParamInterface<EventList> { + public: + DownloadItemDestinationUpdateRaceTest() + : DownloadItemTest(), + item_(CreateDownloadItem()), + file_(new ::testing::StrictMock<MockDownloadFile>()), + request_handle_(new ::testing::StrictMock<MockRequestHandle>()) { + DCHECK_EQ(GetParam().size(), static_cast<unsigned>(kEventCount)); + } + + protected: + const ObservationList& PreInitializeFileObservations() { + return GetParam().front(); + } + const ObservationList& PostInitializeFileObservations() { + return *(GetParam().begin() + 1); + } + const ObservationList& PostTargetDeterminationObservations() { + return *(GetParam().begin() + 2); + } + const ObservationList& PostIntermediateRenameObservations() { + return *(GetParam().begin() + 3); + } + + // Apply all the observations in |observations| to |observer|, but do so + // asynchronously so that the events are applied in order behind any tasks + // that are already scheduled. 
+ void ScheduleObservations( + const ObservationList& observations, + base::WeakPtr<DownloadDestinationObserver> observer) { + for (const auto action : observations) + base::ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, base::BindOnce(action, observer)); + } + + DownloadItemImpl* item_; + std::unique_ptr<MockDownloadFile> file_; + std::unique_ptr<MockRequestHandle> request_handle_; + + base::queue<base::Closure> successful_update_events_; + base::queue<base::Closure> failing_update_events_; +}; + +INSTANTIATE_TEST_CASE_P(Success, + DownloadItemDestinationUpdateRaceTest, + ::testing::ValuesIn(GenerateSuccessfulEventLists())); + +INSTANTIATE_TEST_CASE_P(Failure, + DownloadItemDestinationUpdateRaceTest, + ::testing::ValuesIn(GenerateFailingEventLists())); + +} // namespace + +// Run through the DII workflow but the embedder cancels the download at target +// determination. +TEST_P(DownloadItemDestinationUpdateRaceTest, DownloadCancelledByUser) { + // Expect that the download file and the request will be cancelled as a + // result. 
+ EXPECT_CALL(*file_, Cancel()); + EXPECT_CALL(*request_handle_, CancelRequest(_)); + + DownloadFile::InitializeCallback initialize_callback; + EXPECT_CALL(*file_, Initialize(_, _, _, _)) + .WillOnce(SaveArg<0>(&initialize_callback)); + item_->Start(std::move(file_), std::move(request_handle_), *create_info(), + nullptr, nullptr); + task_environment_.RunUntilIdle(); + + base::WeakPtr<DownloadDestinationObserver> destination_observer = + item_->DestinationObserverAsWeakPtr(); + + ScheduleObservations(PreInitializeFileObservations(), destination_observer); + task_environment_.RunUntilIdle(); + + DownloadItemImplDelegate::DownloadTargetCallback target_callback; + EXPECT_CALL(*mock_delegate(), DetermineDownloadTarget(_, _)) + .WillOnce(SaveArg<1>(&target_callback)); + ScheduleObservations(PostInitializeFileObservations(), destination_observer); + std::move(initialize_callback).Run(DOWNLOAD_INTERRUPT_REASON_NONE, 0); + + task_environment_.RunUntilIdle(); + + ASSERT_FALSE(target_callback.is_null()); + ScheduleObservations(PostTargetDeterminationObservations(), + destination_observer); + target_callback.Run(base::FilePath(), + DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, base::FilePath(), + DOWNLOAD_INTERRUPT_REASON_NONE); + EXPECT_EQ(DownloadItem::CANCELLED, item_->GetState()); + task_environment_.RunUntilIdle(); +} + +// Run through the DII workflow, but the intermediate rename fails. +TEST_P(DownloadItemDestinationUpdateRaceTest, IntermediateRenameFails) { + // Expect that the download file and the request will be cancelled as a + // result. + EXPECT_CALL(*file_, Cancel()); + EXPECT_CALL(*request_handle_, CancelRequest(_)); + + // Intermediate rename loop is not used immediately, but let's set up the + // DownloadFile expectations since we are about to transfer its ownership to + // the DownloadItem. 
+ DownloadFile::RenameCompletionCallback intermediate_rename_callback; + EXPECT_CALL(*file_, RenameAndUniquify(_, _)) + .WillOnce(SaveArg<1>(&intermediate_rename_callback)); + + DownloadFile::InitializeCallback initialize_callback; + EXPECT_CALL(*file_, Initialize(_, _, _, _)) + .WillOnce(SaveArg<0>(&initialize_callback)); + + item_->Start(std::move(file_), std::move(request_handle_), *create_info(), + nullptr, nullptr); + task_environment_.RunUntilIdle(); + + base::WeakPtr<DownloadDestinationObserver> destination_observer = + item_->DestinationObserverAsWeakPtr(); + + ScheduleObservations(PreInitializeFileObservations(), destination_observer); + task_environment_.RunUntilIdle(); + + DownloadItemImplDelegate::DownloadTargetCallback target_callback; + EXPECT_CALL(*mock_delegate(), DetermineDownloadTarget(_, _)) + .WillOnce(SaveArg<1>(&target_callback)); + ScheduleObservations(PostInitializeFileObservations(), destination_observer); + std::move(initialize_callback).Run(DOWNLOAD_INTERRUPT_REASON_NONE, 0); + + task_environment_.RunUntilIdle(); + ASSERT_FALSE(target_callback.is_null()); + + ScheduleObservations(PostTargetDeterminationObservations(), + destination_observer); + target_callback.Run(base::FilePath(kDummyTargetPath), + DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, + base::FilePath(kDummyIntermediatePath), + DOWNLOAD_INTERRUPT_REASON_NONE); + + task_environment_.RunUntilIdle(); + ASSERT_FALSE(intermediate_rename_callback.is_null()); + + ScheduleObservations(PostIntermediateRenameObservations(), + destination_observer); + intermediate_rename_callback.Run(DOWNLOAD_INTERRUPT_REASON_FILE_FAILED, + base::FilePath()); + task_environment_.RunUntilIdle(); + + EXPECT_EQ(DownloadItem::INTERRUPTED, item_->GetState()); +} + +// Run through the DII workflow. Download file initialization, target +// determination and intermediate rename all succeed. 
+TEST_P(DownloadItemDestinationUpdateRaceTest, IntermediateRenameSucceeds) { + // We expect either that the download will fail (in which case the request and + // the download file will be cancelled), or it will succeed (in which case the + // DownloadFile will Detach()). It depends on the list of observations that + // are given to us. + EXPECT_CALL(*file_, Cancel()).Times(::testing::AnyNumber()); + EXPECT_CALL(*request_handle_, CancelRequest(_)).Times(::testing::AnyNumber()); + EXPECT_CALL(*file_, Detach()).Times(::testing::AnyNumber()); + + EXPECT_CALL(*file_, FullPath()) + .WillRepeatedly(ReturnRefOfCopy(base::FilePath(kDummyIntermediatePath))); + + // Intermediate rename loop is not used immediately, but let's set up the + // DownloadFile expectations since we are about to transfer its ownership to + // the DownloadItem. + DownloadFile::RenameCompletionCallback intermediate_rename_callback; + EXPECT_CALL(*file_, RenameAndUniquify(_, _)) + .WillOnce(SaveArg<1>(&intermediate_rename_callback)); + + DownloadFile::InitializeCallback initialize_callback; + EXPECT_CALL(*file_, Initialize(_, _, _, _)) + .WillOnce(SaveArg<0>(&initialize_callback)); + + item_->Start(std::move(file_), std::move(request_handle_), *create_info(), + nullptr, nullptr); + task_environment_.RunUntilIdle(); + + base::WeakPtr<DownloadDestinationObserver> destination_observer = + item_->DestinationObserverAsWeakPtr(); + + ScheduleObservations(PreInitializeFileObservations(), destination_observer); + task_environment_.RunUntilIdle(); + + DownloadItemImplDelegate::DownloadTargetCallback target_callback; + EXPECT_CALL(*mock_delegate(), DetermineDownloadTarget(_, _)) + .WillOnce(SaveArg<1>(&target_callback)); + ScheduleObservations(PostInitializeFileObservations(), destination_observer); + std::move(initialize_callback).Run(DOWNLOAD_INTERRUPT_REASON_NONE, 0); + + task_environment_.RunUntilIdle(); + ASSERT_FALSE(target_callback.is_null()); + + 
ScheduleObservations(PostTargetDeterminationObservations(), + destination_observer); + target_callback.Run(base::FilePath(kDummyTargetPath), + DownloadItem::TARGET_DISPOSITION_OVERWRITE, + DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS, + base::FilePath(kDummyIntermediatePath), + DOWNLOAD_INTERRUPT_REASON_NONE); + + task_environment_.RunUntilIdle(); + ASSERT_FALSE(intermediate_rename_callback.is_null()); + + // This may or may not be called, depending on whether there are any errors in + // our action list. + EXPECT_CALL(*mock_delegate(), ShouldCompleteDownload(_, _)) + .Times(::testing::AnyNumber()); + + ScheduleObservations(PostIntermediateRenameObservations(), + destination_observer); + intermediate_rename_callback.Run(DOWNLOAD_INTERRUPT_REASON_NONE, + base::FilePath(kDummyIntermediatePath)); + task_environment_.RunUntilIdle(); + + // The state of the download depends on the observer events that were played + // back to the DownloadItemImpl. Hence we can't establish a single expectation + // here. On Debug builds, the DCHECKs will verify that the state transitions + // were correct. On Release builds, tests are expected to run to completion + // without crashing on success. + EXPECT_TRUE(item_->GetState() == DownloadItem::IN_PROGRESS || + item_->GetState() == DownloadItem::INTERRUPTED); + if (item_->GetState() == DownloadItem::INTERRUPTED) + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED, item_->GetLastReason()); + + item_->Cancel(true); + task_environment_.RunUntilIdle(); +} + +TEST(MockDownloadItem, Compiles) { + MockDownloadItem mock_item; +} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_job.cc b/chromium/components/download/internal/common/download_job.cc new file mode 100644 index 00000000000..d65b3348c41 --- /dev/null +++ b/chromium/components/download/internal/common/download_job.cc @@ -0,0 +1,114 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/public/common/download_job.h" + +#include "base/bind.h" +#include "base/bind_helpers.h" +#include "components/download/public/common/download_item.h" +#include "components/download/public/common/download_task_runner.h" + +namespace download { + +DownloadJob::DownloadJob( + DownloadItem* download_item, + std::unique_ptr<DownloadRequestHandleInterface> request_handle) + : download_item_(download_item), + request_handle_(std::move(request_handle)), + is_paused_(false), + weak_ptr_factory_(this) {} + +DownloadJob::~DownloadJob() = default; + +void DownloadJob::Cancel(bool user_cancel) { + if (request_handle_) + request_handle_->CancelRequest(user_cancel); +} + +void DownloadJob::Pause() { + is_paused_ = true; + + DownloadFile* download_file = download_item_->GetDownloadFile(); + if (download_file) { + GetDownloadTaskRunner()->PostTask( + FROM_HERE, + base::BindOnce(&DownloadFile::Pause, + // Safe because we control download file lifetime. + base::Unretained(download_file))); + } + if (request_handle_) + request_handle_->PauseRequest(); +} + +void DownloadJob::Resume(bool resume_request) { + is_paused_ = false; + if (!resume_request) + return; + + DownloadFile* download_file = download_item_->GetDownloadFile(); + if (download_file) { + GetDownloadTaskRunner()->PostTask( + FROM_HERE, + base::BindOnce(&DownloadFile::Resume, + // Safe because we control download file lifetime. + base::Unretained(download_file))); + } + + if (request_handle_) + request_handle_->ResumeRequest(); +} + +void DownloadJob::Start(DownloadFile* download_file_, + DownloadFile::InitializeCallback callback, + const DownloadItem::ReceivedSlices& received_slices) { + GetDownloadTaskRunner()->PostTask( + FROM_HERE, + base::BindOnce( + &DownloadFile::Initialize, + // Safe because we control download file lifetime. 
+ base::Unretained(download_file_), + base::BindRepeating(&DownloadJob::OnDownloadFileInitialized, + weak_ptr_factory_.GetWeakPtr(), callback), + base::BindRepeating(&DownloadJob::CancelRequestWithOffset, + weak_ptr_factory_.GetWeakPtr()), + received_slices, IsParallelizable())); +} + +void DownloadJob::OnDownloadFileInitialized( + DownloadFile::InitializeCallback callback, + DownloadInterruptReason result, + int64_t bytes_wasted) { + std::move(callback).Run(result, bytes_wasted); +} + +bool DownloadJob::AddInputStream(std::unique_ptr<InputStream> stream, + int64_t offset, + int64_t length) { + DownloadFile* download_file = download_item_->GetDownloadFile(); + if (!download_file) { + CancelRequestWithOffset(offset); + return false; + } + + // download_file_ is owned by download_item_ on the UI thread and is always + // deleted on the download task runner after download_file_ is nulled out. + // So it's safe to use base::Unretained here. + GetDownloadTaskRunner()->PostTask( + FROM_HERE, base::BindOnce(&DownloadFile::AddInputStream, + base::Unretained(download_file), + std::move(stream), offset, length)); + return true; +} + +void DownloadJob::CancelRequestWithOffset(int64_t offset) {} + +bool DownloadJob::IsParallelizable() const { + return false; +} + +bool DownloadJob::IsSavePackageDownload() const { + return false; +} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_job_factory.cc b/chromium/components/download/internal/common/download_job_factory.cc new file mode 100644 index 00000000000..b2fda04191a --- /dev/null +++ b/chromium/components/download/internal/common/download_job_factory.cc @@ -0,0 +1,116 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "components/download/public/common/download_job_factory.h" + +#include <memory> + +#include "components/download/internal/common/download_job_impl.h" +#include "components/download/internal/common/parallel_download_job.h" +#include "components/download/internal/common/parallel_download_utils.h" +#include "components/download/internal/common/save_package_download_job.h" +#include "components/download/public/common/download_item.h" +#include "components/download/public/common/download_stats.h" +#include "services/network/public/cpp/shared_url_loader_factory.h" + +namespace download { + +namespace { + +// Returns if the download can be parallelized. +bool IsParallelizableDownload(const DownloadCreateInfo& create_info, + DownloadItem* download_item) { + // To enable parallel download, following conditions need to be satisfied. + // 1. Feature |kParallelDownloading| enabled. + // 2. Strong validators response headers. i.e. ETag and Last-Modified. + // 3. Accept-Ranges or Content-Range header. + // 4. Content-Length header. + // 5. Content-Length is no less than the minimum slice size configuration, or + // persisted slices alreay exist. + // 6. HTTP/1.1 protocol, not QUIC nor HTTP/1.0. + // 7. HTTP or HTTPS scheme with GET method in the initial request. + + // Etag and last modified are stored into DownloadCreateInfo in + // DownloadRequestCore only if the response header complies to the strong + // validator rule. 
+ bool has_strong_validator = + !create_info.etag.empty() || !create_info.last_modified.empty(); + bool has_content_length = create_info.total_bytes > 0; + bool satisfy_min_file_size = + !download_item->GetReceivedSlices().empty() || + create_info.total_bytes >= GetMinSliceSizeConfig(); + bool satisfy_connection_type = create_info.connection_info == + net::HttpResponseInfo::CONNECTION_INFO_HTTP1_1; + bool http_get_method = + create_info.method == "GET" && create_info.url().SchemeIsHTTPOrHTTPS(); + + bool is_parallelizable = has_strong_validator && create_info.accept_range && + has_content_length && satisfy_min_file_size && + satisfy_connection_type && http_get_method; + + if (!IsParallelDownloadEnabled()) + return is_parallelizable; + + RecordParallelDownloadCreationEvent( + is_parallelizable + ? ParallelDownloadCreationEvent::STARTED_PARALLEL_DOWNLOAD + : ParallelDownloadCreationEvent::FELL_BACK_TO_NORMAL_DOWNLOAD); + + if (!has_strong_validator) { + RecordParallelDownloadCreationEvent( + ParallelDownloadCreationEvent::FALLBACK_REASON_STRONG_VALIDATORS); + } + if (!create_info.accept_range) { + RecordParallelDownloadCreationEvent( + ParallelDownloadCreationEvent::FALLBACK_REASON_ACCEPT_RANGE_HEADER); + } + if (!has_content_length) { + RecordParallelDownloadCreationEvent( + ParallelDownloadCreationEvent::FALLBACK_REASON_CONTENT_LENGTH_HEADER); + } + if (!satisfy_min_file_size) { + RecordParallelDownloadCreationEvent( + ParallelDownloadCreationEvent::FALLBACK_REASON_FILE_SIZE); + } + if (!satisfy_connection_type) { + RecordParallelDownloadCreationEvent( + ParallelDownloadCreationEvent::FALLBACK_REASON_CONNECTION_TYPE); + } + if (!http_get_method) { + RecordParallelDownloadCreationEvent( + ParallelDownloadCreationEvent::FALLBACK_REASON_HTTP_METHOD); + } + + return is_parallelizable; +} + +} // namespace + +// static +std::unique_ptr<DownloadJob> DownloadJobFactory::CreateJob( + DownloadItem* download_item, + std::unique_ptr<DownloadRequestHandleInterface> req_handle, 
+ const DownloadCreateInfo& create_info, + bool is_save_package_download, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory, + net::URLRequestContextGetter* url_request_context_getter) { + if (is_save_package_download) { + return std::make_unique<SavePackageDownloadJob>(download_item, + std::move(req_handle)); + } + + bool is_parallelizable = IsParallelizableDownload(create_info, download_item); + // Build parallel download job. + if (IsParallelDownloadEnabled() && is_parallelizable) { + return std::make_unique<ParallelDownloadJob>( + download_item, std::move(req_handle), create_info, + std::move(shared_url_loader_factory), url_request_context_getter); + } + + // An ordinary download job. + return std::make_unique<DownloadJobImpl>(download_item, std::move(req_handle), + is_parallelizable); +} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_job_impl.cc b/chromium/components/download/internal/common/download_job_impl.cc new file mode 100644 index 00000000000..e653a87c652 --- /dev/null +++ b/chromium/components/download/internal/common/download_job_impl.cc @@ -0,0 +1,22 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "components/download/internal/common/download_job_impl.h" + +namespace download { + +DownloadJobImpl::DownloadJobImpl( + DownloadItem* download_item, + std::unique_ptr<DownloadRequestHandleInterface> request_handle, + bool is_parallizable) + : DownloadJob(download_item, std::move(request_handle)), + is_parallizable_(is_parallizable) {} + +DownloadJobImpl::~DownloadJobImpl() = default; + +bool DownloadJobImpl::IsParallelizable() const { + return is_parallizable_; +} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_job_impl.h b/chromium/components/download/internal/common/download_job_impl.h new file mode 100644 index 00000000000..5be902d2755 --- /dev/null +++ b/chromium/components/download/internal/common/download_job_impl.h @@ -0,0 +1,35 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef COMPONENTS_DOWNLOAD_INTERNAL_COMMON_DOWNLOAD_JOB_IMPL_H_ +#define COMPONENTS_DOWNLOAD_INTERNAL_COMMON_DOWNLOAD_JOB_IMPL_H_ + +#include "components/download/public/common/download_export.h" +#include "components/download/public/common/download_job.h" + +namespace download { + +class DownloadItem; + +class COMPONENTS_DOWNLOAD_EXPORT DownloadJobImpl : public DownloadJob { + public: + DownloadJobImpl( + DownloadItem* download_item, + std::unique_ptr<DownloadRequestHandleInterface> request_handle, + bool is_parallizable); + ~DownloadJobImpl() override; + + // DownloadJob implementation. + bool IsParallelizable() const override; + + private: + // Whether the download can be parallized. 
+ bool is_parallizable_; + + DISALLOW_COPY_AND_ASSIGN(DownloadJobImpl); +}; + +} // namespace download + +#endif // COMPONENTS_DOWNLOAD_INTERNAL_COMMON_DOWNLOAD_JOB_IMPL_H_ diff --git a/chromium/components/download/internal/common/download_response_handler.cc b/chromium/components/download/internal/common/download_response_handler.cc new file mode 100644 index 00000000000..941a5479498 --- /dev/null +++ b/chromium/components/download/internal/common/download_response_handler.cc @@ -0,0 +1,218 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/public/common/download_response_handler.h" + +#include <memory> + +#include "components/download/public/common/download_stats.h" +#include "components/download/public/common/download_url_parameters.h" +#include "components/download/public/common/download_utils.h" +#include "net/http/http_status_code.h" + +namespace download { + +namespace { + +mojom::NetworkRequestStatus ConvertInterruptReasonToMojoNetworkRequestStatus( + DownloadInterruptReason reason) { + switch (reason) { + case DOWNLOAD_INTERRUPT_REASON_NONE: + return mojom::NetworkRequestStatus::OK; + case DOWNLOAD_INTERRUPT_REASON_NETWORK_TIMEOUT: + return mojom::NetworkRequestStatus::NETWORK_TIMEOUT; + case DOWNLOAD_INTERRUPT_REASON_NETWORK_DISCONNECTED: + return mojom::NetworkRequestStatus::NETWORK_DISCONNECTED; + case DOWNLOAD_INTERRUPT_REASON_NETWORK_SERVER_DOWN: + return mojom::NetworkRequestStatus::NETWORK_SERVER_DOWN; + case DOWNLOAD_INTERRUPT_REASON_SERVER_NO_RANGE: + return mojom::NetworkRequestStatus::SERVER_NO_RANGE; + case DOWNLOAD_INTERRUPT_REASON_SERVER_CONTENT_LENGTH_MISMATCH: + return mojom::NetworkRequestStatus::SERVER_CONTENT_LENGTH_MISMATCH; + case DOWNLOAD_INTERRUPT_REASON_SERVER_UNREACHABLE: + return mojom::NetworkRequestStatus::SERVER_UNREACHABLE; + case DOWNLOAD_INTERRUPT_REASON_SERVER_CERT_PROBLEM: 
+ return mojom::NetworkRequestStatus::SERVER_CERT_PROBLEM; + case DOWNLOAD_INTERRUPT_REASON_USER_CANCELED: + return mojom::NetworkRequestStatus::USER_CANCELED; + case DOWNLOAD_INTERRUPT_REASON_NETWORK_FAILED: + return mojom::NetworkRequestStatus::NETWORK_FAILED; + default: + NOTREACHED(); + return mojom::NetworkRequestStatus::NETWORK_FAILED; + } +} + +} // namespace + +DownloadResponseHandler::DownloadResponseHandler( + network::ResourceRequest* resource_request, + Delegate* delegate, + std::unique_ptr<DownloadSaveInfo> save_info, + bool is_parallel_request, + bool is_transient, + bool fetch_error_body, + const DownloadUrlParameters::RequestHeadersType& request_headers, + const std::string& request_origin, + DownloadSource download_source, + std::vector<GURL> url_chain) + : delegate_(delegate), + started_(false), + save_info_(std::move(save_info)), + url_chain_(std::move(url_chain)), + method_(resource_request->method), + referrer_(resource_request->referrer), + is_transient_(is_transient), + fetch_error_body_(fetch_error_body), + request_headers_(request_headers), + request_origin_(request_origin), + download_source_(download_source), + has_strong_validators_(false), + is_partial_request_(save_info_->offset > 0), + abort_reason_(DOWNLOAD_INTERRUPT_REASON_NONE) { + if (!is_parallel_request) { + RecordDownloadCountWithSource(UNTHROTTLED_COUNT, download_source); + } + if (resource_request->request_initiator.has_value()) + origin_ = resource_request->request_initiator.value().GetURL(); +} + +DownloadResponseHandler::~DownloadResponseHandler() = default; + +void DownloadResponseHandler::OnReceiveResponse( + const network::ResourceResponseHead& head, + network::mojom::DownloadedTempFilePtr downloaded_file) { + create_info_ = CreateDownloadCreateInfo(head); + cert_status_ = head.cert_status; + + // TODO(xingliu): Do not use http cache. + // Sets page transition type correctly and call + // |RecordDownloadSourcePageTransitionType| here. 
+ if (head.headers) { + has_strong_validators_ = head.headers->HasStrongValidators(); + RecordDownloadHttpResponseCode(head.headers->response_code()); + RecordDownloadContentDisposition(create_info_->content_disposition); + } + + // Blink verifies that the requester of this download is allowed to set a + // suggested name for the security origin of the downlaod URL. However, this + // assumption doesn't hold if there were cross origin redirects. Therefore, + // clear the suggested_name for such requests. + if (origin_.is_valid() && !create_info_->url_chain.back().SchemeIsBlob() && + !create_info_->url_chain.back().SchemeIs(url::kAboutScheme) && + !create_info_->url_chain.back().SchemeIs(url::kDataScheme) && + origin_ != create_info_->url_chain.back().GetOrigin()) { + create_info_->save_info->suggested_name.clear(); + } + + if (create_info_->result != DOWNLOAD_INTERRUPT_REASON_NONE) + OnResponseStarted(mojom::DownloadStreamHandlePtr()); +} + +std::unique_ptr<DownloadCreateInfo> +DownloadResponseHandler::CreateDownloadCreateInfo( + const network::ResourceResponseHead& head) { + auto create_info = std::make_unique<DownloadCreateInfo>( + base::Time::Now(), std::move(save_info_)); + + DownloadInterruptReason result = + head.headers + ? HandleSuccessfulServerResponse( + *head.headers, create_info->save_info.get(), fetch_error_body_) + : DOWNLOAD_INTERRUPT_REASON_NONE; + + create_info->total_bytes = head.content_length > 0 ? 
head.content_length : 0; + create_info->result = result; + if (result == DOWNLOAD_INTERRUPT_REASON_NONE) + create_info->remote_address = head.socket_address.host(); + create_info->method = method_; + create_info->connection_info = head.connection_info; + create_info->url_chain = url_chain_; + create_info->referrer_url = referrer_; + create_info->transient = is_transient_; + create_info->response_headers = head.headers; + create_info->offset = create_info->save_info->offset; + create_info->mime_type = head.mime_type; + create_info->fetch_error_body = fetch_error_body_; + create_info->request_headers = request_headers_; + create_info->request_origin = request_origin_; + create_info->download_source = download_source_; + + HandleResponseHeaders(head.headers.get(), create_info.get()); + return create_info; +} + +void DownloadResponseHandler::OnReceiveRedirect( + const net::RedirectInfo& redirect_info, + const network::ResourceResponseHead& head) { + if (is_partial_request_) { + // A redirect while attempting a partial resumption indicates a potential + // middle box. Trigger another interruption so that the + // DownloadItem can retry. 
+ abort_reason_ = DOWNLOAD_INTERRUPT_REASON_SERVER_UNREACHABLE; + OnComplete(network::URLLoaderCompletionStatus(net::OK)); + return; + } + url_chain_.push_back(redirect_info.new_url); + method_ = redirect_info.new_method; + referrer_ = GURL(redirect_info.new_referrer); + delegate_->OnReceiveRedirect(); +} + +void DownloadResponseHandler::OnDataDownloaded(int64_t data_length, + int64_t encoded_length) {} + +void DownloadResponseHandler::OnUploadProgress( + int64_t current_position, + int64_t total_size, + OnUploadProgressCallback callback) {} + +void DownloadResponseHandler::OnReceiveCachedMetadata( + const std::vector<uint8_t>& data) {} + +void DownloadResponseHandler::OnTransferSizeUpdated( + int32_t transfer_size_diff) {} + +void DownloadResponseHandler::OnStartLoadingResponseBody( + mojo::ScopedDataPipeConsumerHandle body) { + if (started_) + return; + + mojom::DownloadStreamHandlePtr stream_handle = + mojom::DownloadStreamHandle::New(); + stream_handle->stream = std::move(body); + stream_handle->client_request = mojo::MakeRequest(&client_ptr_); + OnResponseStarted(std::move(stream_handle)); +} + +void DownloadResponseHandler::OnComplete( + const network::URLLoaderCompletionStatus& status) { + DownloadInterruptReason reason = HandleRequestCompletionStatus( + static_cast<net::Error>(status.error_code), has_strong_validators_, + cert_status_, abort_reason_); + + if (client_ptr_) { + client_ptr_->OnStreamCompleted( + ConvertInterruptReasonToMojoNetworkRequestStatus(reason)); + } + + if (started_) + return; + + // OnComplete() called without OnReceiveResponse(). This should only + // happen when the request was aborted. 
+ create_info_ = CreateDownloadCreateInfo(network::ResourceResponseHead()); + create_info_->result = reason; + + OnResponseStarted(mojom::DownloadStreamHandlePtr()); +} + +void DownloadResponseHandler::OnResponseStarted( + mojom::DownloadStreamHandlePtr stream_handle) { + started_ = true; + delegate_->OnResponseStarted(std::move(create_info_), + std::move(stream_handle)); +} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_stats.cc b/chromium/components/download/internal/common/download_stats.cc index 5627b0b6057..829b99ed9d6 100644 --- a/chromium/components/download/internal/common/download_stats.cc +++ b/chromium/components/download/internal/common/download_stats.cc @@ -6,6 +6,7 @@ #include <map> +#include "base/callback.h" #include "base/files/file_path.h" #include "base/macros.h" #include "base/metrics/histogram_functions.h" @@ -19,6 +20,9 @@ namespace download { namespace { +// The maximium value for download deletion retention time histogram. +const int kMaxDeletionRetentionHours = 720; + // All possible error codes from the network module. Note that the error codes // are all positive (since histograms expect positive sample values). const int kAllInterruptReasonCodes[] = { @@ -338,6 +342,28 @@ void RecordDownloadCompleted(const base::TimeTicks& start, } } +void RecordDownloadDeletion(base::Time completion_time, + const std::string& mime_type) { + if (completion_time == base::Time()) + return; + + // Records how long the user keeps media files on disk. 
+ base::TimeDelta retention_time = base::Time::Now() - completion_time; + int retention_hours = retention_time.InHours(); + + DownloadContent type = DownloadContentFromMimeType(mime_type, false); + if (type == DownloadContent::VIDEO) { + UMA_HISTOGRAM_CUSTOM_COUNTS("Download.DeleteRetentionTime.Video", + retention_hours, 1, kMaxDeletionRetentionHours, + 50); + } + if (type == DownloadContent::AUDIO) { + UMA_HISTOGRAM_CUSTOM_COUNTS("Download.DeleteRetentionTime.Audio", + retention_hours, 1, kMaxDeletionRetentionHours, + 50); + } +} + void RecordDownloadInterrupted(DownloadInterruptReason reason, int64_t received, int64_t total, @@ -1044,6 +1070,32 @@ void RecordDownloadConnectionSecurity(const GURL& download_url, DOWNLOAD_CONNECTION_SECURITY_MAX); } +void RecordDownloadContentTypeSecurity( + const GURL& download_url, + const std::vector<GURL>& url_chain, + const std::string& mime_type, + const base::RepeatingCallback<bool(const GURL&)>& + is_origin_secure_callback) { + bool is_final_download_secure = is_origin_secure_callback.Run(download_url); + bool is_redirect_chain_secure = true; + for (const auto& url : url_chain) { + if (!is_origin_secure_callback.Run(url)) { + is_redirect_chain_secure = false; + break; + } + } + + DownloadContent download_content = + download::DownloadContentFromMimeType(mime_type, false); + if (is_final_download_secure && is_redirect_chain_secure) { + UMA_HISTOGRAM_ENUMERATION("Download.Start.ContentType.SecureChain", + download_content, DownloadContent::MAX); + } else { + UMA_HISTOGRAM_ENUMERATION("Download.Start.ContentType.InsecureChain", + download_content, DownloadContent::MAX); + } +} + void RecordDownloadSourcePageTransitionType( const base::Optional<ui::PageTransition>& page_transition) { if (!page_transition) diff --git a/chromium/components/download/internal/common/download_task_runner.cc b/chromium/components/download/internal/common/download_task_runner.cc index cc24d67d5d0..0cc99355e32 100644 --- 
a/chromium/components/download/internal/common/download_task_runner.cc +++ b/chromium/components/download/internal/common/download_task_runner.cc @@ -4,6 +4,9 @@ #include "components/download/public/common/download_task_runner.h" +#include "base/lazy_instance.h" +#include "base/no_destructor.h" +#include "base/synchronization/lock.h" #include "base/task_scheduler/lazy_task_runner.h" #include "build/build_config.h" @@ -24,10 +27,42 @@ base::LazySequencedTaskRunner g_download_task_runner = base::TaskTraits(base::MayBlock(), base::TaskPriority::USER_VISIBLE)); #endif +base::LazyInstance<scoped_refptr<base::SingleThreadTaskRunner>>:: + DestructorAtExit g_io_task_runner = LAZY_INSTANCE_INITIALIZER; + +// Lock to protect |g_io_task_runner| +base::Lock& GetIOTaskRunnerLock() { + static base::NoDestructor<base::Lock> instance; + return *instance; +} + } // namespace scoped_refptr<base::SequencedTaskRunner> GetDownloadTaskRunner() { return g_download_task_runner.Get(); } +void SetIOTaskRunner( + const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) { + base::AutoLock auto_lock(GetIOTaskRunnerLock()); + static int count = 0; + if (task_runner) { + DCHECK(!g_io_task_runner.Get() || + task_runner.get() == g_io_task_runner.Get().get()); + count++; + g_io_task_runner.Get() = task_runner; + return; + } + + count--; + DCHECK_GE(count, 0); + if (count == 0) + g_io_task_runner.Get() = nullptr; +} + +scoped_refptr<base::SingleThreadTaskRunner> GetIOTaskRunner() { + base::AutoLock auto_lock(GetIOTaskRunnerLock()); + return g_io_task_runner.Get(); +} + } // namespace download diff --git a/chromium/components/download/internal/common/download_ukm_helper.cc b/chromium/components/download/internal/common/download_ukm_helper.cc index ec99fc75fba..8f071e66c56 100644 --- a/chromium/components/download/internal/common/download_ukm_helper.cc +++ b/chromium/components/download/internal/common/download_ukm_helper.cc @@ -20,6 +20,10 @@ int DownloadUkmHelper::CalcExponentialBucket(int 
value) { return static_cast<int>(floor(log10(value + 1) / CalcBucketIncrement())); } +int DownloadUkmHelper::CalcNearestKB(int num_bytes) { + return num_bytes / 1024; +} + void DownloadUkmHelper::RecordDownloadStarted(int download_id, ukm::SourceId source_id, DownloadContent file_type, @@ -36,14 +40,16 @@ void DownloadUkmHelper::RecordDownloadInterrupted( base::Optional<int> change_in_file_size, DownloadInterruptReason reason, int resulting_file_size, - const base::TimeDelta& time_since_start) { + const base::TimeDelta& time_since_start, + int64_t bytes_wasted) { ukm::SourceId source_id = ukm::UkmRecorder::GetNewSourceID(); ukm::builders::Download_Interrupted builder(source_id); builder.SetDownloadId(download_id) .SetReason(static_cast<int>(reason)) .SetResultingFileSize( DownloadUkmHelper::CalcExponentialBucket(resulting_file_size)) - .SetTimeSinceStart(time_since_start.InMilliseconds()); + .SetTimeSinceStart(time_since_start.InMilliseconds()) + .SetBytesWasted(DownloadUkmHelper::CalcNearestKB(bytes_wasted)); if (change_in_file_size.has_value()) { builder.SetChangeInFileSize( DownloadUkmHelper::CalcExponentialBucket(change_in_file_size.value())); @@ -66,13 +72,15 @@ void DownloadUkmHelper::RecordDownloadResumed( void DownloadUkmHelper::RecordDownloadCompleted( int download_id, int resulting_file_size, - const base::TimeDelta& time_since_start) { + const base::TimeDelta& time_since_start, + int64_t bytes_wasted) { ukm::SourceId source_id = ukm::UkmRecorder::GetNewSourceID(); ukm::builders::Download_Completed(source_id) .SetDownloadId(download_id) .SetResultingFileSize( DownloadUkmHelper::CalcExponentialBucket(resulting_file_size)) .SetTimeSinceStart(time_since_start.InMilliseconds()) + .SetBytesWasted(DownloadUkmHelper::CalcNearestKB(bytes_wasted)) .Record(ukm::UkmRecorder::Get()); } diff --git a/chromium/components/download/internal/common/download_ukm_helper_unittest.cc b/chromium/components/download/internal/common/download_ukm_helper_unittest.cc index 
4cc969a7f86..48ef050c53a 100644 --- a/chromium/components/download/internal/common/download_ukm_helper_unittest.cc +++ b/chromium/components/download/internal/common/download_ukm_helper_unittest.cc @@ -71,9 +71,10 @@ TEST_F(DownloadUkmHelperTest, TestBasicReporting) { DownloadInterruptReason reason = DOWNLOAD_INTERRUPT_REASON_NONE; int resulting_file_size = 2000; int time_since_start = 250; + int bytes_wasted = 1234; DownloadUkmHelper::RecordDownloadInterrupted( download_id_, change_in_file_size, reason, resulting_file_size, - base::TimeDelta::FromMilliseconds(time_since_start)); + base::TimeDelta::FromMilliseconds(time_since_start), bytes_wasted); ExpectUkmMetrics( UkmDownloadInterrupted::kEntryName, @@ -81,12 +82,13 @@ TEST_F(DownloadUkmHelperTest, TestBasicReporting) { UkmDownloadInterrupted::kChangeInFileSizeName, UkmDownloadInterrupted::kReasonName, UkmDownloadInterrupted::kResultingFileSizeName, - UkmDownloadInterrupted::kTimeSinceStartName}, + UkmDownloadInterrupted::kTimeSinceStartName, + UkmDownloadInterrupted::kBytesWastedName}, {download_id_, DownloadUkmHelper::CalcExponentialBucket(change_in_file_size), static_cast<int>(reason), DownloadUkmHelper::CalcExponentialBucket(resulting_file_size), - time_since_start}); + time_since_start, DownloadUkmHelper::CalcNearestKB(bytes_wasted)}); // RecordDownloadResumed. ResumeMode mode = ResumeMode::IMMEDIATE_RESTART; @@ -104,18 +106,22 @@ TEST_F(DownloadUkmHelperTest, TestBasicReporting) { // RecordDownloadCompleted. 
int resulting_file_size_completed = 3000; int time_since_start_completed = 400; + int bytes_wasted_completed = 2345; DownloadUkmHelper::RecordDownloadCompleted( download_id_, resulting_file_size_completed, - base::TimeDelta::FromMilliseconds(time_since_start_completed)); + base::TimeDelta::FromMilliseconds(time_since_start_completed), + bytes_wasted_completed); ExpectUkmMetrics( UkmDownloadCompleted::kEntryName, {UkmDownloadCompleted::kDownloadIdName, UkmDownloadCompleted::kResultingFileSizeName, - UkmDownloadCompleted::kTimeSinceStartName}, + UkmDownloadCompleted::kTimeSinceStartName, + UkmDownloadCompleted::kBytesWastedName}, {download_id_, DownloadUkmHelper::CalcExponentialBucket(resulting_file_size_completed), - time_since_start_completed}); + time_since_start_completed, + DownloadUkmHelper::CalcNearestKB(bytes_wasted_completed)}); } } // namespace download diff --git a/chromium/components/download/internal/common/download_url_loader_factory_getter.cc b/chromium/components/download/internal/common/download_url_loader_factory_getter.cc new file mode 100644 index 00000000000..4f3b2823b7d --- /dev/null +++ b/chromium/components/download/internal/common/download_url_loader_factory_getter.cc @@ -0,0 +1,23 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "components/download/public/common/download_url_loader_factory_getter.h" + +#include "components/download/public/common/download_task_runner.h" + +namespace download { + +DownloadURLLoaderFactoryGetter::DownloadURLLoaderFactoryGetter() = default; + +DownloadURLLoaderFactoryGetter::~DownloadURLLoaderFactoryGetter() = default; + +void DownloadURLLoaderFactoryGetter::DeleteOnCorrectThread() const { + if (GetIOTaskRunner() && !GetIOTaskRunner()->BelongsToCurrentThread()) { + GetIOTaskRunner()->DeleteSoon(FROM_HERE, this); + return; + } + delete this; +} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_utils.cc b/chromium/components/download/internal/common/download_utils.cc index f59702cb72c..224894906f4 100644 --- a/chromium/components/download/internal/common/download_utils.cc +++ b/chromium/components/download/internal/common/download_utils.cc @@ -2,10 +2,349 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
+#include "components/download/public/common/download_utils.h" + +#include "base/format_macros.h" +#include "base/strings/stringprintf.h" +#include "components/download/public/common/download_create_info.h" +#include "components/download/public/common/download_interrupt_reasons_utils.h" #include "components/download/public/common/download_item.h" +#include "components/download/public/common/download_save_info.h" +#include "components/download/public/common/download_stats.h" +#include "components/download/public/common/download_url_parameters.h" +#include "net/base/load_flags.h" +#include "net/http/http_request_headers.h" +#include "net/http/http_status_code.h" +#include "services/network/public/cpp/resource_request.h" namespace download { +namespace { + +void AppendExtraHeaders(net::HttpRequestHeaders* headers, + DownloadUrlParameters* params) { + for (const auto& header : params->request_headers()) + headers->SetHeaderIfMissing(header.first, header.second); +} + +} // namespace + const uint32_t DownloadItem::kInvalidId = 0; +DownloadInterruptReason HandleRequestCompletionStatus( + net::Error error_code, + bool has_strong_validators, + net::CertStatus cert_status, + DownloadInterruptReason abort_reason) { + // ERR_CONTENT_LENGTH_MISMATCH can be caused by 1 of the following reasons: + // 1. Server or proxy closes the connection too early. + // 2. The content-length header is wrong. + // If the download has strong validators, we can interrupt the download + // and let it resume automatically. Otherwise, resuming the download will + // cause it to restart and the download may never complete if the error was + // caused by reason 2. As a result, downloads without strong validators are + // treated as completed here. + // TODO(qinmin): check the metrics from downloads with strong validators, + // and decide whether we should interrupt downloads without strong validators + // rather than complete them. 
+ if (error_code == net::ERR_CONTENT_LENGTH_MISMATCH && + !has_strong_validators) { + error_code = net::OK; + RecordDownloadCount(COMPLETED_WITH_CONTENT_LENGTH_MISMATCH_COUNT); + } + + if (error_code == net::ERR_ABORTED) { + // ERR_ABORTED == something outside of the network + // stack cancelled the request. There aren't that many things that + // could do this to a download request (whose lifetime is separated from + // the tab from which it came). We map this to USER_CANCELLED as the + // case we know about (system suspend because of laptop close) corresponds + // to a user action. + // TODO(asanka): A lid close or other power event should result in an + // interruption that doesn't discard the partial state, unlike + // USER_CANCELLED. (https://crbug.com/166179) + if (net::IsCertStatusError(cert_status)) + return DOWNLOAD_INTERRUPT_REASON_SERVER_CERT_PROBLEM; + else + return DOWNLOAD_INTERRUPT_REASON_USER_CANCELED; + } else if (abort_reason != DOWNLOAD_INTERRUPT_REASON_NONE) { + // If a more specific interrupt reason was specified before the request + // was explicitly cancelled, then use it. + return abort_reason; + } + + return ConvertNetErrorToInterruptReason(error_code, + DOWNLOAD_INTERRUPT_FROM_NETWORK); +} + +DownloadInterruptReason HandleSuccessfulServerResponse( + const net::HttpResponseHeaders& http_headers, + DownloadSaveInfo* save_info, + bool fetch_error_body) { + DownloadInterruptReason result = DOWNLOAD_INTERRUPT_REASON_NONE; + switch (http_headers.response_code()) { + case -1: // Non-HTTP request. + case net::HTTP_OK: + case net::HTTP_NON_AUTHORITATIVE_INFORMATION: + case net::HTTP_PARTIAL_CONTENT: + // Expected successful codes. + break; + + case net::HTTP_CREATED: + case net::HTTP_ACCEPTED: + // Per RFC 7231 the entity being transferred is metadata about the + // resource at the target URL and not the resource at that URL (or the + // resource that would be at the URL once processing is completed in the + // case of HTTP_ACCEPTED). 
However, we currently don't have special + // handling for these response and they are downloaded the same as a + // regular response. + break; + + case net::HTTP_NO_CONTENT: + case net::HTTP_RESET_CONTENT: + // These two status codes don't have an entity (or rather RFC 7231 + // requires that there be no entity). They are treated the same as the + // resource not being found since there is no entity to download. + + case net::HTTP_NOT_FOUND: + result = DOWNLOAD_INTERRUPT_REASON_SERVER_BAD_CONTENT; + break; + + case net::HTTP_REQUESTED_RANGE_NOT_SATISFIABLE: + // Retry by downloading from the start automatically: + // If we haven't received data when we get this error, we won't. + result = DOWNLOAD_INTERRUPT_REASON_SERVER_NO_RANGE; + break; + case net::HTTP_UNAUTHORIZED: + case net::HTTP_PROXY_AUTHENTICATION_REQUIRED: + // Server didn't authorize this request. + result = DOWNLOAD_INTERRUPT_REASON_SERVER_UNAUTHORIZED; + break; + case net::HTTP_FORBIDDEN: + // Server forbids access to this resource. + result = DOWNLOAD_INTERRUPT_REASON_SERVER_FORBIDDEN; + break; + default: // All other errors. + // Redirection and informational codes should have been handled earlier + // in the stack. + // TODO(xingliu): Handle HTTP_PRECONDITION_FAILED and resurrect + // DOWNLOAD_INTERRUPT_REASON_SERVER_PRECONDITION for range + // requests. This will change extensions::api::InterruptReason. + DCHECK_NE(3, http_headers.response_code() / 100); + DCHECK_NE(1, http_headers.response_code() / 100); + result = DOWNLOAD_INTERRUPT_REASON_SERVER_FAILED; + } + + if (result != DOWNLOAD_INTERRUPT_REASON_NONE && !fetch_error_body) + return result; + + // The caller is expecting a partial response. + if (save_info && (save_info->offset > 0 || save_info->length > 0)) { + if (http_headers.response_code() != net::HTTP_PARTIAL_CONTENT) { + // Server should send partial content when "If-Match" or + // "If-Unmodified-Since" check passes, and the range request header has + // last byte position. e.g. 
"Range:bytes=50-99". + if (save_info->length != DownloadSaveInfo::kLengthFullContent && + !fetch_error_body) + return DOWNLOAD_INTERRUPT_REASON_SERVER_BAD_CONTENT; + + // Requested a partial range, but received the entire response, when + // the range request header is "Range:bytes={offset}-". + // The response can be HTTP 200 or other error code when + // |fetch_error_body| is true. + save_info->offset = 0; + save_info->hash_of_partial_file.clear(); + save_info->hash_state.reset(); + return DOWNLOAD_INTERRUPT_REASON_NONE; + } + + int64_t first_byte = -1; + int64_t last_byte = -1; + int64_t length = -1; + if (!http_headers.GetContentRangeFor206(&first_byte, &last_byte, &length)) + return DOWNLOAD_INTERRUPT_REASON_SERVER_BAD_CONTENT; + DCHECK_GE(first_byte, 0); + + if (first_byte != save_info->offset || + (save_info->length > 0 && + last_byte != save_info->offset + save_info->length - 1)) { + // The server returned a different range than the one we requested. Assume + // the response is bad. + // + // In the future we should consider allowing offsets that are less than + // the offset we've requested, since in theory we can truncate the partial + // file at the offset and continue. + return DOWNLOAD_INTERRUPT_REASON_SERVER_BAD_CONTENT; + } + + return DOWNLOAD_INTERRUPT_REASON_NONE; + } + + if (http_headers.response_code() == net::HTTP_PARTIAL_CONTENT) + return DOWNLOAD_INTERRUPT_REASON_SERVER_BAD_CONTENT; + + return DOWNLOAD_INTERRUPT_REASON_NONE; +} + +void HandleResponseHeaders(const net::HttpResponseHeaders* headers, + DownloadCreateInfo* create_info) { + if (!headers) + return; + + if (headers->HasStrongValidators()) { + // If we don't have strong validators as per RFC 7232 section 2, then + // we neither store nor use them for range requests. 
+ if (!headers->EnumerateHeader(nullptr, "Last-Modified", + &create_info->last_modified)) + create_info->last_modified.clear(); + if (!headers->EnumerateHeader(nullptr, "ETag", &create_info->etag)) + create_info->etag.clear(); + } + + // Grab the first content-disposition header. There may be more than one, + // though as of this writing, the network stack ensures if there are, they + // are all duplicates. + headers->EnumerateHeader(nullptr, "Content-Disposition", + &create_info->content_disposition); + + // Parse the original mime type from the header, notice that actual mime type + // might be different due to mime type sniffing. + if (!headers->GetMimeType(&create_info->original_mime_type)) + create_info->original_mime_type.clear(); + + // Content-Range is validated in HandleSuccessfulServerResponse. + // In RFC 7233, a single part 206 partial response must generate + // Content-Range. Accept-Range may be sent in 200 response to indicate the + // server can handle range request, but optional in 206 response. 
+ create_info->accept_range = + headers->HasHeaderValue("Accept-Ranges", "bytes") || + (headers->HasHeader("Content-Range") && + headers->response_code() == net::HTTP_PARTIAL_CONTENT); +} + +std::unique_ptr<network::ResourceRequest> CreateResourceRequest( + DownloadUrlParameters* params) { + DCHECK(params->offset() >= 0); + + std::unique_ptr<network::ResourceRequest> request( + new network::ResourceRequest); + request->method = params->method(); + request->url = params->url(); + request->request_initiator = params->initiator(); + request->do_not_prompt_for_login = params->do_not_prompt_for_login(); + request->site_for_cookies = params->url(); + request->referrer = params->referrer(); + request->referrer_policy = params->referrer_policy(); + request->allow_download = true; + request->is_main_frame = true; + + if (params->render_process_host_id() >= 0) + request->render_frame_id = params->render_frame_host_routing_id(); + + bool has_upload_data = false; + if (params->post_body()) { + request->request_body = params->post_body(); + has_upload_data = true; + } + + if (params->post_id() >= 0) { + // The POST in this case does not have an actual body, and only works + // when retrieving data from cache. This is done because we don't want + // to do a re-POST without user consent, and currently don't have a good + // plan on how to display the UI for that. + DCHECK(params->prefer_cache()); + DCHECK_EQ("POST", params->method()); + request->request_body = new network::ResourceRequestBody(); + request->request_body->set_identifier(params->post_id()); + has_upload_data = true; + } + + request->load_flags = GetLoadFlags(params, has_upload_data); + + // Add additional request headers. 
+ std::unique_ptr<net::HttpRequestHeaders> headers = + GetAdditionalRequestHeaders(params); + request->headers.Swap(headers.get()); + + return request; +} + +int GetLoadFlags(DownloadUrlParameters* params, bool has_upload_data) { + int load_flags = 0; + if (params->prefer_cache()) { + // If there is upload data attached, only retrieve from cache because there + // is no current mechanism to prompt the user for their consent for a + // re-post. For GETs, try to retrieve data from the cache and skip + // validating the entry if present. + if (has_upload_data) + load_flags |= net::LOAD_ONLY_FROM_CACHE | net::LOAD_SKIP_CACHE_VALIDATION; + else + load_flags |= net::LOAD_SKIP_CACHE_VALIDATION; + } else { + load_flags |= net::LOAD_DISABLE_CACHE; + } + return load_flags; +} + +std::unique_ptr<net::HttpRequestHeaders> GetAdditionalRequestHeaders( + DownloadUrlParameters* params) { + auto headers = std::make_unique<net::HttpRequestHeaders>(); + if (params->offset() == 0 && + params->length() == DownloadSaveInfo::kLengthFullContent) { + AppendExtraHeaders(headers.get(), params); + return headers; + } + + bool has_last_modified = !params->last_modified().empty(); + bool has_etag = !params->etag().empty(); + + // Strong validator(i.e. etag or last modified) is required in range requests + // for download resumption and parallel download. + DCHECK(has_etag || has_last_modified); + if (!has_etag && !has_last_modified) { + DVLOG(1) << "Creating partial request without strong validators."; + AppendExtraHeaders(headers.get(), params); + return headers; + } + + // Add "Range" header. + std::string range_header = + (params->length() == DownloadSaveInfo::kLengthFullContent) + ? base::StringPrintf("bytes=%" PRId64 "-", params->offset()) + : base::StringPrintf("bytes=%" PRId64 "-%" PRId64, params->offset(), + params->offset() + params->length() - 1); + headers->SetHeader(net::HttpRequestHeaders::kRange, range_header); + + // Add "If-Range" headers. 
+ if (params->use_if_range()) { + // In accordance with RFC 7233 Section 3.2, use If-Range to specify that + // the server return the entire entity if the validator doesn't match. + // Last-Modified can be used in the absence of ETag as a validator if the + // response headers satisfied the HttpUtil::HasStrongValidators() + // predicate. + // + // This function assumes that HasStrongValidators() was true and that the + // ETag and Last-Modified header values supplied are valid. + headers->SetHeader(net::HttpRequestHeaders::kIfRange, + has_etag ? params->etag() : params->last_modified()); + AppendExtraHeaders(headers.get(), params); + return headers; + } + + // Add "If-Match"/"If-Unmodified-Since" headers. + if (has_etag) + headers->SetHeader(net::HttpRequestHeaders::kIfMatch, params->etag()); + + // According to RFC 7232 section 3.4, "If-Unmodified-Since" is mainly for + // old servers that didn't implement "If-Match" and must be ignored when + // "If-Match" presents. + if (has_last_modified) { + headers->SetHeader(net::HttpRequestHeaders::kIfUnmodifiedSince, + params->last_modified()); + } + + AppendExtraHeaders(headers.get(), params); + return headers; +} + } // namespace download diff --git a/chromium/components/download/internal/common/download_worker.cc b/chromium/components/download/internal/common/download_worker.cc new file mode 100644 index 00000000000..c05b1f8a0c2 --- /dev/null +++ b/chromium/components/download/internal/common/download_worker.cc @@ -0,0 +1,151 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "components/download/internal/common/download_worker.h" + +#include "base/message_loop/message_loop.h" +#include "components/download/public/common/download_create_info.h" +#include "components/download/public/common/download_interrupt_reasons.h" +#include "components/download/public/common/download_task_runner.h" +#include "components/download/public/common/download_utils.h" +#include "components/download/public/common/input_stream.h" +#include "components/download/public/common/resource_downloader.h" +#include "components/download/public/common/url_download_handler_factory.h" +#include "services/network/public/cpp/features.h" +#include "services/network/public/cpp/shared_url_loader_factory.h" + +namespace download { +namespace { + +const int kWorkerVerboseLevel = 1; + +class CompletedInputStream : public InputStream { + public: + CompletedInputStream(DownloadInterruptReason status) : status_(status){}; + ~CompletedInputStream() override = default; + + // InputStream + bool IsEmpty() override { return false; } + InputStream::StreamState Read(scoped_refptr<net::IOBuffer>* data, + size_t* length) override { + *length = 0; + return InputStream::StreamState::COMPLETE; + } + + DownloadInterruptReason GetCompletionStatus() override { return status_; } + + private: + DownloadInterruptReason status_; + DISALLOW_COPY_AND_ASSIGN(CompletedInputStream); +}; + +void CreateUrlDownloadHandler( + std::unique_ptr<DownloadUrlParameters> params, + base::WeakPtr<UrlDownloadHandler::Delegate> delegate, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory, + const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) { + auto downloader = UrlDownloadHandlerFactory::Create( + std::move(params), delegate, std::move(shared_url_loader_factory), + task_runner); + task_runner->PostTask( + FROM_HERE, + base::BindOnce(&UrlDownloadHandler::Delegate::OnUrlDownloadHandlerCreated, + delegate, std::move(downloader))); +} + +} // namespace + 
+DownloadWorker::DownloadWorker(DownloadWorker::Delegate* delegate, + int64_t offset, + int64_t length) + : delegate_(delegate), + offset_(offset), + length_(length), + is_paused_(false), + is_canceled_(false), + is_user_cancel_(false), + url_download_handler_(nullptr, base::OnTaskRunnerDeleter(nullptr)), + weak_factory_(this) { + DCHECK(delegate_); +} + +DownloadWorker::~DownloadWorker() = default; + +void DownloadWorker::SendRequest( + std::unique_ptr<DownloadUrlParameters> params, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory) { + GetIOTaskRunner()->PostTask( + FROM_HERE, base::BindOnce(&CreateUrlDownloadHandler, std::move(params), + weak_factory_.GetWeakPtr(), + std::move(shared_url_loader_factory), + base::ThreadTaskRunnerHandle::Get())); +} + +void DownloadWorker::Pause() { + is_paused_ = true; + if (request_handle_) + request_handle_->PauseRequest(); +} + +void DownloadWorker::Resume() { + is_paused_ = false; + if (request_handle_) + request_handle_->ResumeRequest(); +} + +void DownloadWorker::Cancel(bool user_cancel) { + is_canceled_ = true; + is_user_cancel_ = user_cancel; + if (request_handle_) + request_handle_->CancelRequest(user_cancel); +} + +void DownloadWorker::OnUrlDownloadStarted( + std::unique_ptr<DownloadCreateInfo> create_info, + std::unique_ptr<InputStream> input_stream, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory, + const DownloadUrlParameters::OnStartedCallback& callback) { + // |callback| is not used in subsequent requests. + DCHECK(callback.is_null()); + + // Destroy the request if user canceled. + if (is_canceled_) { + VLOG(kWorkerVerboseLevel) + << "Byte stream arrived after user cancel the request."; + create_info->request_handle->CancelRequest(is_user_cancel_); + return; + } + + // TODO(xingliu): Add metric for error handling. + if (create_info->result != DOWNLOAD_INTERRUPT_REASON_NONE) { + VLOG(kWorkerVerboseLevel) + << "Parallel download sub-request failed. 
reason = " + << create_info->result; + input_stream.reset(new CompletedInputStream(create_info->result)); + } + + request_handle_ = std::move(create_info->request_handle); + + // Pause the stream if user paused, still push the stream reader to the sink. + if (is_paused_) { + VLOG(kWorkerVerboseLevel) + << "Byte stream arrived after user pause the request."; + Pause(); + } + + delegate_->OnInputStreamReady(this, std::move(input_stream)); +} + +void DownloadWorker::OnUrlDownloadStopped(UrlDownloadHandler* downloader) { + // Release the |url_download_handler_|, the object will be deleted on IO + // thread. + url_download_handler_.reset(); +} + +void DownloadWorker::OnUrlDownloadHandlerCreated( + UrlDownloadHandler::UniqueUrlDownloadHandlerPtr downloader) { + url_download_handler_ = std::move(downloader); +} + +} // namespace download diff --git a/chromium/components/download/internal/common/download_worker.h b/chromium/components/download/internal/common/download_worker.h new file mode 100644 index 00000000000..8996f082d12 --- /dev/null +++ b/chromium/components/download/internal/common/download_worker.h @@ -0,0 +1,95 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef COMPONENTS_DOWNLOAD_INTERNAL_COMMON_DOWNLOAD_WORKER_H_ +#define COMPONENTS_DOWNLOAD_INTERNAL_COMMON_DOWNLOAD_WORKER_H_ + +#include <memory> + +#include "base/macros.h" +#include "base/memory/weak_ptr.h" +#include "components/download/public/common/download_export.h" +#include "components/download/public/common/download_request_handle_interface.h" +#include "components/download/public/common/download_url_parameters.h" +#include "components/download/public/common/url_download_handler.h" + +namespace network { +class SharedURLLoaderFactory; +} + +namespace download { + +// Helper class used to send subsequent range requests to fetch slices of the +// file after handling response of the original non-range request. +// TODO(xingliu): we should consider to reuse this class for single connection +// download. +class COMPONENTS_DOWNLOAD_EXPORT DownloadWorker + : public UrlDownloadHandler::Delegate { + public: + class Delegate { + public: + // Called when the the input stream is established after server response is + // handled. The stream contains data starts from |offset| of the + // destination file. + virtual void OnInputStreamReady( + DownloadWorker* worker, + std::unique_ptr<InputStream> input_stream) = 0; + }; + + DownloadWorker(DownloadWorker::Delegate* delegate, + int64_t offset, + int64_t length); + virtual ~DownloadWorker(); + + int64_t offset() const { return offset_; } + int64_t length() const { return length_; } + + // Send network request to ask for a download. + void SendRequest( + std::unique_ptr<DownloadUrlParameters> params, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory); + + // Download operations. + void Pause(); + void Resume(); + void Cancel(bool user_cancel); + + private: + // UrlDownloader::Delegate implementation. 
+ void OnUrlDownloadStarted( + std::unique_ptr<DownloadCreateInfo> create_info, + std::unique_ptr<InputStream> input_stream, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory, + const DownloadUrlParameters::OnStartedCallback& callback) override; + void OnUrlDownloadStopped(UrlDownloadHandler* downloader) override; + void OnUrlDownloadHandlerCreated( + UrlDownloadHandler::UniqueUrlDownloadHandlerPtr downloader) override; + + DownloadWorker::Delegate* const delegate_; + + // The starting position of the content for this worker to download. + int64_t offset_; + + // The length of the request. May be 0 to fetch to the end of the file. + int64_t length_; + + // States of the worker. + bool is_paused_; + bool is_canceled_; + bool is_user_cancel_; + + // Used to control the network request. Live on UI thread. + std::unique_ptr<DownloadRequestHandleInterface> request_handle_; + + // Used to handle the url request. Live and die on IO thread. + UrlDownloadHandler::UniqueUrlDownloadHandlerPtr url_download_handler_; + + base::WeakPtrFactory<DownloadWorker> weak_factory_; + + DISALLOW_COPY_AND_ASSIGN(DownloadWorker); +}; + +} // namespace download + +#endif // COMPONENTS_DOWNLOAD_PUBLIC_COMMON_DOWNLOAD_WORKER_H_ diff --git a/chromium/components/download/internal/common/parallel_download_job.cc b/chromium/components/download/internal/common/parallel_download_job.cc new file mode 100644 index 00000000000..e139a42c314 --- /dev/null +++ b/chromium/components/download/internal/common/parallel_download_job.cc @@ -0,0 +1,294 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "components/download/internal/common/parallel_download_job.h" + +#include <algorithm> + +#include "base/bind.h" +#include "base/metrics/histogram_macros.h" +#include "base/time/time.h" +#include "components/download/internal/common/parallel_download_utils.h" +#include "components/download/public/common/download_create_info.h" +#include "components/download/public/common/download_stats.h" +#include "net/traffic_annotation/network_traffic_annotation.h" + +namespace download { +namespace { +const int kDownloadJobVerboseLevel = 1; +} // namespace + +ParallelDownloadJob::ParallelDownloadJob( + DownloadItem* download_item, + std::unique_ptr<DownloadRequestHandleInterface> request_handle, + const DownloadCreateInfo& create_info, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory, + net::URLRequestContextGetter* url_request_context_getter) + : DownloadJobImpl(download_item, std::move(request_handle), true), + initial_request_offset_(create_info.offset), + initial_received_slices_(download_item->GetReceivedSlices()), + content_length_(create_info.total_bytes), + requests_sent_(false), + is_canceled_(false), + shared_url_loader_factory_(std::move(shared_url_loader_factory)), + url_request_context_getter_(url_request_context_getter) {} + +ParallelDownloadJob::~ParallelDownloadJob() = default; + +void ParallelDownloadJob::OnDownloadFileInitialized( + DownloadFile::InitializeCallback callback, + DownloadInterruptReason result, + int64_t bytes_wasted) { + DownloadJobImpl::OnDownloadFileInitialized(std::move(callback), result, + bytes_wasted); + if (result == DOWNLOAD_INTERRUPT_REASON_NONE) + BuildParallelRequestAfterDelay(); +} + +void ParallelDownloadJob::Cancel(bool user_cancel) { + is_canceled_ = true; + DownloadJobImpl::Cancel(user_cancel); + + if (!requests_sent_) { + timer_.Stop(); + return; + } + + for (auto& worker : workers_) + worker.second->Cancel(user_cancel); +} + +void ParallelDownloadJob::Pause() { + DownloadJobImpl::Pause(); + 
+ if (!requests_sent_) { + timer_.Stop(); + return; + } + + for (auto& worker : workers_) + worker.second->Pause(); +} + +void ParallelDownloadJob::Resume(bool resume_request) { + DownloadJobImpl::Resume(resume_request); + if (!resume_request) + return; + + // Send parallel requests if the download is paused previously. + if (!requests_sent_) { + if (!timer_.IsRunning()) + BuildParallelRequestAfterDelay(); + return; + } + + for (auto& worker : workers_) + worker.second->Resume(); +} + +int ParallelDownloadJob::GetParallelRequestCount() const { + return GetParallelRequestCountConfig(); +} + +int64_t ParallelDownloadJob::GetMinSliceSize() const { + return GetMinSliceSizeConfig(); +} + +int ParallelDownloadJob::GetMinRemainingTimeInSeconds() const { + return GetParallelRequestRemainingTimeConfig().InSeconds(); +} + +void ParallelDownloadJob::CancelRequestWithOffset(int64_t offset) { + if (initial_request_offset_ == offset) { + DownloadJobImpl::Cancel(false); + return; + } + + auto it = workers_.find(offset); + DCHECK(it != workers_.end()); + it->second->Cancel(false); +} + +void ParallelDownloadJob::BuildParallelRequestAfterDelay() { + DCHECK(workers_.empty()); + DCHECK(!requests_sent_); + DCHECK(!timer_.IsRunning()); + + timer_.Start(FROM_HERE, GetParallelRequestDelayConfig(), this, + &ParallelDownloadJob::BuildParallelRequests); +} + +void ParallelDownloadJob::OnInputStreamReady( + DownloadWorker* worker, + std::unique_ptr<InputStream> input_stream) { + bool success = DownloadJob::AddInputStream( + std::move(input_stream), worker->offset(), worker->length()); + RecordParallelDownloadAddStreamSuccess(success); + + // Destroy the request if the sink is gone. 
+ if (!success) { + VLOG(kDownloadJobVerboseLevel) + << "Byte stream arrived after download file is released."; + worker->Cancel(false); + } +} + +void ParallelDownloadJob::BuildParallelRequests() { + DCHECK(!requests_sent_); + DCHECK(!is_paused()); + if (is_canceled_ || + download_item_->GetState() != DownloadItem::DownloadState::IN_PROGRESS) { + return; + } + + // TODO(qinmin): The size of |slices_to_download| should be no larger than + // |kParallelRequestCount| unless |kParallelRequestCount| is changed after + // a download is interrupted. This could happen if we use finch to config + // the number of parallel requests. + // Get the next |kParallelRequestCount - 1| slices and fork + // new requests. For the remaining slices, they will be handled once some + // of the workers finish their job. + const DownloadItem::ReceivedSlices& received_slices = + download_item_->GetReceivedSlices(); + DownloadItem::ReceivedSlices slices_to_download = + FindSlicesToDownload(received_slices); + + DCHECK(!slices_to_download.empty()); + int64_t first_slice_offset = slices_to_download[0].offset; + + // We may build parallel job without slices. The slices can be cleared or + // previous session only has one stream writing to disk. In these cases, fall + // back to non parallel download. + if (initial_request_offset_ > first_slice_offset) { + VLOG(kDownloadJobVerboseLevel) + << "Received slices data mismatch initial request offset."; + return; + } + + // Create more slices for a new download. The initial request may generate + // a received slice. 
+ if (slices_to_download.size() <= 1 && download_item_->GetTotalBytes() > 0) { + int64_t current_bytes_per_second = + std::max(static_cast<int64_t>(1), download_item_->CurrentSpeed()); + int64_t remaining_bytes = + download_item_->GetTotalBytes() - download_item_->GetReceivedBytes(); + + int64_t remaining_time = remaining_bytes / current_bytes_per_second; + UMA_HISTOGRAM_CUSTOM_COUNTS( + "Download.ParallelDownload.RemainingTimeWhenBuildingRequests", + remaining_time, 0, base::TimeDelta::FromDays(1).InSeconds(), 50); + if (remaining_bytes / current_bytes_per_second > + GetMinRemainingTimeInSeconds()) { + // Fork more requests to accelerate, only if one slice is left to download + // and remaining time seems to be long enough. + slices_to_download = FindSlicesForRemainingContent( + first_slice_offset, + content_length_ - first_slice_offset + initial_request_offset_, + GetParallelRequestCount(), GetMinSliceSize()); + } else { + RecordParallelDownloadCreationEvent( + ParallelDownloadCreationEvent::FALLBACK_REASON_REMAINING_TIME); + } + } + + DCHECK(!slices_to_download.empty()); + + // If the last received slice is finished, remove the last request which can + // be out of the range of the file. E.g, the file is 100 bytes, and the last + // request's range header will be "Range:100-". + if (!received_slices.empty() && received_slices.back().finished) + slices_to_download.pop_back(); + + ForkSubRequests(slices_to_download); + RecordParallelDownloadRequestCount( + static_cast<int>(slices_to_download.size())); + requests_sent_ = true; +} + +void ParallelDownloadJob::ForkSubRequests( + const DownloadItem::ReceivedSlices& slices_to_download) { + // If the initial request is working on the first hole, don't create parallel + // request for this hole. 
+ bool skip_first_slice = true; + DownloadItem::ReceivedSlices initial_slices_to_download = + FindSlicesToDownload(initial_received_slices_); + if (initial_slices_to_download.size() > 1) { + DCHECK_EQ(initial_request_offset_, initial_slices_to_download[0].offset); + int64_t first_hole_max = initial_slices_to_download[0].offset + + initial_slices_to_download[0].received_bytes; + skip_first_slice = slices_to_download[0].offset <= first_hole_max; + } + + for (auto it = slices_to_download.begin(); it != slices_to_download.end(); + ++it) { + if (skip_first_slice) { + skip_first_slice = false; + continue; + } + + DCHECK_GE(it->offset, initial_request_offset_); + // All parallel requests are half open, which sends request headers like + // "Range:50-". + // If server rejects a certain request, others should take over. + CreateRequest(it->offset, DownloadSaveInfo::kLengthFullContent); + } +} + +void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { + DCHECK(download_item_); + DCHECK_EQ(DownloadSaveInfo::kLengthFullContent, length); + + auto worker = std::make_unique<DownloadWorker>(this, offset, length); + + net::NetworkTrafficAnnotationTag traffic_annotation = + net::DefineNetworkTrafficAnnotation("parallel_download_job", R"( + semantics { + sender: "Parallel Download" + description: + "Chrome makes parallel request to speed up download of a file." + trigger: + "When user starts a download request, if it would be technically " + "possible, Chrome starts parallel downloading." + data: "None." + destination: WEBSITE + } + policy { + cookies_allowed: YES + cookies_store: "user" + setting: "This feature cannot be disabled in settings." + chrome_policy { + DownloadRestrictions { + DownloadRestrictions: 3 + } + } + })"); + // The parallel requests only use GET method. 
+ std::unique_ptr<DownloadUrlParameters> download_params( + new DownloadUrlParameters(download_item_->GetURL(), + url_request_context_getter_.get(), + traffic_annotation)); + download_params->set_file_path(download_item_->GetFullPath()); + download_params->set_last_modified(download_item_->GetLastModifiedTime()); + download_params->set_etag(download_item_->GetETag()); + download_params->set_offset(offset); + + // Setting the length will result in range request to fetch a slice of the + // file. + download_params->set_length(length); + + // Subsequent range requests don't need the "If-Range" header. + download_params->set_use_if_range(false); + + // Subsequent range requests have the same referrer URL as the original + // download request. + download_params->set_referrer(download_item_->GetReferrerUrl()); + download_params->set_referrer_policy(net::URLRequest::NEVER_CLEAR_REFERRER); + + // Send the request. + worker->SendRequest(std::move(download_params), shared_url_loader_factory_); + DCHECK(workers_.find(offset) == workers_.end()); + workers_[offset] = std::move(worker); +} + +} // namespace download diff --git a/chromium/components/download/internal/common/parallel_download_job.h b/chromium/components/download/internal/common/parallel_download_job.h new file mode 100644 index 00000000000..c37fb07eb96 --- /dev/null +++ b/chromium/components/download/internal/common/parallel_download_job.h @@ -0,0 +1,125 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef COMPONENTS_DOWNLOAD_INTERNAL_COMMON_PARALLEL_DOWNLOAD_JOB_H_ +#define COMPONENTS_DOWNLOAD_INTERNAL_COMMON_PARALLEL_DOWNLOAD_JOB_H_ + +#include <memory> +#include <unordered_map> +#include <vector> + +#include "base/macros.h" +#include "base/timer/timer.h" +#include "components/download/internal/common/download_job_impl.h" +#include "components/download/internal/common/download_worker.h" +#include "components/download/public/common/download_export.h" +#include "components/download/public/common/parallel_download_configs.h" + +namespace net { +class URLRequestContextGetter; +} + +namespace download { + +// DownloadJob that can create concurrent range requests to fetch different +// parts of the file. +// The original request is hold in base class. +class COMPONENTS_DOWNLOAD_EXPORT ParallelDownloadJob + : public DownloadJobImpl, + public DownloadWorker::Delegate { + public: + // TODO(qinmin): Remove |url_request_context_getter| once network service is + // enabled. + ParallelDownloadJob( + DownloadItem* download_item, + std::unique_ptr<DownloadRequestHandleInterface> request_handle, + const DownloadCreateInfo& create_info, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory, + net::URLRequestContextGetter* url_request_context_getter); + ~ParallelDownloadJob() override; + + // DownloadJobImpl implementation. + void Cancel(bool user_cancel) override; + void Pause() override; + void Resume(bool resume_request) override; + void CancelRequestWithOffset(int64_t offset) override; + + protected: + // DownloadJobImpl implementation. + void OnDownloadFileInitialized(DownloadFile::InitializeCallback callback, + DownloadInterruptReason result, + int64_t bytes_wasted) override; + + // Virtual for testing. 
+ virtual int GetParallelRequestCount() const; + virtual int64_t GetMinSliceSize() const; + virtual int GetMinRemainingTimeInSeconds() const; + + using WorkerMap = + std::unordered_map<int64_t, std::unique_ptr<DownloadWorker>>; + + // Map from the offset position of the slice to the worker that downloads the + // slice. + WorkerMap workers_; + + private: + friend class ParallelDownloadJobTest; + + // DownloadWorker::Delegate implementation. + void OnInputStreamReady(DownloadWorker* worker, + std::unique_ptr<InputStream> input_stream) override; + + // Build parallel requests after a delay, to effectively measure the single + // stream bandwidth. + void BuildParallelRequestAfterDelay(); + + // Build parallel requests to download. This function is the entry point for + // all parallel downloads. + void BuildParallelRequests(); + + // Build one http request for each slice from the second slice. + // The first slice represents the original request. + void ForkSubRequests(const DownloadItem::ReceivedSlices& slices_to_download); + + // Create one range request, virtual for testing. Range request will start + // from |offset| to |length|. Range request will be half open, e.g. + // "Range:50-" if |length| is 0. + virtual void CreateRequest(int64_t offset, int64_t length); + + // Information about the initial request when download is started. + int64_t initial_request_offset_; + + // A snapshot of received slices when creating the parallel download job. + // Download item's received slices may be different from this snapshot when + // |BuildParallelRequests| is called. + DownloadItem::ReceivedSlices initial_received_slices_; + + // The length of the response body of the original request. + // Used to estimate the remaining size of the content when the initial + // request is half open, i.e, |initial_request_length_| is + // DownloadSaveInfo::kLengthFullContent. + int64_t content_length_; + + // Used to send parallel requests after a delay based on Finch config. 
+ base::OneShotTimer timer_; + + // If we have sent parallel requests. + bool requests_sent_; + + // If the download progress is canceled. + bool is_canceled_; + + // SharedURLLoaderFactory to issue network requests with network service + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory_; + + // URLRequestContextGetter for issueing network requests when network service + // is disabled. + scoped_refptr<net::URLRequestContextGetter> url_request_context_getter_; + + DISALLOW_COPY_AND_ASSIGN(ParallelDownloadJob); +}; + +} // namespace download + +#endif // COMPONENTS_DOWNLOAD_INTERNAL_COMMON_PARALLEL_DOWNLOAD_JOB_H_ diff --git a/chromium/components/download/internal/common/parallel_download_job_unittest.cc b/chromium/components/download/internal/common/parallel_download_job_unittest.cc new file mode 100644 index 00000000000..eb09f931707 --- /dev/null +++ b/chromium/components/download/internal/common/parallel_download_job_unittest.cc @@ -0,0 +1,530 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "components/download/internal/common/parallel_download_job.h" + +#include <utility> +#include <vector> + +#include "base/run_loop.h" +#include "base/test/mock_callback.h" +#include "base/test/scoped_task_environment.h" +#include "components/download/internal/common/parallel_download_utils.h" +#include "components/download/public/common/download_create_info.h" +#include "components/download/public/common/download_destination_observer.h" +#include "components/download/public/common/download_file_impl.h" +#include "components/download/public/common/download_task_runner.h" +#include "components/download/public/common/mock_download_item.h" +#include "components/download/public/common/mock_input_stream.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" + +using ::testing::_; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::ReturnRef; +using ::testing::StrictMock; + +namespace download { + +namespace { + +class MockDownloadRequestHandle : public DownloadRequestHandleInterface { + public: + MOCK_METHOD0(PauseRequest, void()); + MOCK_METHOD0(ResumeRequest, void()); + MOCK_METHOD1(CancelRequest, void(bool)); +}; + +class MockDownloadDestinationObserver : public DownloadDestinationObserver { + public: + MOCK_METHOD3(DestinationUpdate, + void(int64_t, + int64_t, + const std::vector<DownloadItem::ReceivedSlice>&)); + void DestinationError( + DownloadInterruptReason reason, + int64_t bytes_so_far, + std::unique_ptr<crypto::SecureHash> hash_state) override {} + void DestinationCompleted( + int64_t total_bytes, + std::unique_ptr<crypto::SecureHash> hash_state) override {} + MOCK_METHOD2(CurrentUpdateStatus, void(int64_t, int64_t)); +}; + +} // namespace + +class ParallelDownloadJobForTest : public ParallelDownloadJob { + public: + ParallelDownloadJobForTest( + DownloadItem* download_item, + std::unique_ptr<DownloadRequestHandleInterface> request_handle, + const DownloadCreateInfo& create_info, + int 
request_count, + int64_t min_slice_size, + int min_remaining_time) + : ParallelDownloadJob(download_item, + std::move(request_handle), + create_info, + nullptr, + nullptr), + request_count_(request_count), + min_slice_size_(min_slice_size), + min_remaining_time_(min_remaining_time) {} + + void CreateRequest(int64_t offset, int64_t length) override { + auto worker = std::make_unique<DownloadWorker>(this, offset, length); + + DCHECK(workers_.find(offset) == workers_.end()); + workers_[offset] = std::move(worker); + } + + ParallelDownloadJob::WorkerMap& workers() { return workers_; } + + void MakeFileInitialized(DownloadFile::InitializeCallback callback, + DownloadInterruptReason result) { + ParallelDownloadJob::OnDownloadFileInitialized(std::move(callback), result, + 0); + } + + int GetParallelRequestCount() const override { return request_count_; } + int64_t GetMinSliceSize() const override { return min_slice_size_; } + int GetMinRemainingTimeInSeconds() const override { + return min_remaining_time_; + } + + void OnInputStreamReady(DownloadWorker* worker, + std::unique_ptr<InputStream> input_stream) override { + CountOnInputStreamReady(); + } + + MOCK_METHOD0(CountOnInputStreamReady, void()); + + private: + int request_count_; + int min_slice_size_; + int min_remaining_time_; + DISALLOW_COPY_AND_ASSIGN(ParallelDownloadJobForTest); +}; + +class ParallelDownloadJobTest : public testing::Test { + public: + ParallelDownloadJobTest() + : task_environment_( + base::test::ScopedTaskEnvironment::MainThreadType::UI, + base::test::ScopedTaskEnvironment::ExecutionMode::QUEUED) {} + + void CreateParallelJob(int64_t initial_request_offset, + int64_t content_length, + const DownloadItem::ReceivedSlices& slices, + int request_count, + int64_t min_slice_size, + int min_remaining_time) { + received_slices_ = slices; + download_item_ = std::make_unique<NiceMock<MockDownloadItem>>(); + EXPECT_CALL(*download_item_, GetTotalBytes()) + .WillRepeatedly(Return(initial_request_offset + 
content_length)); + EXPECT_CALL(*download_item_, GetReceivedBytes()) + .WillRepeatedly(Return(initial_request_offset)); + EXPECT_CALL(*download_item_, GetReceivedSlices()) + .WillRepeatedly(ReturnRef(received_slices_)); + + DownloadCreateInfo info; + info.offset = initial_request_offset; + info.total_bytes = content_length; + std::unique_ptr<MockDownloadRequestHandle> request_handle = + std::make_unique<MockDownloadRequestHandle>(); + mock_request_handle_ = request_handle.get(); + job_ = std::make_unique<ParallelDownloadJobForTest>( + download_item_.get(), std::move(request_handle), info, request_count, + min_slice_size, min_remaining_time); + file_initialized_ = false; + } + + void DestroyParallelJob() { + job_.reset(); + download_item_.reset(); + mock_request_handle_ = nullptr; + } + + void BuildParallelRequests() { job_->BuildParallelRequests(); } + + void set_received_slices(const DownloadItem::ReceivedSlices& slices) { + received_slices_ = slices; + } + + bool IsJobCanceled() const { return job_->is_canceled_; }; + + void MakeWorkerReady( + DownloadWorker* worker, + std::unique_ptr<MockDownloadRequestHandle> request_handle) { + UrlDownloadHandler::Delegate* delegate = + static_cast<UrlDownloadHandler::Delegate*>(worker); + std::unique_ptr<DownloadCreateInfo> create_info = + std::make_unique<DownloadCreateInfo>(); + create_info->request_handle = std::move(request_handle); + delegate->OnUrlDownloadStarted(std::move(create_info), + std::make_unique<MockInputStream>(), nullptr, + DownloadUrlParameters::OnStartedCallback()); + } + + void VerifyWorker(int64_t offset, int64_t length) const { + EXPECT_TRUE(job_->workers_.find(offset) != job_->workers_.end()); + EXPECT_EQ(offset, job_->workers_[offset]->offset()); + EXPECT_EQ(length, job_->workers_[offset]->length()); + } + + void OnFileInitialized(DownloadInterruptReason result, int64_t bytes_wasted) { + file_initialized_ = true; + } + + base::test::ScopedTaskEnvironment task_environment_; + 
std::unique_ptr<MockDownloadItem> download_item_; + std::unique_ptr<ParallelDownloadJobForTest> job_; + bool file_initialized_; + // Request handle for the original request. + MockDownloadRequestHandle* mock_request_handle_; + + // The received slices used to return in + // |MockDownloadItemImpl::GetReceivedSlices| mock function. + DownloadItem::ReceivedSlices received_slices_; +}; + +// Test if parallel requests can be built correctly for a new download without +// existing slices. +TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithoutSlices) { + // Totally 2 requests for 100 bytes. + // Original request: Range:0-, for 50 bytes. + // Task 1: Range:50-, for 50 bytes. + CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 1, 10); + BuildParallelRequests(); + EXPECT_EQ(1u, job_->workers().size()); + VerifyWorker(50, 0); + DestroyParallelJob(); + + // Totally 3 requests for 100 bytes. + // Original request: Range:0-, for 33 bytes. + // Task 1: Range:33-, for 33 bytes. + // Task 2: Range:66-, for 34 bytes. + CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 3, 1, 10); + BuildParallelRequests(); + EXPECT_EQ(2u, job_->workers().size()); + VerifyWorker(33, 0); + VerifyWorker(66, 0); + DestroyParallelJob(); + + // Less than 2 requests, do nothing. + CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 1, 1, 10); + BuildParallelRequests(); + EXPECT_TRUE(job_->workers().empty()); + DestroyParallelJob(); + + CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 0, 1, 10); + BuildParallelRequests(); + EXPECT_TRUE(job_->workers().empty()); + DestroyParallelJob(); + + // Content-length is 0, do nothing. + CreateParallelJob(0, 0, DownloadItem::ReceivedSlices(), 3, 1, 10); + BuildParallelRequests(); + EXPECT_TRUE(job_->workers().empty()); + DestroyParallelJob(); +} + +TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithSlices) { + // File size: 100 bytes. + // Received slices: [0, 17] + // Original request: Range:12-. 
Content-length: 88. + // Totally 3 requests for 83 bytes. + // Original request: Range:12-43. + // Task 1: Range:44-70, for 27 bytes. + // Task 2: Range:71-, for 29 bytes. + DownloadItem::ReceivedSlices slices = {DownloadItem::ReceivedSlice(0, 17)}; + CreateParallelJob(12, 88, slices, 3, 1, 10); + BuildParallelRequests(); + EXPECT_EQ(2u, job_->workers().size()); + VerifyWorker(44, 0); + VerifyWorker(71, 0); + DestroyParallelJob(); + + // File size: 100 bytes. + // Received slices: [0, 60], Range:0-59. + // Original request: Range:60-. Content-length: 40. + // 40 bytes left for 4 requests. Only 1 additional request. + // Original request: Range:60-79, for 20 bytes. + // Task 1: Range:80-, for 20 bytes. + slices = {DownloadItem::ReceivedSlice(0, 60)}; + CreateParallelJob(60, 40, slices, 4, 20, 10); + BuildParallelRequests(); + EXPECT_EQ(1u, job_->workers().size()); + VerifyWorker(80, 0); + DestroyParallelJob(); + + // Content-Length is 0, no additional requests. + slices = {DownloadItem::ReceivedSlice(0, 100)}; + CreateParallelJob(100, 0, slices, 3, 1, 10); + BuildParallelRequests(); + EXPECT_TRUE(job_->workers().empty()); + DestroyParallelJob(); + + // File size: 100 bytes. + // Original request: Range:0-. Content-length: 12(Incorrect server header). + // The request count is 2, however the file contains 3 holes, and we don't + // know if the last slice is completed, so there should be 3 requests in + // parallel and the last request is an out-of-range request. 
+ slices = { + DownloadItem::ReceivedSlice(10, 10), DownloadItem::ReceivedSlice(20, 10), + DownloadItem::ReceivedSlice(40, 10), DownloadItem::ReceivedSlice(90, 10)}; + CreateParallelJob(0, 12, slices, 2, 1, 10); + BuildParallelRequests(); + EXPECT_EQ(3u, job_->workers().size()); + VerifyWorker(30, 0); + VerifyWorker(50, 0); + VerifyWorker(100, 0); + DestroyParallelJob(); +} + +// Ensure that in download resumption, if the first hole is filled before +// sending multiple requests, the new requests can be correctly calculated. +TEST_F(ParallelDownloadJobTest, CreateResumptionRequestsFirstSliceFilled) { + DownloadItem::ReceivedSlices slices = {DownloadItem::ReceivedSlice(0, 10), + DownloadItem::ReceivedSlice(40, 10), + DownloadItem::ReceivedSlice(80, 10)}; + + // The updated slices that has filled the first hole. + DownloadItem::ReceivedSlices updated_slices = slices; + updated_slices[0].received_bytes = 40; + + CreateParallelJob(10, 90, slices, 3, 1, 10); + // Now let download item to return an updated received slice, that the first + // hole in the file has been filled. + set_received_slices(updated_slices); + BuildParallelRequests(); + + // Since the first hole is filled, parallel requests are created to fill other + // two holes. + EXPECT_EQ(2u, job_->workers().size()); + VerifyWorker(50, 0); + VerifyWorker(90, 0); + DestroyParallelJob(); +} + +// Simulate an edge case that we have one received slice in the middle. The +// parallel request should be created correctly. +// This may not happen under current implementation, but should be also handled +// correctly. 
+TEST_F(ParallelDownloadJobTest, CreateResumptionRequestsTwoSlicesToFill) { + DownloadItem::ReceivedSlices slices = {DownloadItem::ReceivedSlice(40, 10)}; + + CreateParallelJob(0, 100, slices, 3, 1, 10); + BuildParallelRequests(); + + EXPECT_EQ(1u, job_->workers().size()); + VerifyWorker(50, 0); + DestroyParallelJob(); + + DownloadItem::ReceivedSlices updated_slices = { + DownloadItem::ReceivedSlice(0, 10), DownloadItem::ReceivedSlice(40, 10)}; + + CreateParallelJob(0, 100, slices, 3, 1, 10); + // Now let download item to return an updated received slice, that the first + // hole in the file is not fully filled. + set_received_slices(updated_slices); + BuildParallelRequests(); + + // Because the initial request is working on the first hole, there should be + // only one parallel request to fill the second hole. + EXPECT_EQ(1u, job_->workers().size()); + VerifyWorker(50, 0); + DestroyParallelJob(); +} + +// Verifies that if the last received slice is finished, we don't send an out +// of range request that starts from the last byte position. +TEST_F(ParallelDownloadJobTest, LastReceivedSliceFinished) { + // One finished slice, no parallel requests should be created. Content length + // should be 0. + DownloadItem::ReceivedSlices slices = { + DownloadItem::ReceivedSlice(0, 100, true)}; + CreateParallelJob(100, 0, slices, 3, 1, 10); + BuildParallelRequests(); + EXPECT_EQ(0u, job_->workers().size()); + DestroyParallelJob(); + + // Two received slices with one hole in the middle. Since the second slice is + // finished, and the hole will be filled by original request, no parallel + // requests will be created. + slices = {DownloadItem::ReceivedSlice(0, 25), + DownloadItem::ReceivedSlice(75, 25, true)}; + CreateParallelJob(25, 100, slices, 3, 1, 10); + BuildParallelRequests(); + EXPECT_EQ(0u, job_->workers().size()); + DestroyParallelJob(); + + // Three received slices with two hole in the middle and the last slice is + // finished. 
The original request will work on the first hole and one parallel + // request is created to fill the second hole. + slices = {DownloadItem::ReceivedSlice(0, 25), + DownloadItem::ReceivedSlice(50, 25), + DownloadItem::ReceivedSlice(100, 25, true)}; + CreateParallelJob(25, 125, slices, 3, 1, 10); + BuildParallelRequests(); + EXPECT_EQ(1u, job_->workers().size()); + VerifyWorker(75, 0); + DestroyParallelJob(); + + // Three received slices with two hole in the middle and the last slice is + // finished. + slices = {DownloadItem::ReceivedSlice(0, 25), + DownloadItem::ReceivedSlice(50, 25), + DownloadItem::ReceivedSlice(100, 25, true)}; + CreateParallelJob(25, 125, slices, 3, 1, 10); + + // If the first hole is filled by the original request after the job is + // initialized but before parallel request is created, the second hole should + // be filled, and no out of range request will be created. + slices[0].received_bytes = 50; + set_received_slices(slices); + BuildParallelRequests(); + EXPECT_EQ(1u, job_->workers().size()); + VerifyWorker(75, 0); + DestroyParallelJob(); +} + +// Pause, cancel, resume can be called before or after the worker establish +// the byte stream. +// These tests ensure the states consistency between the job and workers. + +// Ensure cancel before building the requests will result in no requests are +// built. +TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeBuildRequests) { + CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 1, 10); + EXPECT_CALL(*mock_request_handle_, CancelRequest(_)); + + // Job is canceled before building parallel requests. + job_->Cancel(true); + EXPECT_TRUE(IsJobCanceled()); + + BuildParallelRequests(); + EXPECT_TRUE(job_->workers().empty()); + + DestroyParallelJob(); +} + +// Ensure cancel before adding the byte stream will result in workers being +// canceled. 
+TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeByteStreamReady) { + CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 1, 10); + EXPECT_CALL(*mock_request_handle_, CancelRequest(_)); + + BuildParallelRequests(); + VerifyWorker(50, 0); + + // Job is canceled after building parallel requests and before byte streams + // are added to the file sink. + job_->Cancel(true); + EXPECT_TRUE(IsJobCanceled()); + + for (auto& worker : job_->workers()) { + std::unique_ptr<MockDownloadRequestHandle> mock_handle = + std::make_unique<MockDownloadRequestHandle>(); + EXPECT_CALL(*mock_handle.get(), CancelRequest(_)); + MakeWorkerReady(worker.second.get(), std::move(mock_handle)); + } + + DestroyParallelJob(); +} + +// Ensure pause before adding the byte stream will result in workers being +// paused. +TEST_F(ParallelDownloadJobTest, EarlyPauseBeforeByteStreamReady) { + CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 1, 10); + EXPECT_CALL(*mock_request_handle_, PauseRequest()); + + BuildParallelRequests(); + VerifyWorker(50, 0); + + // Job is paused after building parallel requests and before adding the byte + // stream to the file sink. + job_->Pause(); + EXPECT_TRUE(job_->is_paused()); + + for (auto& worker : job_->workers()) { + EXPECT_CALL(*job_.get(), CountOnInputStreamReady()); + std::unique_ptr<MockDownloadRequestHandle> mock_handle = + std::make_unique<MockDownloadRequestHandle>(); + EXPECT_CALL(*mock_handle.get(), PauseRequest()); + MakeWorkerReady(worker.second.get(), std::move(mock_handle)); + } + + DestroyParallelJob(); +} + +// Test that parallel request is not created if the remaining content can be +// finish downloading soon. 
+TEST_F(ParallelDownloadJobTest, RemainingContentWillFinishSoon) { + DownloadItem::ReceivedSlices slices = {DownloadItem::ReceivedSlice(0, 99)}; + CreateParallelJob(99, 1, slices, 3, 1, 10); + BuildParallelRequests(); + EXPECT_EQ(0u, job_->workers().size()); + + DestroyParallelJob(); +} + +// Test that parallel request is not created until download file is initialized. +TEST_F(ParallelDownloadJobTest, ParallelRequestNotCreatedUntilFileInitialized) { + auto save_info = std::make_unique<DownloadSaveInfo>(); + StrictMock<MockInputStream>* input_stream = new StrictMock<MockInputStream>(); + auto observer = + std::make_unique<StrictMock<MockDownloadDestinationObserver>>(); + base::WeakPtrFactory<DownloadDestinationObserver> observer_factory( + observer.get()); + auto download_file = std::make_unique<DownloadFileImpl>( + std::move(save_info), base::FilePath(), + std::unique_ptr<MockInputStream>(input_stream), DownloadItem::kInvalidId, + observer_factory.GetWeakPtr()); + CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 0, 0); + job_->Start(download_file.get(), + base::Bind(&ParallelDownloadJobTest::OnFileInitialized, + base::Unretained(this)), + DownloadItem::ReceivedSlices()); + EXPECT_FALSE(file_initialized_); + EXPECT_EQ(0u, job_->workers().size()); + EXPECT_CALL(*input_stream, RegisterDataReadyCallback(_)); + EXPECT_CALL(*input_stream, Read(_, _)); + EXPECT_CALL(*(observer.get()), DestinationUpdate(_, _, _)); + task_environment_.RunUntilIdle(); + EXPECT_TRUE(file_initialized_); + EXPECT_EQ(1u, job_->workers().size()); + DestroyParallelJob(); + + // The download file lives on the download sequence, and must + // be deleted there. + GetDownloadTaskRunner()->DeleteSoon(FROM_HERE, std::move(download_file)); + task_environment_.RunUntilIdle(); +} + +// Interruption from IO thread after the file initialized and before building +// the parallel requests, should correctly stop the download. 
+TEST_F(ParallelDownloadJobTest, InterruptOnStartup) { + DownloadItem::ReceivedSlices slices = {DownloadItem::ReceivedSlice(0, 99)}; + CreateParallelJob(99, 1, slices, 3, 1, 10); + + // Start to build the requests without any error. + base::MockCallback<DownloadFile::InitializeCallback> callback; + EXPECT_CALL(callback, Run(_, _)).Times(1); + job_->MakeFileInitialized(callback.Get(), DOWNLOAD_INTERRUPT_REASON_NONE); + + // Simulate and inject an error from IO thread after file initialized. + EXPECT_CALL(*download_item_.get(), GetState()) + .WillRepeatedly(Return(DownloadItem::DownloadState::INTERRUPTED)); + + // Because of the error, no parallel requests are built. + task_environment_.RunUntilIdle(); + EXPECT_EQ(0u, job_->workers().size()); + + DestroyParallelJob(); +} + +} // namespace download diff --git a/chromium/components/download/internal/common/parallel_download_utils.cc b/chromium/components/download/internal/common/parallel_download_utils.cc new file mode 100644 index 00000000000..88148139328 --- /dev/null +++ b/chromium/components/download/internal/common/parallel_download_utils.cc @@ -0,0 +1,221 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/internal/common/parallel_download_utils.h" + +#include "base/metrics/field_trial_params.h" +#include "base/strings/string_number_conversions.h" +#include "base/time/time.h" +#include "components/download/public/common/download_features.h" +#include "components/download/public/common/download_save_info.h" +#include "components/download/public/common/parallel_download_configs.h" + +namespace download { + +namespace { + +// Default value for |kMinSliceSizeFinchKey|, when no parameter is specified. +const int64_t kMinSliceSizeParallelDownload = 1365333; + +// Default value for |kParallelRequestCountFinchKey|, when no parameter is +// specified. 
+const int kParallelRequestCount = 3; + +// The default remaining download time in seconds required for parallel request +// creation. +const int kDefaultRemainingTimeInSeconds = 2; + +// TODO(qinmin): replace this with a comparator operator in +// DownloadItem::ReceivedSlice. +bool compareReceivedSlices(const DownloadItem::ReceivedSlice& lhs, + const DownloadItem::ReceivedSlice& rhs) { + return lhs.offset < rhs.offset; +} + +} // namespace + +std::vector<DownloadItem::ReceivedSlice> FindSlicesToDownload( + const std::vector<DownloadItem::ReceivedSlice>& received_slices) { + std::vector<DownloadItem::ReceivedSlice> result; + if (received_slices.empty()) { + result.emplace_back(0, DownloadSaveInfo::kLengthFullContent); + return result; + } + + std::vector<DownloadItem::ReceivedSlice>::const_iterator iter = + received_slices.begin(); + DCHECK_GE(iter->offset, 0); + if (iter->offset != 0) + result.emplace_back(0, iter->offset); + + while (true) { + int64_t offset = iter->offset + iter->received_bytes; + std::vector<DownloadItem::ReceivedSlice>::const_iterator next = + std::next(iter); + if (next == received_slices.end()) { + result.emplace_back(offset, DownloadSaveInfo::kLengthFullContent); + break; + } + + DCHECK_GE(next->offset, offset); + if (next->offset > offset) + result.emplace_back(offset, next->offset - offset); + iter = next; + } + return result; +} + +size_t AddOrMergeReceivedSliceIntoSortedArray( + const DownloadItem::ReceivedSlice& new_slice, + std::vector<DownloadItem::ReceivedSlice>& received_slices) { + std::vector<DownloadItem::ReceivedSlice>::iterator it = + std::upper_bound(received_slices.begin(), received_slices.end(), + new_slice, compareReceivedSlices); + if (it != received_slices.begin()) { + std::vector<DownloadItem::ReceivedSlice>::iterator prev = std::prev(it); + if (prev->offset + prev->received_bytes == new_slice.offset) { + prev->received_bytes += new_slice.received_bytes; + return 
static_cast<size_t>(std::distance(received_slices.begin(), prev)); + } + } + + it = received_slices.emplace(it, new_slice); + return static_cast<size_t>(std::distance(received_slices.begin(), it)); +} + +bool CanRecoverFromError( + const DownloadFileImpl::SourceStream* error_stream, + const DownloadFileImpl::SourceStream* preceding_neighbor) { + DCHECK(error_stream->offset() >= preceding_neighbor->offset()) + << "Preceding" + "stream's offset should be smaller than the error stream."; + DCHECK_GE(error_stream->length(), 0); + + if (preceding_neighbor->is_finished()) { + // Check if the preceding stream fetched to the end of the file without + // error. The error stream doesn't need to download anything. + if (preceding_neighbor->length() == DownloadSaveInfo::kLengthFullContent && + preceding_neighbor->GetCompletionStatus() == + DOWNLOAD_INTERRUPT_REASON_NONE) { + return true; + } + + // Check if finished preceding stream has already downloaded all data for + // the error stream. + if (error_stream->length() > 0) { + return error_stream->offset() + error_stream->length() <= + preceding_neighbor->offset() + preceding_neighbor->bytes_written(); + } + + return false; + } + + // If preceding stream is half open, and still working, we can recover. + if (preceding_neighbor->length() == DownloadSaveInfo::kLengthFullContent) { + return true; + } + + // Check if unfinished preceding stream is able to download data for error + // stream in the future only when preceding neighbor and error stream both + // have an upper bound. 
+ if (error_stream->length() > 0 && preceding_neighbor->length() > 0) { + return error_stream->offset() + error_stream->length() <= + preceding_neighbor->offset() + preceding_neighbor->length(); + } + + return false; +} + +void DebugSlicesInfo(const DownloadItem::ReceivedSlices& slices) { + DVLOG(1) << "Received slices size : " << slices.size(); + for (const auto& it : slices) { + DVLOG(1) << "Slice offset = " << it.offset + << " , received_bytes = " << it.received_bytes + << " , finished = " << it.finished; + } +} + +std::vector<DownloadItem::ReceivedSlice> FindSlicesForRemainingContent( + int64_t current_offset, + int64_t total_length, + int request_count, + int64_t min_slice_size) { + std::vector<DownloadItem::ReceivedSlice> new_slices; + + if (request_count > 0) { + int64_t slice_size = + std::max<int64_t>(total_length / request_count, min_slice_size); + slice_size = slice_size > 0 ? slice_size : 1; + for (int i = 0, num_requests = total_length / slice_size; + i < num_requests - 1; ++i) { + new_slices.emplace_back(current_offset, slice_size); + current_offset += slice_size; + } + } + + // No strong assumption that content length header is correct. So the last + // slice is always half open, which sends range request like "Range:50-". + new_slices.emplace_back(current_offset, DownloadSaveInfo::kLengthFullContent); + return new_slices; +} + +int64_t GetMinSliceSizeConfig() { + std::string finch_value = base::GetFieldTrialParamValueByFeature( + features::kParallelDownloading, kMinSliceSizeFinchKey); + int64_t result; + return base::StringToInt64(finch_value, &result) + ? result + : kMinSliceSizeParallelDownload; +} + +int GetParallelRequestCountConfig() { + std::string finch_value = base::GetFieldTrialParamValueByFeature( + features::kParallelDownloading, kParallelRequestCountFinchKey); + int result; + return base::StringToInt(finch_value, &result) ? 
result + : kParallelRequestCount; +} + +base::TimeDelta GetParallelRequestDelayConfig() { + std::string finch_value = base::GetFieldTrialParamValueByFeature( + features::kParallelDownloading, kParallelRequestDelayFinchKey); + int64_t time_ms = 0; + return base::StringToInt64(finch_value, &time_ms) + ? base::TimeDelta::FromMilliseconds(time_ms) + : base::TimeDelta::FromMilliseconds(0); +} + +base::TimeDelta GetParallelRequestRemainingTimeConfig() { + std::string finch_value = base::GetFieldTrialParamValueByFeature( + features::kParallelDownloading, kParallelRequestRemainingTimeFinchKey); + int time_in_seconds = 0; + return base::StringToInt(finch_value, &time_in_seconds) + ? base::TimeDelta::FromSeconds(time_in_seconds) + : base::TimeDelta::FromSeconds(kDefaultRemainingTimeInSeconds); +} + +int64_t GetMaxContiguousDataBlockSizeFromBeginning( + const DownloadItem::ReceivedSlices& slices) { + std::vector<DownloadItem::ReceivedSlice>::const_iterator iter = + slices.begin(); + + int64_t size = 0; + while (iter != slices.end() && iter->offset == size) { + size += iter->received_bytes; + iter++; + } + return size; +} + +bool IsParallelDownloadEnabled() { + bool feature_enabled = + base::FeatureList::IsEnabled(features::kParallelDownloading); + // Disabled when |kEnableParallelDownloadFinchKey| Finch config is set to + // false. + bool enabled_parameter = GetFieldTrialParamByFeatureAsBool( + features::kParallelDownloading, kEnableParallelDownloadFinchKey, true); + return feature_enabled && enabled_parameter; +} + +} // namespace download diff --git a/chromium/components/download/internal/common/parallel_download_utils.h b/chromium/components/download/internal/common/parallel_download_utils.h new file mode 100644 index 00000000000..eab3eb02d0a --- /dev/null +++ b/chromium/components/download/internal/common/parallel_download_utils.h @@ -0,0 +1,79 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef COMPONENTS_DOWNLOAD_INTERNAL_COMMON_PARALLEL_DOWNLOAD_UTILS_H_ +#define COMPONENTS_DOWNLOAD_INTERNAL_COMMON_PARALLEL_DOWNLOAD_UTILS_H_ + +#include <vector> + +#include "components/download/public/common/download_export.h" +#include "components/download/public/common/download_file_impl.h" +#include "components/download/public/common/download_item.h" + +namespace download { + +// Given an array of slices that are received, returns an array of slices to +// download. |received_slices| must be ordered by offsets. +COMPONENTS_DOWNLOAD_EXPORT std::vector<DownloadItem::ReceivedSlice> +FindSlicesToDownload( + const std::vector<DownloadItem::ReceivedSlice>& received_slices); + +// Adds or merges a new received slice into a vector of sorted slices. If the +// slice can be merged with the slice preceding it, merge the 2 slices. +// Otherwise, insert the slice and keep the vector sorted. Returns the index +// of the newly updated slice. +COMPONENTS_DOWNLOAD_EXPORT size_t AddOrMergeReceivedSliceIntoSortedArray( + const DownloadItem::ReceivedSlice& new_slice, + std::vector<DownloadItem::ReceivedSlice>& received_slices); + +// Returns if a preceding stream can still download the part of content that +// was arranged to |error_stream|. +COMPONENTS_DOWNLOAD_EXPORT bool CanRecoverFromError( + const DownloadFileImpl::SourceStream* error_stream, + const DownloadFileImpl::SourceStream* preceding_neighbor); + +// Chunks the content that starts from |current_offset|, into at most +// std::max(|request_count|, 1) smaller slices. +// Each slice contains at least |min_slice_size| bytes unless |total_length| +// is less than |min_slice_size|. +// The last slice is half opened. 
+COMPONENTS_DOWNLOAD_EXPORT std::vector<download::DownloadItem::ReceivedSlice> +FindSlicesForRemainingContent(int64_t current_offset, + int64_t total_length, + int request_count, + int64_t min_slice_size); + +// Finch configuration utilities. +// +// Get the minimum slice size to use parallel download from finch configuration. +// A slice won't be further chunked into smaller slices if the size is less +// than the minimum size. +COMPONENTS_DOWNLOAD_EXPORT int64_t GetMinSliceSizeConfig(); + +// Get the request count for parallel download from finch configuration. +COMPONENTS_DOWNLOAD_EXPORT int GetParallelRequestCountConfig(); + +// Get the time delay to send parallel requests after the response of original +// request is handled. +COMPONENTS_DOWNLOAD_EXPORT base::TimeDelta GetParallelRequestDelayConfig(); + +// Get the required remaining time before creating parallel requests. +COMPONENTS_DOWNLOAD_EXPORT base::TimeDelta +GetParallelRequestRemainingTimeConfig(); + +// Given an ordered array of slices, get the maximum size of a contiguous data +// block that starts from offset 0. If the first slice doesn't start from offset +// 0, return 0. +COMPONENTS_DOWNLOAD_EXPORT int64_t GetMaxContiguousDataBlockSizeFromBeginning( + const download::DownloadItem::ReceivedSlices& slices); + +// Returns whether parallel download is enabled. +COMPONENTS_DOWNLOAD_EXPORT bool IsParallelDownloadEnabled(); + +// Print the states of received slices for debugging. 
+void DebugSlicesInfo(const DownloadItem::ReceivedSlices& slices); + +} // namespace download + +#endif // COMPONENTS_DOWNLOAD_INTERNAL_COMMON_PARALLEL_DOWNLOAD_UTILS_H_ diff --git a/chromium/components/download/internal/common/parallel_download_utils_unittest.cc b/chromium/components/download/internal/common/parallel_download_utils_unittest.cc new file mode 100644 index 00000000000..79818681f66 --- /dev/null +++ b/chromium/components/download/internal/common/parallel_download_utils_unittest.cc @@ -0,0 +1,400 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/internal/common/parallel_download_utils.h" + +#include <map> +#include <memory> + +#include "base/strings/string_number_conversions.h" +#include "base/test/scoped_feature_list.h" +#include "components/download/public/common/download_features.h" +#include "components/download/public/common/download_file_impl.h" +#include "components/download/public/common/download_save_info.h" +#include "components/download/public/common/mock_input_stream.h" +#include "components/download/public/common/parallel_download_configs.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" + +using ::testing::Return; +using ::testing::StrictMock; + +namespace download { + +namespace { + +const int kErrorStreamOffset = 100; + +} // namespace + +class ParallelDownloadUtilsTest : public testing::Test {}; + +class ParallelDownloadUtilsRecoverErrorTest + : public ::testing::TestWithParam<int64_t> { + public: + ParallelDownloadUtilsRecoverErrorTest() : input_stream_(nullptr) {} + + // Creates a source stream to test. 
+ std::unique_ptr<DownloadFileImpl::SourceStream> CreateSourceStream( + int64_t offset, + int64_t length) { + input_stream_ = new StrictMock<MockInputStream>(); + EXPECT_CALL(*input_stream_, GetCompletionStatus()) + .WillRepeatedly(Return(DOWNLOAD_INTERRUPT_REASON_NONE)); + return std::make_unique<DownloadFileImpl::SourceStream>( + offset, length, std::unique_ptr<MockInputStream>(input_stream_)); + } + + protected: + // Stream for sending data into the SourceStream. + StrictMock<MockInputStream>* input_stream_; +}; + +TEST_F(ParallelDownloadUtilsTest, FindSlicesToDownload) { + std::vector<DownloadItem::ReceivedSlice> downloaded_slices; + std::vector<DownloadItem::ReceivedSlice> slices_to_download = + FindSlicesToDownload(downloaded_slices); + EXPECT_EQ(1u, slices_to_download.size()); + EXPECT_EQ(0, slices_to_download[0].offset); + EXPECT_EQ(DownloadSaveInfo::kLengthFullContent, + slices_to_download[0].received_bytes); + + downloaded_slices.emplace_back(0, 500); + slices_to_download = FindSlicesToDownload(downloaded_slices); + EXPECT_EQ(1u, slices_to_download.size()); + EXPECT_EQ(500, slices_to_download[0].offset); + EXPECT_EQ(DownloadSaveInfo::kLengthFullContent, + slices_to_download[0].received_bytes); + + // Create a gap between slices. + downloaded_slices.emplace_back(1000, 500); + slices_to_download = FindSlicesToDownload(downloaded_slices); + EXPECT_EQ(2u, slices_to_download.size()); + EXPECT_EQ(500, slices_to_download[0].offset); + EXPECT_EQ(500, slices_to_download[0].received_bytes); + EXPECT_EQ(1500, slices_to_download[1].offset); + EXPECT_EQ(DownloadSaveInfo::kLengthFullContent, + slices_to_download[1].received_bytes); + + // Fill the gap. 
+ downloaded_slices.emplace(downloaded_slices.begin() + 1, + slices_to_download[0]); + slices_to_download = FindSlicesToDownload(downloaded_slices); + EXPECT_EQ(1u, slices_to_download.size()); + EXPECT_EQ(1500, slices_to_download[0].offset); + EXPECT_EQ(DownloadSaveInfo::kLengthFullContent, + slices_to_download[0].received_bytes); + + // Create a new gap at the beginning. + downloaded_slices.erase(downloaded_slices.begin()); + slices_to_download = FindSlicesToDownload(downloaded_slices); + EXPECT_EQ(2u, slices_to_download.size()); + EXPECT_EQ(0, slices_to_download[0].offset); + EXPECT_EQ(500, slices_to_download[0].received_bytes); + EXPECT_EQ(1500, slices_to_download[1].offset); + EXPECT_EQ(DownloadSaveInfo::kLengthFullContent, + slices_to_download[1].received_bytes); +} + +TEST_F(ParallelDownloadUtilsTest, AddOrMergeReceivedSliceIntoSortedArray) { + std::vector<DownloadItem::ReceivedSlice> slices; + DownloadItem::ReceivedSlice slice1(500, 500); + EXPECT_EQ(0u, AddOrMergeReceivedSliceIntoSortedArray(slice1, slices)); + EXPECT_EQ(1u, slices.size()); + EXPECT_EQ(slice1, slices[0]); + + // Adding a slice that can be merged with existing slice. + DownloadItem::ReceivedSlice slice2(1000, 400); + EXPECT_EQ(0u, AddOrMergeReceivedSliceIntoSortedArray(slice2, slices)); + EXPECT_EQ(1u, slices.size()); + EXPECT_EQ(500, slices[0].offset); + EXPECT_EQ(900, slices[0].received_bytes); + + DownloadItem::ReceivedSlice slice3(0, 50); + EXPECT_EQ(0u, AddOrMergeReceivedSliceIntoSortedArray(slice3, slices)); + EXPECT_EQ(2u, slices.size()); + EXPECT_EQ(slice3, slices[0]); + + DownloadItem::ReceivedSlice slice4(100, 50); + EXPECT_EQ(1u, AddOrMergeReceivedSliceIntoSortedArray(slice4, slices)); + EXPECT_EQ(3u, slices.size()); + EXPECT_EQ(slice3, slices[0]); + EXPECT_EQ(slice4, slices[1]); + + // A new slice can only merge with an existing slice earlier in the file, not + // later in the file. 
+ DownloadItem::ReceivedSlice slice5(50, 50); + EXPECT_EQ(0u, AddOrMergeReceivedSliceIntoSortedArray(slice5, slices)); + EXPECT_EQ(3u, slices.size()); + EXPECT_EQ(0, slices[0].offset); + EXPECT_EQ(100, slices[0].received_bytes); + EXPECT_EQ(slice4, slices[1]); +} + +// Verify if a preceding stream can recover the download for half open error +// stream(the current last stream). +TEST_P(ParallelDownloadUtilsRecoverErrorTest, + RecoverErrorForHalfOpenErrorStream) { + // Create a stream that will work on byte range "100-". + const int kErrorStreamOffset = 100; + + auto error_stream = CreateSourceStream(kErrorStreamOffset, + DownloadSaveInfo::kLengthFullContent); + error_stream->set_finished(true); + + // Get starting offset of preceding stream. + int64_t preceding_offset = GetParam(); + EXPECT_LT(preceding_offset, kErrorStreamOffset); + auto preceding_stream = CreateSourceStream( + preceding_offset, DownloadSaveInfo::kLengthFullContent); + EXPECT_FALSE(preceding_stream->is_finished()); + EXPECT_EQ(0u, preceding_stream->bytes_written()); + EXPECT_TRUE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + + // Half open finished preceding stream with 0 bytes written, if there is no + // error, the download should be finished. + preceding_stream->set_finished(true); + EXPECT_EQ(DOWNLOAD_INTERRUPT_REASON_NONE, + preceding_stream->GetCompletionStatus()); + EXPECT_TRUE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + + // Half open finished preceding stream with error, should be treated as + // failed. + EXPECT_CALL(*input_stream_, GetCompletionStatus()) + .WillRepeatedly(Return(DOWNLOAD_INTERRUPT_REASON_FILE_NO_SPACE)); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + + // Even if it has written some data. + preceding_stream->OnWriteBytesToDisk(1000u); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + + // Now capped the length of preceding stream with different values. 
+ preceding_stream = CreateSourceStream(preceding_offset, + kErrorStreamOffset - preceding_offset); + // Since preceding stream can't reach the first byte of the error stream, it + // will fail. + preceding_stream->set_finished(false); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + preceding_stream->set_finished(true); + preceding_stream->OnWriteBytesToDisk(kErrorStreamOffset - preceding_offset); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + + // Inject an error results in failure, even if data written exceeds the first + // byte of error stream. + EXPECT_CALL(*input_stream_, GetCompletionStatus()) + .WillRepeatedly(Return(DOWNLOAD_INTERRUPT_REASON_FILE_NO_SPACE)); + preceding_stream->OnWriteBytesToDisk(1000u); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + + // Make preceding stream can reach the first byte of error stream. + preceding_stream = CreateSourceStream( + preceding_offset, kErrorStreamOffset - preceding_offset + 1); + // Since the error stream is half opened, no matter what it should fail. + preceding_stream->set_finished(false); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + preceding_stream->set_finished(true); + preceding_stream->OnWriteBytesToDisk(kErrorStreamOffset - preceding_offset); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + preceding_stream->OnWriteBytesToDisk(1); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + + // Preceding stream that never download data won't recover the error stream. + preceding_stream = CreateSourceStream(preceding_offset, -1); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); +} + +// Verify recovery for length capped error stream. 
+// Since the error stream length is capped, assume the previous stream length +// is also capped or the previous stream is finished due to error like http +// 404. +TEST_P(ParallelDownloadUtilsRecoverErrorTest, + RecoverErrorForLengthCappedErrorStream) { + // Create a stream that will work on byte range "100-150". + const int kErrorStreamLength = 50; + auto error_stream = + CreateSourceStream(kErrorStreamOffset, kErrorStreamLength); + error_stream->set_finished(true); + + // Get starting offset of preceding stream. + const int64_t preceding_offset = GetParam(); + EXPECT_LT(preceding_offset, kErrorStreamOffset); + + // Create preceding stream capped before starting offset of error stream. + auto preceding_stream = CreateSourceStream( + preceding_offset, kErrorStreamOffset - preceding_offset); + EXPECT_FALSE(preceding_stream->is_finished()); + EXPECT_EQ(0u, preceding_stream->bytes_written()); + + // Since the preceding stream can never reach the starting offset, for an + // unfinished stream, we rely on length instead of bytes written. + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + preceding_stream->OnWriteBytesToDisk(kErrorStreamOffset - preceding_offset); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + preceding_stream->OnWriteBytesToDisk(kErrorStreamLength - 1); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + preceding_stream->OnWriteBytesToDisk(1); + + // Create preceding stream that can reach the upper bound of error stream. + // Since it's unfinished, it potentially can take over error stream's work + // even if no data is written. 
+ preceding_stream = CreateSourceStream( + preceding_offset, + kErrorStreamOffset - preceding_offset + kErrorStreamLength); + EXPECT_FALSE(preceding_stream->is_finished()); + EXPECT_EQ(0u, preceding_stream->bytes_written()); + EXPECT_TRUE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + + // Finished preceding stream only checks data written. + preceding_stream = CreateSourceStream(preceding_offset, 1); + preceding_stream->set_finished(true); + preceding_stream->OnWriteBytesToDisk(kErrorStreamOffset - preceding_offset); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + preceding_stream->OnWriteBytesToDisk(kErrorStreamLength - 1); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + preceding_stream->OnWriteBytesToDisk(1); + EXPECT_TRUE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + + // Even if inject an error, since data written has cover the upper bound of + // the error stream, it should succeed. + EXPECT_CALL(*input_stream_, GetCompletionStatus()) + .WillRepeatedly(Return(DOWNLOAD_INTERRUPT_REASON_FILE_NO_SPACE)); + EXPECT_TRUE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); + + // Preceding stream that never download data won't recover the error stream. + preceding_stream = CreateSourceStream(preceding_offset, -1); + EXPECT_FALSE(CanRecoverFromError(error_stream.get(), preceding_stream.get())); +} + +// The testing value specified offset for preceding stream. The error stream +// offset is fixed value. +INSTANTIATE_TEST_CASE_P(ParallelDownloadUtilsTestSuite, + ParallelDownloadUtilsRecoverErrorTest, + ::testing::Values(0, 20, 80)); + +// Ensure the minimum slice size is correctly applied. +TEST_F(ParallelDownloadUtilsTest, FindSlicesForRemainingContentMinSliceSize) { + // Minimum slice size is smaller than total length, only one slice returned. 
+ DownloadItem::ReceivedSlices slices = + FindSlicesForRemainingContent(0, 100, 3, 150); + EXPECT_EQ(1u, slices.size()); + EXPECT_EQ(0, slices[0].offset); + EXPECT_EQ(0, slices[0].received_bytes); + + // Request count is large, the minimum slice size should limit the number of + // slices returned. + slices = FindSlicesForRemainingContent(0, 100, 33, 50); + EXPECT_EQ(2u, slices.size()); + EXPECT_EQ(0, slices[0].offset); + EXPECT_EQ(50, slices[0].received_bytes); + EXPECT_EQ(50, slices[1].offset); + EXPECT_EQ(0, slices[1].received_bytes); + + // Can chunk 2 slices under minimum slice size, but request count is only 1, + // request count should win. + slices = FindSlicesForRemainingContent(0, 100, 1, 50); + EXPECT_EQ(1u, slices.size()); + EXPECT_EQ(0, slices[0].offset); + EXPECT_EQ(0, slices[0].received_bytes); + + // A total 100 bytes data and a 51 bytes minimum slice size, only one slice is + // returned. + slices = FindSlicesForRemainingContent(0, 100, 3, 51); + EXPECT_EQ(1u, slices.size()); + EXPECT_EQ(0, slices[0].offset); + EXPECT_EQ(0, slices[0].received_bytes); + + // Extreme case where size is smaller than request number. + slices = FindSlicesForRemainingContent(0, 1, 3, 1); + EXPECT_EQ(1u, slices.size()); + EXPECT_EQ(DownloadItem::ReceivedSlice(0, 0), slices[0]); + + // Normal case. 
+ slices = FindSlicesForRemainingContent(0, 100, 3, 5); + EXPECT_EQ(3u, slices.size()); + EXPECT_EQ(DownloadItem::ReceivedSlice(0, 33), slices[0]); + EXPECT_EQ(DownloadItem::ReceivedSlice(33, 33), slices[1]); + EXPECT_EQ(DownloadItem::ReceivedSlice(66, 0), slices[2]); +} + +TEST_F(ParallelDownloadUtilsTest, GetMaxContiguousDataBlockSizeFromBeginning) { + std::vector<DownloadItem::ReceivedSlice> slices; + slices.emplace_back(500, 500); + EXPECT_EQ(0, GetMaxContiguousDataBlockSizeFromBeginning(slices)); + + DownloadItem::ReceivedSlice slice1(0, 200); + AddOrMergeReceivedSliceIntoSortedArray(slice1, slices); + EXPECT_EQ(200, GetMaxContiguousDataBlockSizeFromBeginning(slices)); + + DownloadItem::ReceivedSlice slice2(200, 300); + AddOrMergeReceivedSliceIntoSortedArray(slice2, slices); + EXPECT_EQ(1000, GetMaxContiguousDataBlockSizeFromBeginning(slices)); +} + +// Test to verify Finch parameters for enabled experiment group is read +// correctly. +TEST_F(ParallelDownloadUtilsTest, FinchConfigEnabled) { + base::test::ScopedFeatureList feature_list; + std::map<std::string, std::string> params = { + {kMinSliceSizeFinchKey, "1234"}, + {kParallelRequestCountFinchKey, "6"}, + {kParallelRequestDelayFinchKey, "2000"}, + {kParallelRequestRemainingTimeFinchKey, "3"}}; + feature_list.InitAndEnableFeatureWithParameters( + features::kParallelDownloading, params); + EXPECT_TRUE(IsParallelDownloadEnabled()); + EXPECT_EQ(GetMinSliceSizeConfig(), 1234); + EXPECT_EQ(GetParallelRequestCountConfig(), 6); + EXPECT_EQ(GetParallelRequestDelayConfig(), base::TimeDelta::FromSeconds(2)); + EXPECT_EQ(GetParallelRequestRemainingTimeConfig(), + base::TimeDelta::FromSeconds(3)); +} + +// Test to verify the disable experiment group will actually disable the +// feature. 
+TEST_F(ParallelDownloadUtilsTest, FinchConfigDisabled) { + base::test::ScopedFeatureList feature_list; + feature_list.InitAndDisableFeature(features::kParallelDownloading); + EXPECT_FALSE(IsParallelDownloadEnabled()); +} + +// Test to verify that the Finch parameter |enable_parallel_download| works +// correctly. +TEST_F(ParallelDownloadUtilsTest, FinchConfigDisabledWithParameter) { + { + base::test::ScopedFeatureList feature_list; + std::map<std::string, std::string> params = { + {kMinSliceSizeFinchKey, "4321"}, + {kEnableParallelDownloadFinchKey, "false"}}; + feature_list.InitAndEnableFeatureWithParameters( + features::kParallelDownloading, params); + // Use |enable_parallel_download| to disable parallel download in enabled + // experiment group. + EXPECT_FALSE(IsParallelDownloadEnabled()); + EXPECT_EQ(GetMinSliceSizeConfig(), 4321); + } + { + base::test::ScopedFeatureList feature_list; + std::map<std::string, std::string> params = { + {kMinSliceSizeFinchKey, "4321"}, + {kEnableParallelDownloadFinchKey, "true"}}; + feature_list.InitAndEnableFeatureWithParameters( + features::kParallelDownloading, params); + // Disable only if |enable_parallel_download| sets to false. + EXPECT_TRUE(IsParallelDownloadEnabled()); + EXPECT_EQ(GetMinSliceSizeConfig(), 4321); + } + { + base::test::ScopedFeatureList feature_list; + std::map<std::string, std::string> params = { + {kMinSliceSizeFinchKey, "4321"}}; + feature_list.InitAndEnableFeatureWithParameters( + features::kParallelDownloading, params); + // Empty |enable_parallel_download| in an enabled experiment group will have + // no impact. 
+ EXPECT_TRUE(IsParallelDownloadEnabled()); + EXPECT_EQ(GetMinSliceSizeConfig(), 4321); + } +} + +} // namespace download diff --git a/chromium/components/download/internal/common/resource_downloader.cc b/chromium/components/download/internal/common/resource_downloader.cc new file mode 100644 index 00000000000..ab961f310e3 --- /dev/null +++ b/chromium/components/download/internal/common/resource_downloader.cc @@ -0,0 +1,226 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/public/common/resource_downloader.h" + +#include <memory> + +#include "base/strings/utf_string_conversions.h" +#include "components/download/public/common/stream_handle_input_stream.h" +#include "services/network/public/cpp/shared_url_loader_factory.h" + +namespace network { +struct ResourceResponseHead; +} + +namespace download { + +// This object monitors the URLLoaderCompletionStatus change when +// ResourceDownloader is asking |delegate_| whether download can proceed. 
+class URLLoaderStatusMonitor : public network::mojom::URLLoaderClient { + public: + using URLLoaderStatusChangeCallback = + base::OnceCallback<void(const network::URLLoaderCompletionStatus&)>; + explicit URLLoaderStatusMonitor(URLLoaderStatusChangeCallback callback); + ~URLLoaderStatusMonitor() override = default; + + // network::mojom::URLLoaderClient + void OnReceiveResponse( + const network::ResourceResponseHead& head, + network::mojom::DownloadedTempFilePtr downloaded_file) override {} + void OnReceiveRedirect(const net::RedirectInfo& redirect_info, + const network::ResourceResponseHead& head) override {} + void OnDataDownloaded(int64_t data_length, int64_t encoded_length) override {} + void OnUploadProgress(int64_t current_position, + int64_t total_size, + OnUploadProgressCallback callback) override {} + void OnReceiveCachedMetadata(const std::vector<uint8_t>& data) override {} + void OnTransferSizeUpdated(int32_t transfer_size_diff) override {} + void OnStartLoadingResponseBody( + mojo::ScopedDataPipeConsumerHandle body) override {} + void OnComplete(const network::URLLoaderCompletionStatus& status) override; + + private: + URLLoaderStatusChangeCallback callback_; + DISALLOW_COPY_AND_ASSIGN(URLLoaderStatusMonitor); +}; + +URLLoaderStatusMonitor::URLLoaderStatusMonitor( + URLLoaderStatusChangeCallback callback) + : callback_(std::move(callback)) {} + +void URLLoaderStatusMonitor::OnComplete( + const network::URLLoaderCompletionStatus& status) { + std::move(callback_).Run(status); +} + +// static +std::unique_ptr<ResourceDownloader> ResourceDownloader::BeginDownload( + base::WeakPtr<UrlDownloadHandler::Delegate> delegate, + std::unique_ptr<DownloadUrlParameters> params, + std::unique_ptr<network::ResourceRequest> request, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory, + const GURL& site_url, + const GURL& tab_url, + const GURL& tab_referrer_url, + uint32_t download_id, + bool is_parallel_request, + const 
scoped_refptr<base::SingleThreadTaskRunner>& task_runner) { + auto downloader = std::make_unique<ResourceDownloader>( + delegate, std::move(request), params->render_process_host_id(), + params->render_frame_host_routing_id(), site_url, tab_url, + tab_referrer_url, download_id, task_runner, + std::move(shared_url_loader_factory)); + + downloader->Start(std::move(params), is_parallel_request); + return downloader; +} + +// static +std::unique_ptr<ResourceDownloader> +ResourceDownloader::InterceptNavigationResponse( + base::WeakPtr<UrlDownloadHandler::Delegate> delegate, + std::unique_ptr<network::ResourceRequest> resource_request, + int render_process_id, + int render_frame_id, + const GURL& site_url, + const GURL& tab_url, + const GURL& tab_referrer_url, + std::vector<GURL> url_chain, + const base::Optional<std::string>& suggested_filename, + const scoped_refptr<network::ResourceResponse>& response, + net::CertStatus cert_status, + network::mojom::URLLoaderClientEndpointsPtr url_loader_client_endpoints, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory, + const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) { + auto downloader = std::make_unique<ResourceDownloader>( + delegate, std::move(resource_request), render_process_id, render_frame_id, + site_url, tab_url, tab_referrer_url, download::DownloadItem::kInvalidId, + task_runner, std::move(shared_url_loader_factory)); + downloader->InterceptResponse(std::move(response), std::move(url_chain), + suggested_filename, cert_status, + std::move(url_loader_client_endpoints)); + return downloader; +} + +ResourceDownloader::ResourceDownloader( + base::WeakPtr<UrlDownloadHandler::Delegate> delegate, + std::unique_ptr<network::ResourceRequest> resource_request, + int render_process_id, + int render_frame_id, + const GURL& site_url, + const GURL& tab_url, + const GURL& tab_referrer_url, + uint32_t download_id, + const scoped_refptr<base::SingleThreadTaskRunner>& task_runner, + 
scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory) + : delegate_(delegate), + resource_request_(std::move(resource_request)), + download_id_(download_id), + render_process_id_(render_process_id), + render_frame_id_(render_frame_id), + site_url_(site_url), + tab_url_(tab_url), + tab_referrer_url_(tab_referrer_url), + delegate_task_runner_(task_runner), + shared_url_loader_factory_(std::move(shared_url_loader_factory)), + weak_ptr_factory_(this) {} + +ResourceDownloader::~ResourceDownloader() = default; + +void ResourceDownloader::Start( + std::unique_ptr<DownloadUrlParameters> download_url_parameters, + bool is_parallel_request) { + callback_ = download_url_parameters->callback(); + guid_ = download_url_parameters->guid(); + + // Set up the URLLoaderClient. + url_loader_client_ = std::make_unique<DownloadResponseHandler>( + resource_request_.get(), this, + std::make_unique<DownloadSaveInfo>( + download_url_parameters->GetSaveInfo()), + is_parallel_request, download_url_parameters->is_transient(), + download_url_parameters->fetch_error_body(), + download_url_parameters->request_headers(), + download_url_parameters->request_origin(), + download_url_parameters->download_source(), + std::vector<GURL>(1, resource_request_->url)); + network::mojom::URLLoaderClientPtr url_loader_client_ptr; + url_loader_client_binding_ = + std::make_unique<mojo::Binding<network::mojom::URLLoaderClient>>( + url_loader_client_.get(), mojo::MakeRequest(&url_loader_client_ptr)); + + // Set up the URLLoader + network::mojom::URLLoaderRequest url_loader_request = + mojo::MakeRequest(&url_loader_); + shared_url_loader_factory_->CreateLoaderAndStart( + std::move(url_loader_request), + 0, // routing_id + 0, // request_id + network::mojom::kURLLoadOptionSendSSLInfoWithResponse, + *(resource_request_.get()), std::move(url_loader_client_ptr), + net::MutableNetworkTrafficAnnotationTag( + download_url_parameters->GetNetworkTrafficAnnotation())); + 
url_loader_->SetPriority(net::RequestPriority::IDLE, + 0 /* intra_priority_value */); +} + +void ResourceDownloader::InterceptResponse( + const scoped_refptr<network::ResourceResponse>& response, + std::vector<GURL> url_chain, + const base::Optional<std::string>& suggested_filename, + net::CertStatus cert_status, + network::mojom::URLLoaderClientEndpointsPtr endpoints) { + // Set the URLLoader. + url_loader_.Bind(std::move(endpoints->url_loader)); + + // Create the new URLLoaderClient that will intercept the navigation. + auto save_info = std::make_unique<DownloadSaveInfo>(); + if (suggested_filename.has_value()) + save_info->suggested_name = base::UTF8ToUTF16(suggested_filename.value()); + url_loader_client_ = std::make_unique<DownloadResponseHandler>( + resource_request_.get(), this, std::move(save_info), + false, /* is_parallel_request */ + false, /* is_transient */ + false, /* fetch_error_body */ + download::DownloadUrlParameters::RequestHeadersType(), + std::string(), /* request_origin */ + download::DownloadSource::NAVIGATION, std::move(url_chain)); + + // Simulate on the new URLLoaderClient calls that happened on the old client. + response->head.cert_status = cert_status; + url_loader_client_->OnReceiveResponse( + response->head, network::mojom::DownloadedTempFilePtr()); + + // Bind the new client. 
+ url_loader_client_binding_ = + std::make_unique<mojo::Binding<network::mojom::URLLoaderClient>>( + url_loader_client_.get(), std::move(endpoints->url_loader_client)); +} + +void ResourceDownloader::OnResponseStarted( + std::unique_ptr<DownloadCreateInfo> download_create_info, + mojom::DownloadStreamHandlePtr stream_handle) { + download_create_info->download_id = download_id_; + download_create_info->guid = guid_; + download_create_info->site_url = site_url_; + download_create_info->tab_url = tab_url_; + download_create_info->tab_referrer_url = tab_referrer_url_; + download_create_info->render_process_id = render_process_id_; + download_create_info->render_frame_id = render_frame_id_; + + delegate_task_runner_->PostTask( + FROM_HERE, + base::BindOnce( + &UrlDownloadHandler::Delegate::OnUrlDownloadStarted, delegate_, + std::move(download_create_info), + std::make_unique<StreamHandleInputStream>(std::move(stream_handle)), + std::move(shared_url_loader_factory_), callback_)); +} + +void ResourceDownloader::OnReceiveRedirect() { + url_loader_->FollowRedirect(); +} + +} // namespace download diff --git a/chromium/components/download/internal/common/save_package_download_job.cc b/chromium/components/download/internal/common/save_package_download_job.cc new file mode 100644 index 00000000000..ae0c9277798 --- /dev/null +++ b/chromium/components/download/internal/common/save_package_download_job.cc @@ -0,0 +1,20 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "components/download/internal/common/save_package_download_job.h" + +namespace download { + +SavePackageDownloadJob::SavePackageDownloadJob( + DownloadItem* download_item, + std::unique_ptr<DownloadRequestHandleInterface> request_handle) + : DownloadJob(download_item, std::move(request_handle)) {} + +SavePackageDownloadJob::~SavePackageDownloadJob() = default; + +bool SavePackageDownloadJob::IsSavePackageDownload() const { + return true; +} + +} // namespace download diff --git a/chromium/components/download/internal/common/save_package_download_job.h b/chromium/components/download/internal/common/save_package_download_job.h new file mode 100644 index 00000000000..fea7fb1bbb5 --- /dev/null +++ b/chromium/components/download/internal/common/save_package_download_job.h @@ -0,0 +1,31 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef COMPONENTS_DOWNLOAD_INTERNAL_COMMON_SAVE_PACKAGE_DOWNLOAD_JOB_H_ +#define COMPONENTS_DOWNLOAD_INTERNAL_COMMON_SAVE_PACKAGE_DOWNLOAD_JOB_H_ + +#include "base/macros.h" +#include "components/download/public/common/download_item.h" +#include "components/download/public/common/download_job.h" +#include "components/download/public/common/download_request_handle_interface.h" + +namespace download { + +class SavePackageDownloadJob : public DownloadJob { + public: + SavePackageDownloadJob( + DownloadItem* download_item, + std::unique_ptr<DownloadRequestHandleInterface> request_handle); + ~SavePackageDownloadJob() override; + + // DownloadJob implementation. 
+ bool IsSavePackageDownload() const override; + + private: + DISALLOW_COPY_AND_ASSIGN(SavePackageDownloadJob); +}; + +} // namespace download + +#endif // COMPONENTS_DOWNLOAD_INTERNAL_COMMON_SAVE_PACKAGE_DOWNLOAD_JOB_H_ diff --git a/chromium/components/download/internal/common/stream_handle_input_stream.cc b/chromium/components/download/internal/common/stream_handle_input_stream.cc new file mode 100644 index 00000000000..12a536b93bc --- /dev/null +++ b/chromium/components/download/internal/common/stream_handle_input_stream.cc @@ -0,0 +1,110 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/download/public/common/stream_handle_input_stream.h" + +#include "base/bind.h" +#include "components/download/public/common/download_interrupt_reasons_utils.h" +#include "mojo/public/c/system/types.h" + +namespace download { + +namespace { +// Data length to read from data pipe. 
+const int kBytesToRead = 4096; +} // namespace + +StreamHandleInputStream::StreamHandleInputStream( + mojom::DownloadStreamHandlePtr stream_handle) + : stream_handle_(std::move(stream_handle)), + is_response_completed_(false), + completion_status_(DOWNLOAD_INTERRUPT_REASON_NONE) { + DETACH_FROM_SEQUENCE(sequence_checker_); +} + +StreamHandleInputStream::~StreamHandleInputStream() = default; + +void StreamHandleInputStream::Initialize() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + binding_ = std::make_unique<mojo::Binding<mojom::DownloadStreamClient>>( + this, std::move(stream_handle_->client_request)); + binding_->set_connection_error_handler(base::BindOnce( + &StreamHandleInputStream::OnStreamCompleted, base::Unretained(this), + mojom::NetworkRequestStatus::USER_CANCELED)); + handle_watcher_ = std::make_unique<mojo::SimpleWatcher>( + FROM_HERE, mojo::SimpleWatcher::ArmingPolicy::AUTOMATIC, + base::SequencedTaskRunnerHandle::Get()); +} + +bool StreamHandleInputStream::IsEmpty() { + return !stream_handle_; +} + +void StreamHandleInputStream::RegisterDataReadyCallback( + const mojo::SimpleWatcher::ReadyCallback& callback) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + if (handle_watcher_) { + handle_watcher_->Watch(stream_handle_->stream.get(), + MOJO_HANDLE_SIGNAL_READABLE, callback); + } +} + +void StreamHandleInputStream::ClearDataReadyCallback() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + if (handle_watcher_) + handle_watcher_->Cancel(); +} + +void StreamHandleInputStream::RegisterCompletionCallback( + base::OnceClosure callback) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + completion_callback_ = std::move(callback); +} + +InputStream::StreamState StreamHandleInputStream::Read( + scoped_refptr<net::IOBuffer>* data, + size_t* length) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + if (!handle_watcher_) + return InputStream::EMPTY; + + *length = kBytesToRead; + *data = new net::IOBuffer(kBytesToRead); + 
MojoResult mojo_result = stream_handle_->stream->ReadData( + (*data)->data(), (uint32_t*)length, MOJO_READ_DATA_FLAG_NONE); + // TODO(qinmin): figure out when COMPLETE should be returned. + switch (mojo_result) { + case MOJO_RESULT_OK: + return InputStream::HAS_DATA; + case MOJO_RESULT_SHOULD_WAIT: + return InputStream::EMPTY; + case MOJO_RESULT_FAILED_PRECONDITION: + if (is_response_completed_) + return InputStream::COMPLETE; + stream_handle_->stream.reset(); + ClearDataReadyCallback(); + return InputStream::WAIT_FOR_COMPLETION; + case MOJO_RESULT_INVALID_ARGUMENT: + case MOJO_RESULT_OUT_OF_RANGE: + case MOJO_RESULT_BUSY: + NOTREACHED(); + return InputStream::COMPLETE; + } + return InputStream::EMPTY; +} + +DownloadInterruptReason StreamHandleInputStream::GetCompletionStatus() { + return completion_status_; +} + +void StreamHandleInputStream::OnStreamCompleted( + mojom::NetworkRequestStatus status) { + // This can be called before or after data pipe is completely drained. + completion_status_ = ConvertMojoNetworkRequestStatusToInterruptReason(status); + is_response_completed_ = true; + if (completion_callback_) + std::move(completion_callback_).Run(); +} + +} // namespace download diff --git a/chromium/components/download/internal/common/url_download_handler_factory.cc b/chromium/components/download/internal/common/url_download_handler_factory.cc new file mode 100644 index 00000000000..fec5bf41cd0 --- /dev/null +++ b/chromium/components/download/internal/common/url_download_handler_factory.cc @@ -0,0 +1,83 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "components/download/public/common/url_download_handler_factory.h" + +#include "base/no_destructor.h" +#include "base/synchronization/lock.h" +#include "components/download/public/common/download_item.h" +#include "components/download/public/common/download_utils.h" +#include "components/download/public/common/resource_downloader.h" +#include "services/network/public/cpp/shared_url_loader_factory.h" + +namespace download { + +namespace { + +// Factory for creating URLDownloadHandler used by network service. +class DefaultUrlDownloadHandlerFactory : public UrlDownloadHandlerFactory { + public: + DefaultUrlDownloadHandlerFactory() = default; + ~DefaultUrlDownloadHandlerFactory() override = default; + + protected: + UrlDownloadHandler::UniqueUrlDownloadHandlerPtr CreateUrlDownloadHandler( + std::unique_ptr<download::DownloadUrlParameters> params, + base::WeakPtr<download::UrlDownloadHandler::Delegate> delegate, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory, + const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) override { + std::unique_ptr<network::ResourceRequest> request = + CreateResourceRequest(params.get()); + return UrlDownloadHandler::UniqueUrlDownloadHandlerPtr( + download::ResourceDownloader::BeginDownload( + delegate, std::move(params), std::move(request), + std::move(shared_url_loader_factory), GURL(), GURL(), GURL(), + download::DownloadItem::kInvalidId, true, task_runner) + .release(), + base::OnTaskRunnerDeleter(base::ThreadTaskRunnerHandle::Get())); + } + + private: + DISALLOW_COPY_AND_ASSIGN(DefaultUrlDownloadHandlerFactory); +}; + +UrlDownloadHandlerFactory* g_url_download_handler_factory; + +// Lock to protect |g_url_download_handler_factory| +base::Lock& GetURLDownloadHandlerFactoryLock() { + static base::NoDestructor<base::Lock> instance; + return *instance; +} + +} // namespace + +// static +UrlDownloadHandler::UniqueUrlDownloadHandlerPtr +UrlDownloadHandlerFactory::Create( + 
std::unique_ptr<download::DownloadUrlParameters> params, + base::WeakPtr<download::UrlDownloadHandler::Delegate> delegate, + scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory, + const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) { + base::AutoLock auto_lock(GetURLDownloadHandlerFactoryLock()); + if (!g_url_download_handler_factory) + g_url_download_handler_factory = new DefaultUrlDownloadHandlerFactory(); + return g_url_download_handler_factory->CreateUrlDownloadHandler( + std::move(params), delegate, std::move(shared_url_loader_factory), + task_runner); +} + +// static +void UrlDownloadHandlerFactory::Install(UrlDownloadHandlerFactory* factory) { + base::AutoLock auto_lock(GetURLDownloadHandlerFactoryLock()); + if (factory == g_url_download_handler_factory) + return; + delete g_url_download_handler_factory; + g_url_download_handler_factory = factory; +} + +UrlDownloadHandlerFactory::UrlDownloadHandlerFactory() = default; + +UrlDownloadHandlerFactory::~UrlDownloadHandlerFactory() = default; + +} // namespace download |