From f1ae1e39ce6b7578c5697c977bc3b52b119301ab Mon Sep 17 00:00:00 2001
From: Bob Van Landuyt
Date: Mon, 13 Nov 2017 16:52:07 +0100
Subject: Move the circuitbreaker check out into a separate process

Moving the check out of the general requests ensures that regular
requests are not slowed down.

To keep the process performing these checks small, the check is still
performed inside Unicorn, but it is triggered by a separate process
running on the same server.

Because the checks are now done outside of normal requests, we can use
a simpler failure strategy: the check is performed in the background
every `circuitbreaker_check_interval`. Failures are logged in Redis and
are reset when a check succeeds. Per check, we retry
`circuitbreaker_access_retries` times within
`circuitbreaker_storage_timeout` seconds.

When the number of failures exceeds
`circuitbreaker_failure_count_threshold`, access to the storage is
blocked.

After `failure_reset_time` without any checks, the stored failures are
cleared. This can happen when the process that performs the checks is
not running.
---
 spec/controllers/health_controller_spec.rb | 42 ++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/spec/controllers/health_controller_spec.rb b/spec/controllers/health_controller_spec.rb
index 9e9cf4f2c1f..95946def5f9 100644
--- a/spec/controllers/health_controller_spec.rb
+++ b/spec/controllers/health_controller_spec.rb
@@ -14,6 +14,48 @@ describe HealthController do
     stub_env('IN_MEMORY_APPLICATION_SETTINGS', 'false')
   end
 
+  describe '#storage_check' do
+    before do
+      allow(Gitlab::RequestContext).to receive(:client_ip).and_return(whitelisted_ip)
+    end
+
+    subject { post :storage_check }
+
+    it 'checks all the configured storages' do
+      expect(Gitlab::Git::Storage::Checker).to receive(:check_all).and_call_original
+
+      subject
+    end
+
+    it 'returns the check interval' do
+      stub_env('IN_MEMORY_APPLICATION_SETTINGS', 'true')
+      stub_application_setting(circuitbreaker_check_interval: 10)
+
+      subject
+
+      expect(json_response['check_interval']).to eq(10)
+    end
+
+    context 'with failing storages', :broken_storage do
+      before do
+        stub_storage_settings(
+          broken: { path: 'tmp/tests/non-existent-repositories' }
+        )
+      end
+
+      it 'includes the failure information' do
+        subject
+
+        expected_results = [
+          { 'storage' => 'broken', 'success' => false },
+          { 'storage' => 'default', 'success' => true }
+        ]
+
+        expect(json_response['results']).to eq(expected_results)
+      end
+    end
+  end
+
   describe '#readiness' do
     shared_context 'endpoint responding with readiness data' do
       let(:request_params) { {} }
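
For illustration, here is a minimal Ruby sketch of the failure
accounting described in the commit message, not GitLab's actual
implementation. The class name, Redis key format, and default values
are hypothetical; only the setting names come from the message above.

    require 'redis'

    class StorageFailureTracker
      # Hypothetical key format, one counter per storage shard.
      FAILURE_KEY = 'storage_check:failures:%s'.freeze

      def initialize(storage, redis: Redis.new,
                     failure_count_threshold: 3,
                     failure_reset_time: 1800)
        @storage = storage
        @redis = redis
        @failure_count_threshold = failure_count_threshold
        @failure_reset_time = failure_reset_time
      end

      # Called after every background check: a success wipes the stored
      # failures, a failure increments the counter.
      def record(success)
        if success
          @redis.del(key)
        else
          @redis.incr(key)
          # Refreshing the TTL on every failure means the stored
          # failures are cleared after `failure_reset_time` without any
          # checks, e.g. when the checking process is not running.
          @redis.expire(key, @failure_reset_time)
        end
      end

      # Access to the storage is blocked once the number of failures
      # exceeds the threshold.
      def circuit_broken?
        @redis.get(key).to_i > @failure_count_threshold
      end

      private

      def key
        format(FAILURE_KEY, @storage)
      end
    end

Keeping the counter in Redis rather than in process memory is what lets
the checking process and the request-serving Unicorn workers share one
view of a storage's health.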
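
The separate process that triggers the check could look roughly like
the loop below. The URL is hypothetical and the error handling is
deliberately simple; the only assumption taken from the spec is that
the endpoint returns a JSON body with a `check_interval` key.

    require 'net/http'
    require 'json'
    require 'uri'

    # Hypothetical endpoint URL; the real path and port depend on how
    # the application is mounted.
    CHECK_URI = URI('http://localhost:8080/-/health/storage_check')

    loop do
      begin
        response = Net::HTTP.post(CHECK_URI, '')
        body = JSON.parse(response.body)

        # The endpoint tells the caller how long to sleep before the
        # next check, so the interval is controlled by the application
        # setting `circuitbreaker_check_interval`.
        sleep(body.fetch('check_interval', 60))
      rescue StandardError => e
        warn "storage check failed: #{e.message}"
        sleep(60)
      end
    end

Because the caller sleeps for whatever interval the server returns,
changing the setting takes effect on the next iteration without
restarting the checking process.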