Diffstat (limited to 'test/unit')
-rw-r--r--  test/unit/__init__.py                     |  304
-rw-r--r--  test/unit/account/test_reaper.py          |   67
-rw-r--r--  test/unit/common/middleware/test_dlo.py   |   14
-rw-r--r--  test/unit/common/middleware/test_slo.py   |   35
-rw-r--r--  test/unit/common/ring/test_ring.py        |   61
-rw-r--r--  test/unit/common/test_constraints.py      |    5
-rw-r--r--  test/unit/common/test_internal_client.py  |   27
-rw-r--r--  test/unit/common/test_request_helpers.py  |   81
-rw-r--r--  test/unit/common/test_storage_policy.py   |  414
-rw-r--r--  test/unit/common/test_swob.py             |   21
-rw-r--r--  test/unit/common/test_utils.py            |   24
-rw-r--r--  test/unit/common/test_wsgi.py             |   21
-rw-r--r--  test/unit/container/test_sync.py          |  213
-rw-r--r--  test/unit/obj/test_auditor.py             |   61
-rw-r--r--  test/unit/obj/test_diskfile.py            | 4182
-rw-r--r--  test/unit/obj/test_expirer.py             |  193
-rwxr-xr-x  test/unit/obj/test_reconstructor.py       | 2484
-rw-r--r--  test/unit/obj/test_replicator.py          |  183
-rwxr-xr-x  test/unit/obj/test_server.py              | 1134
-rw-r--r--  test/unit/obj/test_ssync_receiver.py      |  231
-rw-r--r--  test/unit/obj/test_ssync_sender.py        |  930
-rw-r--r--  test/unit/obj/test_updater.py             |   24
-rw-r--r--  test/unit/proxy/controllers/test_base.py  |  101
-rwxr-xr-x  test/unit/proxy/controllers/test_obj.py   | 1266
-rw-r--r--  test/unit/proxy/test_mem_server.py        |   17
-rw-r--r--  test/unit/proxy/test_server.py            | 1417
-rw-r--r--  test/unit/proxy/test_sysmeta.py           |    2
27 files changed, 11273 insertions(+), 2239 deletions(-)
diff --git a/test/unit/__init__.py b/test/unit/__init__.py
index da7212c98..372fb58bb 100644
--- a/test/unit/__init__.py
+++ b/test/unit/__init__.py
@@ -22,24 +22,30 @@ import errno
import sys
from contextlib import contextmanager, closing
from collections import defaultdict, Iterable
+import itertools
from numbers import Number
from tempfile import NamedTemporaryFile
import time
+import eventlet
from eventlet.green import socket
from tempfile import mkdtemp
from shutil import rmtree
+from swift.common.utils import Timestamp
from test import get_config
from swift.common import swob, utils
from swift.common.ring import Ring, RingData
from hashlib import md5
-from eventlet import sleep, Timeout
import logging.handlers
from httplib import HTTPException
from swift.common import storage_policy
+from swift.common.storage_policy import StoragePolicy, ECStoragePolicy
import functools
import cPickle as pickle
from gzip import GzipFile
import mock as mocklib
+import inspect
+
+EMPTY_ETAG = md5().hexdigest()
# try not to import this module from swift
if not os.path.basename(sys.argv[0]).startswith('swift'):
@@ -47,26 +53,40 @@ if not os.path.basename(sys.argv[0]).startswith('swift'):
utils.HASH_PATH_SUFFIX = 'endcap'
-def patch_policies(thing_or_policies=None, legacy_only=False):
+def patch_policies(thing_or_policies=None, legacy_only=False,
+ with_ec_default=False, fake_ring_args=None):
+ if isinstance(thing_or_policies, (
+ Iterable, storage_policy.StoragePolicyCollection)):
+ return PatchPolicies(thing_or_policies, fake_ring_args=fake_ring_args)
+
if legacy_only:
- default_policies = [storage_policy.StoragePolicy(
- 0, 'legacy', True, object_ring=FakeRing())]
+ default_policies = [
+ StoragePolicy(0, name='legacy', is_default=True),
+ ]
+ default_ring_args = [{}]
+ elif with_ec_default:
+ default_policies = [
+ ECStoragePolicy(0, name='ec', is_default=True,
+ ec_type='jerasure_rs_vand', ec_ndata=10,
+ ec_nparity=4, ec_segment_size=4096),
+ StoragePolicy(1, name='unu'),
+ ]
+ default_ring_args = [{'replicas': 14}, {}]
else:
default_policies = [
- storage_policy.StoragePolicy(
- 0, 'nulo', True, object_ring=FakeRing()),
- storage_policy.StoragePolicy(
- 1, 'unu', object_ring=FakeRing()),
+ StoragePolicy(0, name='nulo', is_default=True),
+ StoragePolicy(1, name='unu'),
]
+ default_ring_args = [{}, {}]
- thing_or_policies = thing_or_policies or default_policies
+ fake_ring_args = fake_ring_args or default_ring_args
+ decorator = PatchPolicies(default_policies, fake_ring_args=fake_ring_args)
- if isinstance(thing_or_policies, (
- Iterable, storage_policy.StoragePolicyCollection)):
- return PatchPolicies(thing_or_policies)
+ if not thing_or_policies:
+ return decorator
else:
- # it's a thing!
- return PatchPolicies(default_policies)(thing_or_policies)
+ # it's a thing, we return the wrapped thing instead of the decorator
+ return decorator(thing_or_policies)
class PatchPolicies(object):
@@ -76,11 +96,33 @@ class PatchPolicies(object):
patched yet)
"""
- def __init__(self, policies):
+ def __init__(self, policies, fake_ring_args=None):
if isinstance(policies, storage_policy.StoragePolicyCollection):
self.policies = policies
else:
self.policies = storage_policy.StoragePolicyCollection(policies)
+ self.fake_ring_args = fake_ring_args or [None] * len(self.policies)
+
+ def _setup_rings(self):
+ """
+ Our tests tend to use the policies' rings as their own personal
+ playground - which can be a problem in the particular case of a
+ patched TestCase class, where the FakeRing objects are scoped to the
+ call of the patch_policies wrapper outside of the TestCase instance,
+ which can lead to state bleeding between tests.
+
+ To give tests better isolation without having to think about it,
+ here we capture the args required to *build* new FakeRing
+ instances, so we can ensure each test method gets a clean ring setup.
+
+ The TestCase can always "tweak" these fresh rings in setUp - or, if
+ it would prefer the same "reset" behavior with custom FakeRings, it
+ can pass in its own fake_ring_args to patch_policies instead of
+ setting the object_ring on the policy definitions.
+ """
+ for policy, fake_ring_arg in zip(self.policies, self.fake_ring_args):
+ if fake_ring_arg is not None:
+ policy.object_ring = FakeRing(**fake_ring_arg)
def __call__(self, thing):
if isinstance(thing, type):
@@ -89,24 +131,33 @@ class PatchPolicies(object):
return self._patch_method(thing)
def _patch_class(self, cls):
+ """
+ Creating a new class that inherits from the decorated class is the
+ more common way I've seen class decorators done - but it seems to
+ cause infinite recursion when super is called from inside methods in
+ the decorated class.
+ """
- class NewClass(cls):
+ orig_setUp = cls.setUp
+ orig_tearDown = cls.tearDown
- already_patched = False
+ def setUp(cls_self):
+ self._orig_POLICIES = storage_policy._POLICIES
+ if not getattr(cls_self, '_policies_patched', False):
+ storage_policy._POLICIES = self.policies
+ self._setup_rings()
+ cls_self._policies_patched = True
- def setUp(cls_self):
- self._orig_POLICIES = storage_policy._POLICIES
- if not cls_self.already_patched:
- storage_policy._POLICIES = self.policies
- cls_self.already_patched = True
- super(NewClass, cls_self).setUp()
+ orig_setUp(cls_self)
- def tearDown(cls_self):
- super(NewClass, cls_self).tearDown()
- storage_policy._POLICIES = self._orig_POLICIES
+ def tearDown(cls_self):
+ orig_tearDown(cls_self)
+ storage_policy._POLICIES = self._orig_POLICIES
- NewClass.__name__ = cls.__name__
- return NewClass
+ cls.setUp = setUp
+ cls.tearDown = tearDown
+
+ return cls
def _patch_method(self, f):
@functools.wraps(f)
@@ -114,6 +165,7 @@ class PatchPolicies(object):
self._orig_POLICIES = storage_policy._POLICIES
try:
storage_policy._POLICIES = self.policies
+ self._setup_rings()
return f(*args, **kwargs)
finally:
storage_policy._POLICIES = self._orig_POLICIES
@@ -171,14 +223,16 @@ class FakeRing(Ring):
return self.replicas
def _get_part_nodes(self, part):
- return list(self._devs)
+ return [dict(node, index=i) for i, node in enumerate(list(self._devs))]
def get_more_nodes(self, part):
# replicas^2 is the true cap
for x in xrange(self.replicas, min(self.replicas + self.max_more_nodes,
self.replicas * self.replicas)):
yield {'ip': '10.0.0.%s' % x,
+ 'replication_ip': '10.0.0.%s' % x,
'port': self._base_port + x,
+ 'replication_port': self._base_port + x,
'device': 'sda',
'zone': x % 3,
'region': x % 2,
@@ -206,6 +260,48 @@ def write_fake_ring(path, *devs):
pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f)
+class FabricatedRing(Ring):
+ """
+ When a FakeRing just won't do - you can fabricate one to meet
+ your test's needs.
+ """
+
+ def __init__(self, replicas=6, devices=8, nodes=4, port=6000,
+ part_power=4):
+ self.devices = devices
+ self.nodes = nodes
+ self.port = port
+ self.replicas = replicas
+ self.part_power = part_power
+ self._part_shift = 32 - self.part_power
+ self._reload()
+
+ def _reload(self, *args, **kwargs):
+ self._rtime = time.time() * 2
+ if hasattr(self, '_replica2part2dev_id'):
+ return
+ self._devs = [{
+ 'region': 1,
+ 'zone': 1,
+ 'weight': 1.0,
+ 'id': i,
+ 'device': 'sda%d' % i,
+ 'ip': '10.0.0.%d' % (i % self.nodes),
+ 'replication_ip': '10.0.0.%d' % (i % self.nodes),
+ 'port': self.port,
+ 'replication_port': self.port,
+ } for i in range(self.devices)]
+
+ self._replica2part2dev_id = [
+ [None] * 2 ** self.part_power
+ for i in range(self.replicas)
+ ]
+ dev_ids = itertools.cycle(range(self.devices))
+ for p in range(2 ** self.part_power):
+ for r in range(self.replicas):
+ self._replica2part2dev_id[r][p] = next(dev_ids)
+
+
class FakeMemcache(object):
def __init__(self):
@@ -363,8 +459,8 @@ class UnmockTimeModule(object):
logging.time = UnmockTimeModule()
-class FakeLogger(logging.Logger):
- # a thread safe logger
+class FakeLogger(logging.Logger, object):
+ # a thread safe fake logger
def __init__(self, *args, **kwargs):
self._clear()
@@ -376,22 +472,31 @@ class FakeLogger(logging.Logger):
self.thread_locals = None
self.parent = None
+ store_in = {
+ logging.ERROR: 'error',
+ logging.WARNING: 'warning',
+ logging.INFO: 'info',
+ logging.DEBUG: 'debug',
+ logging.CRITICAL: 'critical',
+ }
+
+ def _log(self, level, msg, *args, **kwargs):
+ store_name = self.store_in[level]
+ cargs = [msg]
+ if any(args):
+ cargs.extend(args)
+ captured = dict(kwargs)
+ if 'exc_info' in kwargs and \
+ not isinstance(kwargs['exc_info'], tuple):
+ captured['exc_info'] = sys.exc_info()
+ self.log_dict[store_name].append((tuple(cargs), captured))
+ super(FakeLogger, self)._log(level, msg, *args, **kwargs)
+
def _clear(self):
self.log_dict = defaultdict(list)
self.lines_dict = {'critical': [], 'error': [], 'info': [],
'warning': [], 'debug': []}
- def _store_in(store_name):
- def stub_fn(self, *args, **kwargs):
- self.log_dict[store_name].append((args, kwargs))
- return stub_fn
-
- def _store_and_log_in(store_name, level):
- def stub_fn(self, *args, **kwargs):
- self.log_dict[store_name].append((args, kwargs))
- self._log(level, args[0], args[1:], **kwargs)
- return stub_fn
-
def get_lines_for_level(self, level):
if level not in self.lines_dict:
raise KeyError(
@@ -404,16 +509,10 @@ class FakeLogger(logging.Logger):
return dict((level, msgs) for level, msgs in self.lines_dict.items()
if len(msgs) > 0)
- error = _store_and_log_in('error', logging.ERROR)
- info = _store_and_log_in('info', logging.INFO)
- warning = _store_and_log_in('warning', logging.WARNING)
- warn = _store_and_log_in('warning', logging.WARNING)
- debug = _store_and_log_in('debug', logging.DEBUG)
-
- def exception(self, *args, **kwargs):
- self.log_dict['exception'].append((args, kwargs,
- str(sys.exc_info()[1])))
- print 'FakeLogger Exception: %s' % self.log_dict
+ def _store_in(store_name):
+ def stub_fn(self, *args, **kwargs):
+ self.log_dict[store_name].append((args, kwargs))
+ return stub_fn
# mock out the StatsD logging methods:
update_stats = _store_in('update_stats')
@@ -605,19 +704,53 @@ def mock(update):
delattr(module, attr)
+class SlowBody(object):
+ """
+ This works with our fake_http_connect: if you hand one of these in
+ instead of a string, it will make reads take longer by the given
+ amount. It should be a little easier to extend than the current
+ slow kwarg - which inserts whitespace in the response. It should
+ also be easy to detect whether the body inside FakeConn is one of
+ these (or a subclass), if we ever wanted to do something smarter
+ than just duck-typing enough of the str/buffer api to get by.
+ """
+
+ def __init__(self, body, slowness):
+ self.body = body
+ self.slowness = slowness
+
+ def slowdown(self):
+ eventlet.sleep(self.slowness)
+
+ def __getitem__(self, s):
+ return SlowBody(self.body[s], self.slowness)
+
+ def __len__(self):
+ return len(self.body)
+
+ def __radd__(self, other):
+ self.slowdown()
+ return other + self.body
+
+
def fake_http_connect(*code_iter, **kwargs):
class FakeConn(object):
def __init__(self, status, etag=None, body='', timestamp='1',
- headers=None):
+ headers=None, expect_headers=None, connection_id=None,
+ give_send=None):
# connect exception
- if isinstance(status, (Exception, Timeout)):
+ if isinstance(status, (Exception, eventlet.Timeout)):
raise status
if isinstance(status, tuple):
- self.expect_status, self.status = status
+ self.expect_status = list(status[:-1])
+ self.status = status[-1]
+ self.explicit_expect_list = True
else:
- self.expect_status, self.status = (None, status)
+ self.expect_status, self.status = ([], status)
+ self.explicit_expect_list = False
if not self.expect_status:
# when a swift backend service returns a status before reading
# from the body (mostly an error response) eventlet.wsgi will
@@ -628,9 +761,9 @@ def fake_http_connect(*code_iter, **kwargs):
# our backend services and return certain types of responses
# as expect statuses just like a real backend server would do.
if self.status in (507, 412, 409):
- self.expect_status = status
+ self.expect_status = [status]
else:
- self.expect_status = 100
+ self.expect_status = [100, 100]
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
@@ -639,32 +772,41 @@ def fake_http_connect(*code_iter, **kwargs):
self.etag = etag
self.body = body
self.headers = headers or {}
+ self.expect_headers = expect_headers or {}
self.timestamp = timestamp
+ self.connection_id = connection_id
+ self.give_send = give_send
if 'slow' in kwargs and isinstance(kwargs['slow'], list):
try:
self._next_sleep = kwargs['slow'].pop(0)
except IndexError:
self._next_sleep = None
+ # be nice to tricky bits with node_iters
+ eventlet.sleep()
def getresponse(self):
- if isinstance(self.status, (Exception, Timeout)):
+ if self.expect_status and self.explicit_expect_list:
+ raise Exception('Test did not consume all fake '
+ 'expect status: %r' % (self.expect_status,))
+ if isinstance(self.status, (Exception, eventlet.Timeout)):
raise self.status
exc = kwargs.get('raise_exc')
if exc:
- if isinstance(exc, (Exception, Timeout)):
+ if isinstance(exc, (Exception, eventlet.Timeout)):
raise exc
raise Exception('test')
if kwargs.get('raise_timeout_exc'):
- raise Timeout()
+ raise eventlet.Timeout()
return self
def getexpect(self):
- if isinstance(self.expect_status, (Exception, Timeout)):
+ expect_status = self.expect_status.pop(0)
+ if isinstance(expect_status, (Exception, eventlet.Timeout)):
+     raise expect_status
- headers = {}
- if self.expect_status == 409:
+ headers = dict(self.expect_headers)
+ if expect_status == 409:
headers['X-Backend-Timestamp'] = self.timestamp
- return FakeConn(self.expect_status, headers=headers)
+ return FakeConn(expect_status, headers=headers)
def getheaders(self):
etag = self.etag
@@ -717,18 +859,20 @@ def fake_http_connect(*code_iter, **kwargs):
if am_slow:
if self.sent < 4:
self.sent += 1
- sleep(value)
+ eventlet.sleep(value)
return ' '
rv = self.body[:amt]
self.body = self.body[amt:]
return rv
def send(self, amt=None):
+ if self.give_send:
+ self.give_send(self.connection_id, amt)
am_slow, value = self.get_slow()
if am_slow:
if self.received < 4:
self.received += 1
- sleep(value)
+ eventlet.sleep(value)
def getheader(self, name, default=None):
return swob.HeaderKeyDict(self.getheaders()).get(name, default)
@@ -738,16 +882,22 @@ def fake_http_connect(*code_iter, **kwargs):
timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
- if isinstance(kwargs.get('headers'), list):
+ if isinstance(kwargs.get('headers'), (list, tuple)):
headers_iter = iter(kwargs['headers'])
else:
headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))
+ if isinstance(kwargs.get('expect_headers'), (list, tuple)):
+ expect_headers_iter = iter(kwargs['expect_headers'])
+ else:
+ expect_headers_iter = iter([kwargs.get('expect_headers', {})] *
+ len(code_iter))
x = kwargs.get('missing_container', [False] * len(code_iter))
if not isinstance(x, (tuple, list)):
x = [x] * len(code_iter)
container_ts_iter = iter(x)
code_iter = iter(code_iter)
+ conn_id_and_code_iter = enumerate(code_iter)
static_body = kwargs.get('body', None)
body_iter = kwargs.get('body_iter', None)
if body_iter:
@@ -755,17 +905,22 @@ def fake_http_connect(*code_iter, **kwargs):
def connect(*args, **ckwargs):
if kwargs.get('slow_connect', False):
- sleep(0.1)
+ eventlet.sleep(0.1)
if 'give_content_type' in kwargs:
if len(args) >= 7 and 'Content-Type' in args[6]:
kwargs['give_content_type'](args[6]['Content-Type'])
else:
kwargs['give_content_type']('')
+ i, status = conn_id_and_code_iter.next()
if 'give_connect' in kwargs:
- kwargs['give_connect'](*args, **ckwargs)
- status = code_iter.next()
+ give_conn_fn = kwargs['give_connect']
+ argspec = inspect.getargspec(give_conn_fn)
+ if argspec.keywords or 'connection_id' in argspec.args:
+ ckwargs['connection_id'] = i
+ give_conn_fn(*args, **ckwargs)
etag = etag_iter.next()
headers = headers_iter.next()
+ expect_headers = expect_headers_iter.next()
timestamp = timestamps_iter.next()
if status <= 0:
@@ -775,7 +930,8 @@ def fake_http_connect(*code_iter, **kwargs):
else:
body = body_iter.next()
return FakeConn(status, etag, body=body, timestamp=timestamp,
- headers=headers)
+ headers=headers, expect_headers=expect_headers,
+ connection_id=i, give_send=kwargs.get('give_send'))
connect.code_iter = code_iter
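
A note on the tuple handling in FakeConn above: when a status is given as a
tuple, everything but the last element is consumed by getexpect() as explicit
informational responses, and the last element becomes the final status. A
sketch of how a test might drive it (the host, path, and companion 404 status
are illustrative):

    # two 100 Continue responses before a final 201, e.g. for a
    # multi-phase PUT; a second connection that simply returns 404
    connect = fake_http_connect((100, 100, 201), 404)
    conn = connect('1.2.3.4', '1234', 'sda', '0', 'PUT', '/a/c/o')
    conn.getexpect().status    # -> 100
    conn.getexpect().status    # -> 100
    conn.getresponse().status  # -> 201; getresponse() raises instead if
                               # any explicit expect status was left unread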
@@ -806,3 +962,7 @@ def mocked_http_conn(*args, **kwargs):
left_over_status = list(fake_conn.code_iter)
if left_over_status:
raise AssertionError('left over status %r' % left_over_status)
+
+
+def make_timestamp_iter():
+ return iter(Timestamp(t) for t in itertools.count(int(time.time())))
diff --git a/test/unit/account/test_reaper.py b/test/unit/account/test_reaper.py
index 6c1c102b8..d81b565fc 100644
--- a/test/unit/account/test_reaper.py
+++ b/test/unit/account/test_reaper.py
@@ -141,7 +141,7 @@ cont_nodes = [{'device': 'sda1',
@unit.patch_policies([StoragePolicy(0, 'zero', False,
object_ring=unit.FakeRing()),
StoragePolicy(1, 'one', True,
- object_ring=unit.FakeRing())])
+ object_ring=unit.FakeRing(replicas=4))])
class TestReaper(unittest.TestCase):
def setUp(self):
@@ -215,7 +215,7 @@ class TestReaper(unittest.TestCase):
r.stats_objects_possibly_remaining = 0
r.myips = myips
if fakelogger:
- r.logger = FakeLogger()
+ r.logger = unit.debug_logger('test-reaper')
return r
def fake_reap_account(self, *args, **kwargs):
@@ -287,7 +287,7 @@ class TestReaper(unittest.TestCase):
policy.idx)
for i, call_args in enumerate(
fake_direct_delete.call_args_list):
- cnode = cont_nodes[i]
+ cnode = cont_nodes[i % len(cont_nodes)]
host = '%(ip)s:%(port)s' % cnode
device = cnode['device']
headers = {
@@ -297,11 +297,13 @@ class TestReaper(unittest.TestCase):
'X-Backend-Storage-Policy-Index': policy.idx
}
ring = r.get_object_ring(policy.idx)
- expected = call(ring.devs[i], 0, 'a', 'c', 'o',
+ expected = call(dict(ring.devs[i], index=i), 0,
+ 'a', 'c', 'o',
headers=headers, conn_timeout=0.5,
response_timeout=10)
self.assertEqual(call_args, expected)
- self.assertEqual(r.stats_objects_deleted, 3)
+ self.assertEqual(r.stats_objects_deleted,
+ policy.object_ring.replicas)
def test_reap_object_fail(self):
r = self.init_reaper({}, fakelogger=True)
@@ -312,7 +314,26 @@ class TestReaper(unittest.TestCase):
self.fake_direct_delete_object):
r.reap_object('a', 'c', 'partition', cont_nodes, 'o',
policy.idx)
- self.assertEqual(r.stats_objects_deleted, 1)
+ # IMHO, the stat handling in the node loop of reap object is
+ # over-indented, but no one has complained, so I'm not inclined
+ # to move it. However, it's worth noting we're currently keeping
+ # stats on deletes per *replica* - which is rather obvious from
+ # these tests, but the result is surprising because of some
+ # funny logic to *skip* increments on successful deletes of
+ # replicas until we have more successful responses than
+ # failures. This means that while the first replica doesn't
+ # increment deleted because of the failure, the second one
+ # *does* get successfully deleted, but *also does not* increment
+ # the counter (!?).
+ #
+ # In the three replica case this leaves only the last deleted
+ # object incrementing the counter - in the four replica case
+ # this leaves the last two.
+ #
+ # Basically this test will always result in:
+ # deleted == num_replicas - 2
+ self.assertEqual(r.stats_objects_deleted,
+ policy.object_ring.replicas - 2)
self.assertEqual(r.stats_objects_remaining, 1)
self.assertEqual(r.stats_objects_possibly_remaining, 1)
@@ -347,7 +368,7 @@ class TestReaper(unittest.TestCase):
mocks['direct_get_container'].side_effect = fake_get_container
r.reap_container('a', 'partition', acc_nodes, 'c')
mock_calls = mocks['direct_delete_object'].call_args_list
- self.assertEqual(3, len(mock_calls))
+ self.assertEqual(policy.object_ring.replicas, len(mock_calls))
for call_args in mock_calls:
_args, kwargs = call_args
self.assertEqual(kwargs['headers']
@@ -355,7 +376,7 @@ class TestReaper(unittest.TestCase):
policy.idx)
self.assertEquals(mocks['direct_delete_container'].call_count, 3)
- self.assertEqual(r.stats_objects_deleted, 3)
+ self.assertEqual(r.stats_objects_deleted, policy.object_ring.replicas)
def test_reap_container_get_object_fail(self):
r = self.init_reaper({}, fakelogger=True)
@@ -373,7 +394,7 @@ class TestReaper(unittest.TestCase):
self.fake_reap_object)]
with nested(*ctx):
r.reap_container('a', 'partition', acc_nodes, 'c')
- self.assertEqual(r.logger.inc['return_codes.4'], 1)
+ self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 1)
self.assertEqual(r.stats_containers_deleted, 1)
def test_reap_container_partial_fail(self):
@@ -392,7 +413,7 @@ class TestReaper(unittest.TestCase):
self.fake_reap_object)]
with nested(*ctx):
r.reap_container('a', 'partition', acc_nodes, 'c')
- self.assertEqual(r.logger.inc['return_codes.4'], 2)
+ self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 2)
self.assertEqual(r.stats_containers_possibly_remaining, 1)
def test_reap_container_full_fail(self):
@@ -411,7 +432,7 @@ class TestReaper(unittest.TestCase):
self.fake_reap_object)]
with nested(*ctx):
r.reap_container('a', 'partition', acc_nodes, 'c')
- self.assertEqual(r.logger.inc['return_codes.4'], 3)
+ self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 3)
self.assertEqual(r.stats_containers_remaining, 1)
@patch('swift.account.reaper.Ring',
@@ -436,8 +457,8 @@ class TestReaper(unittest.TestCase):
mocks['direct_get_container'].side_effect = fake_get_container
r.reap_container('a', 'partition', acc_nodes, 'c')
- self.assertEqual(r.logger.msg,
- 'ERROR: invalid storage policy index: 2')
+ self.assertEqual(r.logger.get_lines_for_level('error'), [
+ 'ERROR: invalid storage policy index: 2'])
def fake_reap_container(self, *args, **kwargs):
self.called_amount += 1
@@ -462,13 +483,16 @@ class TestReaper(unittest.TestCase):
nodes = r.get_account_ring().get_part_nodes()
self.assertTrue(r.reap_account(broker, 'partition', nodes))
self.assertEqual(self.called_amount, 4)
- self.assertEqual(r.logger.msg.find('Completed pass'), 0)
- self.assertTrue(r.logger.msg.find('1 containers deleted'))
- self.assertTrue(r.logger.msg.find('1 objects deleted'))
- self.assertTrue(r.logger.msg.find('1 containers remaining'))
- self.assertTrue(r.logger.msg.find('1 objects remaining'))
- self.assertTrue(r.logger.msg.find('1 containers possibly remaining'))
- self.assertTrue(r.logger.msg.find('1 objects possibly remaining'))
+ info_lines = r.logger.get_lines_for_level('info')
+ self.assertEqual(len(info_lines), 2)
+ start_line, stat_line = info_lines
+ self.assertEqual(start_line, 'Beginning pass on account a')
+ self.assertTrue('1 containers deleted' in stat_line)
+ self.assertTrue('1 objects deleted' in stat_line)
+ self.assertTrue('1 containers remaining' in stat_line)
+ self.assertTrue('1 objects remaining' in stat_line)
+ self.assertTrue('1 containers possibly remaining' in stat_line)
+ self.assertTrue('1 objects possibly remaining' in stat_line)
def test_reap_account_no_container(self):
broker = FakeAccountBroker(tuple())
@@ -482,7 +506,8 @@ class TestReaper(unittest.TestCase):
with nested(*ctx):
nodes = r.get_account_ring().get_part_nodes()
self.assertTrue(r.reap_account(broker, 'partition', nodes))
- self.assertEqual(r.logger.msg.find('Completed pass'), 0)
+ self.assertTrue(r.logger.get_lines_for_level(
+ 'info')[-1].startswith('Completed pass'))
self.assertEqual(self.called_amount, 0)
def test_reap_device(self):
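
To make the counting quirk described in test_reap_object_fail concrete, a
worked four-replica trace (assuming, as in that test, that only the first
delete fails):

    # replica 0: delete fails    -> 0 successes vs 1 failure, counter = 0
    # replica 1: delete succeeds -> 1 vs 1, not yet a majority, counter = 0
    # replica 2: delete succeeds -> 2 vs 1, counter = 1
    # replica 3: delete succeeds -> 3 vs 1, counter = 2
    # i.e. stats_objects_deleted == replicas - 2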
diff --git a/test/unit/common/middleware/test_dlo.py b/test/unit/common/middleware/test_dlo.py
index a292bc92b..16237eb1d 100644
--- a/test/unit/common/middleware/test_dlo.py
+++ b/test/unit/common/middleware/test_dlo.py
@@ -564,9 +564,10 @@ class TestDloGetManifest(DloTestCase):
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, "409 Conflict")
- err_log = self.dlo.logger.log_dict['exception'][0][0][0]
- self.assertTrue(err_log.startswith('ERROR: An error occurred '
- 'while retrieving segments'))
+ err_lines = self.dlo.logger.get_lines_for_level('error')
+ self.assertEqual(len(err_lines), 1)
+ self.assertTrue(err_lines[0].startswith(
+ 'ERROR: An error occurred while retrieving segments'))
def test_error_fetching_second_segment(self):
self.app.register(
@@ -581,9 +582,10 @@ class TestDloGetManifest(DloTestCase):
self.assertTrue(isinstance(exc, exceptions.SegmentError))
self.assertEqual(status, "200 OK")
self.assertEqual(''.join(body), "aaaaa") # first segment made it out
- err_log = self.dlo.logger.log_dict['exception'][0][0][0]
- self.assertTrue(err_log.startswith('ERROR: An error occurred '
- 'while retrieving segments'))
+ err_lines = self.dlo.logger.get_lines_for_level('error')
+ self.assertEqual(len(err_lines), 1)
+ self.assertTrue(err_lines[0].startswith(
+ 'ERROR: An error occurred while retrieving segments'))
def test_error_listing_container_first_listing_request(self):
self.app.register(
diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py
index f4bac155c..4160d91d4 100644
--- a/test/unit/common/middleware/test_slo.py
+++ b/test/unit/common/middleware/test_slo.py
@@ -1431,9 +1431,10 @@ class TestSloGetManifest(SloTestCase):
self.assertEqual(status, '409 Conflict')
self.assertEqual(self.app.call_count, 10)
- err_log = self.slo.logger.log_dict['exception'][0][0][0]
- self.assertTrue(err_log.startswith('ERROR: An error occurred '
- 'while retrieving segments'))
+ error_lines = self.slo.logger.get_lines_for_level('error')
+ self.assertEqual(len(error_lines), 1)
+ self.assertTrue(error_lines[0].startswith(
+ 'ERROR: An error occurred while retrieving segments'))
def test_get_with_if_modified_since(self):
# It's important not to pass the If-[Un]Modified-Since header to the
@@ -1508,9 +1509,10 @@ class TestSloGetManifest(SloTestCase):
status, headers, body = self.call_slo(req)
self.assertEqual('409 Conflict', status)
- err_log = self.slo.logger.log_dict['exception'][0][0][0]
- self.assertTrue(err_log.startswith('ERROR: An error occurred '
- 'while retrieving segments'))
+ error_lines = self.slo.logger.get_lines_for_level('error')
+ self.assertEqual(len(error_lines), 1)
+ self.assertTrue(error_lines[0].startswith(
+ 'ERROR: An error occurred while retrieving segments'))
def test_invalid_json_submanifest(self):
self.app.register(
@@ -1585,9 +1587,10 @@ class TestSloGetManifest(SloTestCase):
status, headers, body = self.call_slo(req)
self.assertEqual('409 Conflict', status)
- err_log = self.slo.logger.log_dict['exception'][0][0][0]
- self.assertTrue(err_log.startswith('ERROR: An error occurred '
- 'while retrieving segments'))
+ error_lines = self.slo.logger.get_lines_for_level('error')
+ self.assertEqual(len(error_lines), 1)
+ self.assertTrue(error_lines[0].startswith(
+ 'ERROR: An error occurred while retrieving segments'))
def test_first_segment_mismatched_size(self):
self.app.register('GET', '/v1/AUTH_test/gettest/manifest-badsize',
@@ -1603,9 +1606,10 @@ class TestSloGetManifest(SloTestCase):
status, headers, body = self.call_slo(req)
self.assertEqual('409 Conflict', status)
- err_log = self.slo.logger.log_dict['exception'][0][0][0]
- self.assertTrue(err_log.startswith('ERROR: An error occurred '
- 'while retrieving segments'))
+ error_lines = self.slo.logger.get_lines_for_level('error')
+ self.assertEqual(len(error_lines), 1)
+ self.assertTrue(error_lines[0].startswith(
+ 'ERROR: An error occurred while retrieving segments'))
def test_download_takes_too_long(self):
the_time = [time.time()]
@@ -1657,9 +1661,10 @@ class TestSloGetManifest(SloTestCase):
status, headers, body = self.call_slo(req)
self.assertEqual('409 Conflict', status)
- err_log = self.slo.logger.log_dict['exception'][0][0][0]
- self.assertTrue(err_log.startswith('ERROR: An error occurred '
- 'while retrieving segments'))
+ error_lines = self.slo.logger.get_lines_for_level('error')
+ self.assertEqual(len(error_lines), 1)
+ self.assertTrue(error_lines[0].startswith(
+ 'ERROR: An error occurred while retrieving segments'))
class TestSloBulkLogger(unittest.TestCase):
diff --git a/test/unit/common/ring/test_ring.py b/test/unit/common/ring/test_ring.py
index fff715785..b97b60eee 100644
--- a/test/unit/common/ring/test_ring.py
+++ b/test/unit/common/ring/test_ring.py
@@ -363,63 +363,74 @@ class TestRing(TestRingBase):
self.assertRaises(TypeError, self.ring.get_nodes)
part, nodes = self.ring.get_nodes('a')
self.assertEquals(part, 0)
- self.assertEquals(nodes, [self.intended_devs[0],
- self.intended_devs[3]])
+ self.assertEquals(nodes, [dict(node, index=i) for i, node in
+ enumerate([self.intended_devs[0],
+ self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a1')
self.assertEquals(part, 0)
- self.assertEquals(nodes, [self.intended_devs[0],
- self.intended_devs[3]])
+ self.assertEquals(nodes, [dict(node, index=i) for i, node in
+ enumerate([self.intended_devs[0],
+ self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a4')
self.assertEquals(part, 1)
- self.assertEquals(nodes, [self.intended_devs[1],
- self.intended_devs[4]])
+ self.assertEquals(nodes, [dict(node, index=i) for i, node in
+ enumerate([self.intended_devs[1],
+ self.intended_devs[4]])])
part, nodes = self.ring.get_nodes('aa')
self.assertEquals(part, 1)
- self.assertEquals(nodes, [self.intended_devs[1],
- self.intended_devs[4]])
+ self.assertEquals(nodes, [dict(node, index=i) for i, node in
+ enumerate([self.intended_devs[1],
+ self.intended_devs[4]])])
part, nodes = self.ring.get_nodes('a', 'c1')
self.assertEquals(part, 0)
- self.assertEquals(nodes, [self.intended_devs[0],
- self.intended_devs[3]])
+ self.assertEquals(nodes, [dict(node, index=i) for i, node in
+ enumerate([self.intended_devs[0],
+ self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a', 'c0')
self.assertEquals(part, 3)
- self.assertEquals(nodes, [self.intended_devs[1],
- self.intended_devs[4]])
+ self.assertEquals(nodes, [dict(node, index=i) for i, node in
+ enumerate([self.intended_devs[1],
+ self.intended_devs[4]])])
part, nodes = self.ring.get_nodes('a', 'c3')
self.assertEquals(part, 2)
- self.assertEquals(nodes, [self.intended_devs[0],
- self.intended_devs[3]])
+ self.assertEquals(nodes, [dict(node, index=i) for i, node in
+ enumerate([self.intended_devs[0],
+ self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a', 'c2')
self.assertEquals(part, 2)
- self.assertEquals(nodes, [self.intended_devs[0],
- self.intended_devs[3]])
+ self.assertEquals(nodes, [dict(node, index=i) for i, node in
+ enumerate([self.intended_devs[0],
+ self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a', 'c', 'o1')
self.assertEquals(part, 1)
- self.assertEquals(nodes, [self.intended_devs[1],
- self.intended_devs[4]])
+ self.assertEquals(nodes, [dict(node, index=i) for i, node in
+ enumerate([self.intended_devs[1],
+ self.intended_devs[4]])])
part, nodes = self.ring.get_nodes('a', 'c', 'o5')
self.assertEquals(part, 0)
- self.assertEquals(nodes, [self.intended_devs[0],
- self.intended_devs[3]])
+ self.assertEquals(nodes, [dict(node, index=i) for i, node in
+ enumerate([self.intended_devs[0],
+ self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a', 'c', 'o0')
self.assertEquals(part, 0)
- self.assertEquals(nodes, [self.intended_devs[0],
- self.intended_devs[3]])
+ self.assertEquals(nodes, [dict(node, index=i) for i, node in
+ enumerate([self.intended_devs[0],
+ self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a', 'c', 'o2')
self.assertEquals(part, 2)
- self.assertEquals(nodes, [self.intended_devs[0],
- self.intended_devs[3]])
+ self.assertEquals(nodes, [dict(node, index=i) for i, node in
+ enumerate([self.intended_devs[0],
+ self.intended_devs[3]])])
def add_dev_to_ring(self, new_dev):
self.ring.devs.append(new_dev)
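
These rewritten assertions reflect the _get_part_nodes change in
test/unit/__init__.py: primary nodes now carry their replica position under an
'index' key. A quick sketch with FakeRing (a sketch, assuming three replicas):

    from test.unit import FakeRing

    ring = FakeRing(replicas=3)
    part, nodes = ring.get_nodes('a', 'c', 'o')
    [node['index'] for node in nodes]   # -> [0, 1, 2]
    # handoffs from get_more_nodes() carry no 'index' key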
diff --git a/test/unit/common/test_constraints.py b/test/unit/common/test_constraints.py
index 7ae9fb44a..61231d3f0 100644
--- a/test/unit/common/test_constraints.py
+++ b/test/unit/common/test_constraints.py
@@ -368,6 +368,11 @@ class TestConstraints(unittest.TestCase):
self.assertTrue('X-Delete-At' in req.headers)
self.assertEqual(req.headers['X-Delete-At'], expected)
+ def test_check_dir(self):
+ self.assertFalse(constraints.check_dir('', ''))
+ with mock.patch("os.path.isdir", MockTrue()):
+ self.assertTrue(constraints.check_dir('/srv', 'foo/bar'))
+
def test_check_mount(self):
self.assertFalse(constraints.check_mount('', ''))
with mock.patch("swift.common.utils.ismount", MockTrue()):
diff --git a/test/unit/common/test_internal_client.py b/test/unit/common/test_internal_client.py
index d4027261d..b7d680688 100644
--- a/test/unit/common/test_internal_client.py
+++ b/test/unit/common/test_internal_client.py
@@ -235,19 +235,20 @@ class TestInternalClient(unittest.TestCase):
write_fake_ring(object_ring_path)
with patch_policies([StoragePolicy(0, 'legacy', True)]):
client = internal_client.InternalClient(conf_path, 'test', 1)
- self.assertEqual(client.account_ring, client.app.app.app.account_ring)
- self.assertEqual(client.account_ring.serialized_path,
- account_ring_path)
- self.assertEqual(client.container_ring,
- client.app.app.app.container_ring)
- self.assertEqual(client.container_ring.serialized_path,
- container_ring_path)
- object_ring = client.app.app.app.get_object_ring(0)
- self.assertEqual(client.get_object_ring(0),
- object_ring)
- self.assertEqual(object_ring.serialized_path,
- object_ring_path)
- self.assertEquals(client.auto_create_account_prefix, '-')
+ self.assertEqual(client.account_ring,
+ client.app.app.app.account_ring)
+ self.assertEqual(client.account_ring.serialized_path,
+ account_ring_path)
+ self.assertEqual(client.container_ring,
+ client.app.app.app.container_ring)
+ self.assertEqual(client.container_ring.serialized_path,
+ container_ring_path)
+ object_ring = client.app.app.app.get_object_ring(0)
+ self.assertEqual(client.get_object_ring(0),
+ object_ring)
+ self.assertEqual(object_ring.serialized_path,
+ object_ring_path)
+ self.assertEquals(client.auto_create_account_prefix, '-')
def test_init(self):
class App(object):
diff --git a/test/unit/common/test_request_helpers.py b/test/unit/common/test_request_helpers.py
index c87a39979..d2dc02c48 100644
--- a/test/unit/common/test_request_helpers.py
+++ b/test/unit/common/test_request_helpers.py
@@ -16,10 +16,13 @@
"""Tests for swift.common.request_helpers"""
import unittest
-from swift.common.swob import Request
+from swift.common.swob import Request, HTTPException
+from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY
from swift.common.request_helpers import is_sys_meta, is_user_meta, \
is_sys_or_user_meta, strip_sys_meta_prefix, strip_user_meta_prefix, \
- remove_items, copy_header_subset
+ remove_items, copy_header_subset, get_name_and_placement
+
+from test.unit import patch_policies
server_types = ['account', 'container', 'object']
@@ -81,3 +84,77 @@ class TestRequestHelpers(unittest.TestCase):
self.assertEqual(to_req.headers['A'], 'b')
self.assertFalse('c' in to_req.headers)
self.assertFalse('C' in to_req.headers)
+
+ @patch_policies(with_ec_default=True)
+ def test_get_name_and_placement_object_req(self):
+ path = '/device/part/account/container/object'
+ req = Request.blank(path, headers={
+ 'X-Backend-Storage-Policy-Index': '0'})
+ device, part, account, container, obj, policy = \
+ get_name_and_placement(req, 5, 5, True)
+ self.assertEqual(device, 'device')
+ self.assertEqual(part, 'part')
+ self.assertEqual(account, 'account')
+ self.assertEqual(container, 'container')
+ self.assertEqual(obj, 'object')
+ self.assertEqual(policy, POLICIES[0])
+ self.assertEqual(policy.policy_type, EC_POLICY)
+
+ req.headers['X-Backend-Storage-Policy-Index'] = 1
+ device, part, account, container, obj, policy = \
+ get_name_and_placement(req, 5, 5, True)
+ self.assertEqual(device, 'device')
+ self.assertEqual(part, 'part')
+ self.assertEqual(account, 'account')
+ self.assertEqual(container, 'container')
+ self.assertEqual(obj, 'object')
+ self.assertEqual(policy, POLICIES[1])
+ self.assertEqual(policy.policy_type, REPL_POLICY)
+
+ req.headers['X-Backend-Storage-Policy-Index'] = 'foo'
+ try:
+ device, part, account, container, obj, policy = \
+ get_name_and_placement(req, 5, 5, True)
+ except HTTPException as e:
+ self.assertEqual(e.status_int, 503)
+ self.assertEqual(str(e), '503 Service Unavailable')
+ self.assertEqual(e.body, "No policy with index foo")
+ else:
+ self.fail('get_name_and_placement did not raise error '
+ 'for invalid storage policy index')
+
+ @patch_policies(with_ec_default=True)
+ def test_get_name_and_placement_object_replication(self):
+ # yup, suffixes are sent '-'.joined in the path
+ path = '/device/part/012-345-678-9ab-cde'
+ req = Request.blank(path, headers={
+ 'X-Backend-Storage-Policy-Index': '0'})
+ device, partition, suffix_parts, policy = \
+ get_name_and_placement(req, 2, 3, True)
+ self.assertEqual(device, 'device')
+ self.assertEqual(partition, 'part')
+ self.assertEqual(suffix_parts, '012-345-678-9ab-cde')
+ self.assertEqual(policy, POLICIES[0])
+ self.assertEqual(policy.policy_type, EC_POLICY)
+
+ path = '/device/part'
+ req = Request.blank(path, headers={
+ 'X-Backend-Storage-Policy-Index': '1'})
+ device, partition, suffix_parts, policy = \
+ get_name_and_placement(req, 2, 3, True)
+ self.assertEqual(device, 'device')
+ self.assertEqual(partition, 'part')
+ self.assertEqual(suffix_parts, None) # false-y
+ self.assertEqual(policy, POLICIES[1])
+ self.assertEqual(policy.policy_type, REPL_POLICY)
+
+ path = '/device/part/' # with a trailing slash
+ req = Request.blank(path, headers={
+ 'X-Backend-Storage-Policy-Index': '1'})
+ device, partition, suffix_parts, policy = \
+ get_name_and_placement(req, 2, 3, True)
+ self.assertEqual(device, 'device')
+ self.assertEqual(partition, 'part')
+ self.assertEqual(suffix_parts, '') # still false-y
+ self.assertEqual(policy, POLICIES[1])
+ self.assertEqual(policy.policy_type, REPL_POLICY)
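
For readers unfamiliar with the helper being exercised here, a rough sketch of
what get_name_and_placement does, inferred from the assertions above
(illustrative only; the real implementation lives in
swift.common.request_helpers):

    from swift.common.storage_policy import POLICIES
    from swift.common.swob import HTTPServiceUnavailable
    from swift.common.utils import split_path

    def get_name_and_placement_sketch(req, minsegs, maxsegs,
                                      rest_with_last=False):
        # resolve the policy from the backend header; unknown -> 503
        index = req.headers.get('X-Backend-Storage-Policy-Index')
        policy = POLICIES.get_by_index(index)
        if not policy:
            raise HTTPServiceUnavailable(
                body='No policy with index %s' % index, request=req)
        # split the path into device/part/... segments as usual
        results = split_path(req.path, minsegs, maxsegs, rest_with_last)
        return tuple(results) + (policy,)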
diff --git a/test/unit/common/test_storage_policy.py b/test/unit/common/test_storage_policy.py
index 21fed77ee..6406dc192 100644
--- a/test/unit/common/test_storage_policy.py
+++ b/test/unit/common/test_storage_policy.py
@@ -19,8 +19,23 @@ import mock
from tempfile import NamedTemporaryFile
from test.unit import patch_policies, FakeRing
from swift.common.storage_policy import (
- StoragePolicy, StoragePolicyCollection, POLICIES, PolicyError,
- parse_storage_policies, reload_storage_policies, get_policy_string)
+ StoragePolicyCollection, POLICIES, PolicyError, parse_storage_policies,
+ reload_storage_policies, get_policy_string, split_policy_string,
+ BaseStoragePolicy, StoragePolicy, ECStoragePolicy, REPL_POLICY, EC_POLICY,
+ VALID_EC_TYPES, DEFAULT_EC_OBJECT_SEGMENT_SIZE)
+from swift.common.exceptions import RingValidationError
+
+
+@BaseStoragePolicy.register('fake')
+class FakeStoragePolicy(BaseStoragePolicy):
+ """
+ Test StoragePolicy class - the only user at the moment is
+ test_validate_policies_type_invalid()
+ """
+ def __init__(self, idx, name='', is_default=False, is_deprecated=False,
+ object_ring=None):
+ super(FakeStoragePolicy, self).__init__(
+ idx, name, is_default, is_deprecated, object_ring)
class TestStoragePolicies(unittest.TestCase):
@@ -31,15 +46,35 @@ class TestStoragePolicies(unittest.TestCase):
conf.readfp(StringIO.StringIO(conf_str))
return conf
- @patch_policies([StoragePolicy(0, 'zero', True),
- StoragePolicy(1, 'one', False),
- StoragePolicy(2, 'two', False),
- StoragePolicy(3, 'three', False, is_deprecated=True)])
+ def assertRaisesWithMessage(self, exc_class, message, f, *args, **kwargs):
+ try:
+ f(*args, **kwargs)
+ except exc_class as err:
+ err_msg = str(err)
+ self.assert_(message in err_msg, 'Error message %r did not '
+ 'have expected substring %r' % (err_msg, message))
+ else:
+ self.fail('%r did not raise %s' % (message, exc_class.__name__))
+
+ def test_policy_baseclass_instantiate(self):
+ self.assertRaisesWithMessage(TypeError,
+ "Can't instantiate BaseStoragePolicy",
+ BaseStoragePolicy, 1, 'one')
+
+ @patch_policies([
+ StoragePolicy(0, 'zero', is_default=True),
+ StoragePolicy(1, 'one'),
+ StoragePolicy(2, 'two'),
+ StoragePolicy(3, 'three', is_deprecated=True),
+ ECStoragePolicy(10, 'ten', ec_type='jerasure_rs_vand',
+ ec_ndata=10, ec_nparity=4),
+ ])
def test_swift_info(self):
# the deprecated 'three' should not exist in expect
expect = [{'default': True, 'name': 'zero'},
{'name': 'two'},
- {'name': 'one'}]
+ {'name': 'one'},
+ {'name': 'ten'}]
swift_info = POLICIES.get_policy_info()
self.assertEquals(sorted(expect, key=lambda k: k['name']),
sorted(swift_info, key=lambda k: k['name']))
@@ -48,10 +83,48 @@ class TestStoragePolicies(unittest.TestCase):
def test_get_policy_string(self):
self.assertEquals(get_policy_string('something', 0), 'something')
self.assertEquals(get_policy_string('something', None), 'something')
+ self.assertEquals(get_policy_string('something', ''), 'something')
self.assertEquals(get_policy_string('something', 1),
'something' + '-1')
self.assertRaises(PolicyError, get_policy_string, 'something', 99)
+ @patch_policies
+ def test_split_policy_string(self):
+ expectations = {
+ 'something': ('something', POLICIES[0]),
+ 'something-1': ('something', POLICIES[1]),
+ 'tmp': ('tmp', POLICIES[0]),
+ 'objects': ('objects', POLICIES[0]),
+ 'tmp-1': ('tmp', POLICIES[1]),
+ 'objects-1': ('objects', POLICIES[1]),
+ 'objects-': PolicyError,
+ 'objects-0': PolicyError,
+ 'objects--1': ('objects-', POLICIES[1]),
+ 'objects-+1': PolicyError,
+ 'objects--': PolicyError,
+ 'objects-foo': PolicyError,
+ 'objects--bar': PolicyError,
+ 'objects-+bar': PolicyError,
+ # questionable, demonstrated as inverse of get_policy_string
+ 'objects+0': ('objects+0', POLICIES[0]),
+ '': ('', POLICIES[0]),
+ '0': ('0', POLICIES[0]),
+ '-1': ('', POLICIES[1]),
+ }
+ for policy_string, expected in expectations.items():
+ if expected == PolicyError:
+ try:
+ invalid = split_policy_string(policy_string)
+ except PolicyError:
+ continue # good
+ else:
+ self.fail('The string %r returned %r '
+ 'instead of raising a PolicyError' %
+ (policy_string, invalid))
+ self.assertEqual(expected, split_policy_string(policy_string))
+ # should be inverse of get_policy_string
+ self.assertEqual(policy_string, get_policy_string(*expected))
+
def test_defaults(self):
self.assertTrue(len(POLICIES) > 0)
@@ -66,7 +139,9 @@ class TestStoragePolicies(unittest.TestCase):
def test_storage_policy_repr(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
- StoragePolicy(2, 'cee', False)]
+ StoragePolicy(2, 'cee', False),
+ ECStoragePolicy(10, 'ten', ec_type='jerasure_rs_vand',
+ ec_ndata=10, ec_nparity=3)]
policies = StoragePolicyCollection(test_policies)
for policy in policies:
policy_repr = repr(policy)
@@ -75,6 +150,13 @@ class TestStoragePolicies(unittest.TestCase):
self.assert_('is_deprecated=%s' % policy.is_deprecated in
policy_repr)
self.assert_(policy.name in policy_repr)
+ if policy.policy_type == EC_POLICY:
+ self.assert_('ec_type=%s' % policy.ec_type in policy_repr)
+ self.assert_('ec_ndata=%s' % policy.ec_ndata in policy_repr)
+ self.assert_('ec_nparity=%s' %
+ policy.ec_nparity in policy_repr)
+ self.assert_('ec_segment_size=%s' %
+ policy.ec_segment_size in policy_repr)
collection_repr = repr(policies)
collection_repr_lines = collection_repr.splitlines()
self.assert_(policies.__class__.__name__ in collection_repr_lines[0])
@@ -157,15 +239,16 @@ class TestStoragePolicies(unittest.TestCase):
def test_validate_policy_params(self):
StoragePolicy(0, 'name') # sanity
# bogus indexes
- self.assertRaises(PolicyError, StoragePolicy, 'x', 'name')
- self.assertRaises(PolicyError, StoragePolicy, -1, 'name')
+ self.assertRaises(PolicyError, FakeStoragePolicy, 'x', 'name')
+ self.assertRaises(PolicyError, FakeStoragePolicy, -1, 'name')
+
# non-zero Policy-0
- self.assertRaisesWithMessage(PolicyError, 'reserved', StoragePolicy,
- 1, 'policy-0')
+ self.assertRaisesWithMessage(PolicyError, 'reserved',
+ FakeStoragePolicy, 1, 'policy-0')
# deprecate default
self.assertRaisesWithMessage(
PolicyError, 'Deprecated policy can not be default',
- StoragePolicy, 1, 'Policy-1', is_default=True,
+ FakeStoragePolicy, 1, 'Policy-1', is_default=True,
is_deprecated=True)
# weird names
names = (
@@ -178,7 +261,7 @@ class TestStoragePolicies(unittest.TestCase):
)
for name in names:
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
- StoragePolicy, 1, name)
+ FakeStoragePolicy, 1, name)
def test_validate_policies_names(self):
# duplicate names
@@ -188,6 +271,40 @@ class TestStoragePolicies(unittest.TestCase):
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
+ def test_validate_policies_type_default(self):
+ # no type specified - make sure the policy is initialized to
+ # DEFAULT_POLICY_TYPE
+ test_policy = FakeStoragePolicy(0, 'zero', True)
+ self.assertEquals(test_policy.policy_type, 'fake')
+
+ def test_validate_policies_type_invalid(self):
+ class BogusStoragePolicy(FakeStoragePolicy):
+ policy_type = 'bogus'
+ # unsupported policy type - initialization with FakeStoragePolicy
+ self.assertRaisesWithMessage(PolicyError, 'Invalid type',
+ BogusStoragePolicy, 1, 'one')
+
+ def test_policies_type_attribute(self):
+ test_policies = [
+ StoragePolicy(0, 'zero', is_default=True),
+ StoragePolicy(1, 'one'),
+ StoragePolicy(2, 'two'),
+ StoragePolicy(3, 'three', is_deprecated=True),
+ ECStoragePolicy(10, 'ten', ec_type='jerasure_rs_vand',
+ ec_ndata=10, ec_nparity=3),
+ ]
+ policies = StoragePolicyCollection(test_policies)
+ self.assertEquals(policies.get_by_index(0).policy_type,
+ REPL_POLICY)
+ self.assertEquals(policies.get_by_index(1).policy_type,
+ REPL_POLICY)
+ self.assertEquals(policies.get_by_index(2).policy_type,
+ REPL_POLICY)
+ self.assertEquals(policies.get_by_index(3).policy_type,
+ REPL_POLICY)
+ self.assertEquals(policies.get_by_index(10).policy_type,
+ EC_POLICY)
+
def test_names_are_normalized(self):
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'ZERO', False)]
@@ -207,16 +324,6 @@ class TestStoragePolicies(unittest.TestCase):
self.assertEqual(pol1, policies.get_by_name(name))
self.assertEqual(policies.get_by_name(name).name, 'One')
- def assertRaisesWithMessage(self, exc_class, message, f, *args, **kwargs):
- try:
- f(*args, **kwargs)
- except exc_class as err:
- err_msg = str(err)
- self.assert_(message in err_msg, 'Error message %r did not '
- 'have expected substring %r' % (err_msg, message))
- else:
- self.fail('%r did not raise %s' % (message, exc_class.__name__))
-
def test_deprecated_default(self):
bad_conf = self._conf("""
[storage-policy:1]
@@ -395,6 +502,133 @@ class TestStoragePolicies(unittest.TestCase):
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
+ # policy_type = erasure_coding
+
+ # missing ec_type, ec_num_data_fragments and ec_num_parity_fragments
+ bad_conf = self._conf("""
+ [storage-policy:0]
+ name = zero
+ [storage-policy:1]
+ name = ec10-4
+ policy_type = erasure_coding
+ """)
+
+ self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
+ parse_storage_policies, bad_conf)
+
+ # missing ec_type, but other options valid...
+ bad_conf = self._conf("""
+ [storage-policy:0]
+ name = zero
+ [storage-policy:1]
+ name = ec10-4
+ policy_type = erasure_coding
+ ec_num_data_fragments = 10
+ ec_num_parity_fragments = 4
+ """)
+
+ self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
+ parse_storage_policies, bad_conf)
+
+ # ec_type specified, but invalid...
+ bad_conf = self._conf("""
+ [storage-policy:0]
+ name = zero
+ default = yes
+ [storage-policy:1]
+ name = ec10-4
+ policy_type = erasure_coding
+ ec_type = garbage_alg
+ ec_num_data_fragments = 10
+ ec_num_parity_fragments = 4
+ """)
+
+ self.assertRaisesWithMessage(PolicyError,
+ 'Wrong ec_type garbage_alg for policy '
+ 'ec10-4, should be one of "%s"' %
+ (', '.join(VALID_EC_TYPES)),
+ parse_storage_policies, bad_conf)
+
+ # missing and invalid ec_num_parity_fragments
+ bad_conf = self._conf("""
+ [storage-policy:0]
+ name = zero
+ [storage-policy:1]
+ name = ec10-4
+ policy_type = erasure_coding
+ ec_type = jerasure_rs_vand
+ ec_num_data_fragments = 10
+ """)
+
+ self.assertRaisesWithMessage(PolicyError,
+ 'Invalid ec_num_parity_fragments',
+ parse_storage_policies, bad_conf)
+
+ for num_parity in ('-4', '0', 'x'):
+ bad_conf = self._conf("""
+ [storage-policy:0]
+ name = zero
+ [storage-policy:1]
+ name = ec10-4
+ policy_type = erasure_coding
+ ec_type = jerasure_rs_vand
+ ec_num_data_fragments = 10
+ ec_num_parity_fragments = %s
+ """ % num_parity)
+
+ self.assertRaisesWithMessage(PolicyError,
+ 'Invalid ec_num_parity_fragments',
+ parse_storage_policies, bad_conf)
+
+ # missing and invalid ec_num_data_fragments
+ bad_conf = self._conf("""
+ [storage-policy:0]
+ name = zero
+ [storage-policy:1]
+ name = ec10-4
+ policy_type = erasure_coding
+ ec_type = jerasure_rs_vand
+ ec_num_parity_fragments = 4
+ """)
+
+ self.assertRaisesWithMessage(PolicyError,
+ 'Invalid ec_num_data_fragments',
+ parse_storage_policies, bad_conf)
+
+ for num_data in ('-10', '0', 'x'):
+ bad_conf = self._conf("""
+ [storage-policy:0]
+ name = zero
+ [storage-policy:1]
+ name = ec10-4
+ policy_type = erasure_coding
+ ec_type = jerasure_rs_vand
+ ec_num_data_fragments = %s
+ ec_num_parity_fragments = 4
+ """ % num_data)
+
+ self.assertRaisesWithMessage(PolicyError,
+ 'Invalid ec_num_data_fragments',
+ parse_storage_policies, bad_conf)
+
+ # invalid ec_object_segment_size
+ for segment_size in ('-4', '0', 'x'):
+ bad_conf = self._conf("""
+ [storage-policy:0]
+ name = zero
+ [storage-policy:1]
+ name = ec10-4
+ policy_type = erasure_coding
+ ec_object_segment_size = %s
+ ec_type = jerasure_rs_vand
+ ec_num_data_fragments = 10
+ ec_num_parity_fragments = 4
+ """ % segment_size)
+
+ self.assertRaisesWithMessage(PolicyError,
+ 'Invalid ec_object_segment_size',
+ parse_storage_policies, bad_conf)
+
# Additional section added to ensure parser ignores other sections
conf = self._conf("""
[some-other-section]
@@ -430,6 +664,8 @@ class TestStoragePolicies(unittest.TestCase):
self.assertEquals("zero", policies.get_by_index(None).name)
self.assertEquals("zero", policies.get_by_index('').name)
+ self.assertEqual(policies.get_by_index(0), policies.legacy)
+
def test_reload_invalid_storage_policies(self):
conf = self._conf("""
[storage-policy:0]
@@ -512,18 +748,124 @@ class TestStoragePolicies(unittest.TestCase):
for policy in POLICIES:
self.assertEqual(POLICIES[int(policy)], policy)
- def test_storage_policy_get_options(self):
- policy = StoragePolicy(1, 'gold', True, False)
- self.assertEqual({'name': 'gold',
- 'default': True,
- 'deprecated': False},
- policy.get_options())
-
- policy = StoragePolicy(1, 'gold', False, True)
- self.assertEqual({'name': 'gold',
- 'default': False,
- 'deprecated': True},
- policy.get_options())
+ def test_quorum_size_replication(self):
+ expected_sizes = {1: 1,
+ 2: 2,
+ 3: 2,
+ 4: 3,
+ 5: 3}
+ for n, expected in expected_sizes.items():
+ policy = StoragePolicy(0, 'zero',
+ object_ring=FakeRing(replicas=n))
+ self.assertEqual(policy.quorum, expected)
+
+ def test_quorum_size_erasure_coding(self):
+ test_ec_policies = [
+ ECStoragePolicy(10, 'ec8-2', ec_type='jerasure_rs_vand',
+ ec_ndata=8, ec_nparity=2),
+ ECStoragePolicy(11, 'df10-6', ec_type='flat_xor_hd_4',
+ ec_ndata=10, ec_nparity=6),
+ ]
+ for ec_policy in test_ec_policies:
+ k = ec_policy.ec_ndata
+ expected_size = \
+ k + ec_policy.pyeclib_driver.min_parity_fragments_needed()
+ self.assertEqual(expected_size, ec_policy.quorum)
+
+ def test_validate_ring(self):
+ test_policies = [
+ ECStoragePolicy(0, 'ec8-2', ec_type='jerasure_rs_vand',
+ ec_ndata=8, ec_nparity=2,
+ object_ring=FakeRing(replicas=8),
+ is_default=True),
+ ECStoragePolicy(1, 'ec10-4', ec_type='jerasure_rs_vand',
+ ec_ndata=10, ec_nparity=4,
+ object_ring=FakeRing(replicas=10)),
+ ECStoragePolicy(2, 'ec4-2', ec_type='jerasure_rs_vand',
+ ec_ndata=4, ec_nparity=2,
+ object_ring=FakeRing(replicas=7)),
+ ]
+ policies = StoragePolicyCollection(test_policies)
+
+ for policy in policies:
+ msg = 'EC ring for policy %s needs to be configured with ' \
+ 'exactly %d nodes.' % \
+ (policy.name, policy.ec_ndata + policy.ec_nparity)
+ self.assertRaisesWithMessage(
+ RingValidationError, msg,
+ policy._validate_ring)
+
+ def test_storage_policy_get_info(self):
+ test_policies = [
+ StoragePolicy(0, 'zero', is_default=True),
+ StoragePolicy(1, 'one', is_deprecated=True),
+ ECStoragePolicy(10, 'ten',
+ ec_type='jerasure_rs_vand',
+ ec_ndata=10, ec_nparity=3),
+ ECStoragePolicy(11, 'done', is_deprecated=True,
+ ec_type='jerasure_rs_vand',
+ ec_ndata=10, ec_nparity=3),
+ ]
+ policies = StoragePolicyCollection(test_policies)
+ expected = {
+ # default replication
+ (0, True): {
+ 'name': 'zero',
+ 'default': True,
+ 'deprecated': False,
+ 'policy_type': REPL_POLICY
+ },
+ (0, False): {
+ 'name': 'zero',
+ 'default': True,
+ },
+ # deprecated replication
+ (1, True): {
+ 'name': 'one',
+ 'default': False,
+ 'deprecated': True,
+ 'policy_type': REPL_POLICY
+ },
+ (1, False): {
+ 'name': 'one',
+ 'deprecated': True,
+ },
+ # enabled ec
+ (10, True): {
+ 'name': 'ten',
+ 'default': False,
+ 'deprecated': False,
+ 'policy_type': EC_POLICY,
+ 'ec_type': 'jerasure_rs_vand',
+ 'ec_num_data_fragments': 10,
+ 'ec_num_parity_fragments': 3,
+ 'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
+ },
+ (10, False): {
+ 'name': 'ten',
+ },
+ # deprecated ec
+ (11, True): {
+ 'name': 'done',
+ 'default': False,
+ 'deprecated': True,
+ 'policy_type': EC_POLICY,
+ 'ec_type': 'jerasure_rs_vand',
+ 'ec_num_data_fragments': 10,
+ 'ec_num_parity_fragments': 3,
+ 'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
+ },
+ (11, False): {
+ 'name': 'done',
+ 'deprecated': True,
+ },
+ }
+ self.maxDiff = None
+ for policy in policies:
+ expected_info = expected[(int(policy), True)]
+ self.assertEqual(policy.get_info(config=True), expected_info)
+ expected_info = expected[(int(policy), False)]
+ self.assertEqual(policy.get_info(config=False), expected_info)
if __name__ == '__main__':
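
The two quorum rules exercised in test_quorum_size_replication and
test_quorum_size_erasure_coding boil down to the following (a sketch, not the
actual implementation):

    # replication: a simple majority of replicas
    def replication_quorum(replicas):
        return replicas // 2 + 1  # {1: 1, 2: 2, 3: 2, 4: 3, 5: 3}

    # erasure coding: all data fragments plus however many parity
    # fragments pyeclib says are needed for a durable write
    def ec_quorum(policy):
        return (policy.ec_ndata +
                policy.pyeclib_driver.min_parity_fragments_needed())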
diff --git a/test/unit/common/test_swob.py b/test/unit/common/test_swob.py
index fffb33ecf..7015abb8e 100644
--- a/test/unit/common/test_swob.py
+++ b/test/unit/common/test_swob.py
@@ -1553,6 +1553,17 @@ class TestConditionalIfMatch(unittest.TestCase):
self.assertEquals(resp.status_int, 200)
self.assertEquals(body, 'hi')
+ def test_simple_conditional_etag_match(self):
+ # if etag matches, proceed as normal
+ req = swift.common.swob.Request.blank(
+ '/', headers={'If-Match': 'not-the-etag'})
+ resp = req.get_response(self.fake_app)
+ resp.conditional_response = True
+ resp._conditional_etag = 'not-the-etag'
+ body = ''.join(resp(req.environ, self.fake_start_response))
+ self.assertEquals(resp.status_int, 200)
+ self.assertEquals(body, 'hi')
+
def test_quoted_simple_match(self):
# double quotes or not, doesn't matter
req = swift.common.swob.Request.blank(
@@ -1573,6 +1584,16 @@ class TestConditionalIfMatch(unittest.TestCase):
self.assertEquals(resp.status_int, 412)
self.assertEquals(body, '')
+ def test_simple_conditional_etag_no_match(self):
+ req = swift.common.swob.Request.blank(
+ '/', headers={'If-Match': 'the-etag'})
+ resp = req.get_response(self.fake_app)
+ resp.conditional_response = True
+ resp._conditional_etag = 'not-the-etag'
+ body = ''.join(resp(req.environ, self.fake_start_response))
+ self.assertEquals(resp.status_int, 412)
+ self.assertEquals(body, '')
+
def test_match_star(self):
# "*" means match anything; see RFC 2616 section 14.24
req = swift.common.swob.Request.blank(
diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py
index 1489501e5..22aa3db5e 100644
--- a/test/unit/common/test_utils.py
+++ b/test/unit/common/test_utils.py
@@ -2190,13 +2190,14 @@ cluster_dfw1 = http://dfw1.host/v1/
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
- def test_quorum_size(self):
+ def test_replication_quorum_size(self):
expected_sizes = {1: 1,
2: 2,
3: 2,
4: 3,
5: 3}
- got_sizes = dict([(n, utils.quorum_size(n)) for n in expected_sizes])
+ got_sizes = dict([(n, utils.quorum_size(n))
+ for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
@@ -4593,6 +4594,22 @@ class TestLRUCache(unittest.TestCase):
self.assertEqual(f.size(), 4)
+class TestParseContentRange(unittest.TestCase):
+ def test_good(self):
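+ # parse_content_range splits "bytes <first>-<last>/<total>" into ints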
+ start, end, total = utils.parse_content_range("bytes 100-200/300")
+ self.assertEqual(start, 100)
+ self.assertEqual(end, 200)
+ self.assertEqual(total, 300)
+
+ def test_bad(self):
+ self.assertRaises(ValueError, utils.parse_content_range,
+ "100-300/500")
+ self.assertRaises(ValueError, utils.parse_content_range,
+ "bytes 100-200/aardvark")
+ self.assertRaises(ValueError, utils.parse_content_range,
+ "bytes bulbous-bouffant/4994801")
+
+
class TestParseContentDisposition(unittest.TestCase):
def test_basic_content_type(self):
@@ -4622,7 +4639,8 @@ class TestIterMultipartMimeDocuments(unittest.TestCase):
it.next()
except MimeInvalid as err:
exc = err
- self.assertEquals(str(exc), 'invalid starting boundary')
+ self.assertTrue('invalid starting boundary' in str(exc))
+ self.assertTrue('--unique' in str(exc))
def test_empty(self):
it = utils.iter_multipart_mime_documents(StringIO('--unique'),
diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py
index 67142decd..279eb8624 100644
--- a/test/unit/common/test_wsgi.py
+++ b/test/unit/common/test_wsgi.py
@@ -156,6 +156,27 @@ class TestWSGI(unittest.TestCase):
logger.info('testing')
self.assertEquals('proxy-server', log_name)
+ @with_tempdir
+ def test_loadapp_from_file(self, tempdir):
+ conf_path = os.path.join(tempdir, 'object-server.conf')
+ conf_body = """
+ [app:main]
+ use = egg:swift#object
+ """
+ contents = dedent(conf_body)
+ with open(conf_path, 'w') as f:
+ f.write(contents)
+ app = wsgi.loadapp(conf_path)
+ self.assertTrue(isinstance(app, obj_server.ObjectController))
+
+ def test_loadapp_from_string(self):
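+ # ConfigString wraps the config text in an in-memory file-like
+ # object, so no file on disk is needed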
+ conf_body = """
+ [app:main]
+ use = egg:swift#object
+ """
+ app = wsgi.loadapp(wsgi.ConfigString(conf_body))
+ self.assertTrue(isinstance(app, obj_server.ObjectController))
+
def test_init_request_processor_from_conf_dir(self):
config_dir = {
'proxy-server.conf.d/pipeline.conf': """
diff --git a/test/unit/container/test_sync.py b/test/unit/container/test_sync.py
index aa5cebc28..8c6d89532 100644
--- a/test/unit/container/test_sync.py
+++ b/test/unit/container/test_sync.py
@@ -14,17 +14,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import re
+import os
import unittest
from contextlib import nested
+from textwrap import dedent
import mock
-from test.unit import FakeLogger
+from test.unit import debug_logger
from swift.container import sync
from swift.common import utils
+from swift.common.wsgi import ConfigString
from swift.common.exceptions import ClientException
from swift.common.storage_policy import StoragePolicy
-from test.unit import patch_policies
+import test
+from test.unit import patch_policies, with_tempdir
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'endcap'
@@ -71,6 +74,9 @@ class FakeContainerBroker(object):
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestContainerSync(unittest.TestCase):
+ def setUp(self):
+ self.logger = debug_logger('test-container-sync')
+
def test_FileLikeIter(self):
# Retained test to show new FileLikeIter acts just like the removed
# _Iter2FileLikeObject did.
@@ -96,10 +102,55 @@ class TestContainerSync(unittest.TestCase):
self.assertEquals(flo.read(), '')
self.assertEquals(flo.read(2), '')
- def test_init(self):
+ def assertLogMessage(self, msg_level, expected, skip=0):
+ for line in self.logger.get_lines_for_level(msg_level)[skip:]:
+ msg = 'expected %r not in %r' % (expected, line)
+ self.assertTrue(expected in line, msg)
+
+ @with_tempdir
+ def test_init(self, tempdir):
+ ic_conf_path = os.path.join(tempdir, 'internal-client.conf')
cring = FakeRing()
- cs = sync.ContainerSync({}, container_ring=cring)
+
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=cring)
+ self.assertTrue(cs.container_ring is cring)
+
+ # specified but not exists will not start
+ conf = {'internal_client_conf_path': ic_conf_path}
+ self.assertRaises(SystemExit, sync.ContainerSync, conf,
+ container_ring=cring, logger=self.logger)
+
+ # not specified will use default conf
+ with mock.patch('swift.container.sync.InternalClient') as mock_ic:
+ cs = sync.ContainerSync({}, container_ring=cring,
+ logger=self.logger)
+ self.assertTrue(cs.container_ring is cring)
+ self.assertTrue(mock_ic.called)
+ conf_path, name, retry = mock_ic.call_args[0]
+ self.assertTrue(isinstance(conf_path, ConfigString))
+ self.assertEquals(conf_path.contents.getvalue(),
+ dedent(sync.ic_conf_body))
+ self.assertLogMessage('warning', 'internal_client_conf_path')
+ self.assertLogMessage('warning', 'internal-client.conf-sample')
+
+ # correct
+ contents = dedent(sync.ic_conf_body)
+ with open(ic_conf_path, 'w') as f:
+ f.write(contents)
+ with mock.patch('swift.container.sync.InternalClient') as mock_ic:
+ cs = sync.ContainerSync(conf, container_ring=cring)
self.assertTrue(cs.container_ring is cring)
+ self.assertTrue(mock_ic.called)
+ conf_path, name, retry = mock_ic.call_args[0]
+ self.assertEquals(conf_path, ic_conf_path)
+
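+ # the default in-code conf body must match the shipped sample file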
+ sample_conf_filename = os.path.join(
+ os.path.dirname(test.__file__),
+ '../etc/internal-client.conf-sample')
+ with open(sample_conf_filename) as sample_conf_file:
+ sample_conf = sample_conf_file.read()
+ self.assertEqual(contents, sample_conf)
def test_run_forever(self):
# This runs run_forever with fakes to succeed for two loops, the first
@@ -142,7 +193,9 @@ class TestContainerSync(unittest.TestCase):
'storage_policy_index': 0})
sync.time = fake_time
sync.sleep = fake_sleep
- cs = sync.ContainerSync({}, container_ring=FakeRing())
+
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=FakeRing())
sync.audit_location_generator = fake_audit_location_generator
cs.run_forever(1, 2, a=3, b=4, verbose=True)
except Exception as err:
@@ -197,7 +250,9 @@ class TestContainerSync(unittest.TestCase):
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0})
sync.time = fake_time
- cs = sync.ContainerSync({}, container_ring=FakeRing())
+
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=FakeRing())
sync.audit_location_generator = fake_audit_location_generator
cs.run_once(1, 2, a=3, b=4, verbose=True)
self.assertEquals(time_calls, [6])
@@ -218,12 +273,14 @@ class TestContainerSync(unittest.TestCase):
def test_container_sync_not_db(self):
cring = FakeRing()
- cs = sync.ContainerSync({}, container_ring=cring)
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=cring)
self.assertEquals(cs.container_failures, 0)
def test_container_sync_missing_db(self):
cring = FakeRing()
- cs = sync.ContainerSync({}, container_ring=cring)
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=cring)
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
@@ -231,7 +288,8 @@ class TestContainerSync(unittest.TestCase):
# Db could be there due to handoff replication so test that we ignore
# those.
cring = FakeRing()
- cs = sync.ContainerSync({}, container_ring=cring)
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=cring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
@@ -263,7 +321,8 @@ class TestContainerSync(unittest.TestCase):
def test_container_sync_deleted(self):
cring = FakeRing()
- cs = sync.ContainerSync({}, container_ring=cring)
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=cring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
@@ -288,7 +347,8 @@ class TestContainerSync(unittest.TestCase):
def test_container_sync_no_to_or_key(self):
cring = FakeRing()
- cs = sync.ContainerSync({}, container_ring=cring)
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=cring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
@@ -368,7 +428,8 @@ class TestContainerSync(unittest.TestCase):
def test_container_stop_at(self):
cring = FakeRing()
- cs = sync.ContainerSync({}, container_ring=cring)
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=cring)
orig_ContainerBroker = sync.ContainerBroker
orig_time = sync.time
try:
@@ -411,7 +472,8 @@ class TestContainerSync(unittest.TestCase):
def test_container_first_loop(self):
cring = FakeRing()
- cs = sync.ContainerSync({}, container_ring=cring)
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=cring)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for full syncing, ordinal is 0 and
@@ -543,7 +605,9 @@ class TestContainerSync(unittest.TestCase):
def test_container_second_loop(self):
cring = FakeRing()
- cs = sync.ContainerSync({}, container_ring=cring)
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=cring,
+ logger=self.logger)
orig_ContainerBroker = sync.ContainerBroker
orig_hash_path = sync.hash_path
orig_delete_object = sync.delete_object
@@ -649,7 +713,6 @@ class TestContainerSync(unittest.TestCase):
hex = 'abcdef'
sync.uuid = FakeUUID
- fake_logger = FakeLogger()
def fake_delete_object(path, name=None, headers=None, proxy=None,
logger=None, timeout=None):
@@ -665,12 +728,14 @@ class TestContainerSync(unittest.TestCase):
headers,
{'x-container-sync-key': 'key', 'x-timestamp': '1.2'})
self.assertEquals(proxy, 'http://proxy')
- self.assertEqual(logger, fake_logger)
self.assertEqual(timeout, 5.0)
+ self.assertEqual(logger, self.logger)
sync.delete_object = fake_delete_object
- cs = sync.ContainerSync({}, container_ring=FakeRing())
- cs.logger = fake_logger
+
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=FakeRing(),
+ logger=self.logger)
cs.http_proxies = ['http://proxy']
# Success
self.assertTrue(cs.container_sync_row(
@@ -749,7 +814,6 @@ class TestContainerSync(unittest.TestCase):
orig_uuid = sync.uuid
orig_shuffle = sync.shuffle
orig_put_object = sync.put_object
- orig_direct_get_object = sync.direct_get_object
try:
class FakeUUID(object):
class uuid4(object):
@@ -757,7 +821,6 @@ class TestContainerSync(unittest.TestCase):
sync.uuid = FakeUUID
sync.shuffle = lambda x: x
- fake_logger = FakeLogger()
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None, logger=None,
@@ -781,24 +844,25 @@ class TestContainerSync(unittest.TestCase):
'content-type': 'text/plain'})
self.assertEquals(contents.read(), 'contents')
self.assertEquals(proxy, 'http://proxy')
- self.assertEqual(logger, fake_logger)
self.assertEqual(timeout, 5.0)
+ self.assertEqual(logger, self.logger)
sync.put_object = fake_put_object
- cs = sync.ContainerSync({}, container_ring=FakeRing())
- cs.logger = fake_logger
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync({}, container_ring=FakeRing(),
+ logger=self.logger)
cs.http_proxies = ['http://proxy']
- def fake_direct_get_object(node, part, account, container, obj,
- headers, resp_chunk_size=1):
- self.assertEquals(headers['X-Backend-Storage-Policy-Index'],
- '0')
- return ({'other-header': 'other header value',
- 'etag': '"etagvalue"', 'x-timestamp': '1.2',
- 'content-type': 'text/plain; swift_bytes=123'},
+ def fake_get_object(acct, con, obj, headers, acceptable_statuses):
+ self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
+ '0')
+ return (200, {'other-header': 'other header value',
+ 'etag': '"etagvalue"', 'x-timestamp': '1.2',
+ 'content-type': 'text/plain; swift_bytes=123'},
iter('contents'))
- sync.direct_get_object = fake_direct_get_object
+
+ cs.swift.get_object = fake_get_object
# Success as everything says it worked
self.assertTrue(cs.container_sync_row(
{'deleted': False,
@@ -809,19 +873,19 @@ class TestContainerSync(unittest.TestCase):
realm, realm_key))
self.assertEquals(cs.container_puts, 1)
- def fake_direct_get_object(node, part, account, container, obj,
- headers, resp_chunk_size=1):
+ def fake_get_object(acct, con, obj, headers, acceptable_statuses):
+ self.assertEquals(headers['X-Newest'], True)
self.assertEquals(headers['X-Backend-Storage-Policy-Index'],
'0')
- return ({'date': 'date value',
- 'last-modified': 'last modified value',
- 'x-timestamp': '1.2',
- 'other-header': 'other header value',
- 'etag': '"etagvalue"',
- 'content-type': 'text/plain; swift_bytes=123'},
+ return (200, {'date': 'date value',
+ 'last-modified': 'last modified value',
+ 'x-timestamp': '1.2',
+ 'other-header': 'other header value',
+ 'etag': '"etagvalue"',
+ 'content-type': 'text/plain; swift_bytes=123'},
iter('contents'))
- sync.direct_get_object = fake_direct_get_object
+ cs.swift.get_object = fake_get_object
# Success as everything says it worked, also checks 'date' and
# 'last-modified' headers are removed and that 'etag' header is
# stripped of double quotes.
@@ -836,14 +900,14 @@ class TestContainerSync(unittest.TestCase):
exc = []
- def fake_direct_get_object(node, part, account, container, obj,
- headers, resp_chunk_size=1):
+ def fake_get_object(acct, con, obj, headers, acceptable_statuses):
+ self.assertEquals(headers['X-Newest'], True)
self.assertEquals(headers['X-Backend-Storage-Policy-Index'],
'0')
exc.append(Exception('test exception'))
raise exc[-1]
- sync.direct_get_object = fake_direct_get_object
+ cs.swift.get_object = fake_get_object
# Fail due to completely unexpected exception
self.assertFalse(cs.container_sync_row(
{'deleted': False,
@@ -853,22 +917,20 @@ class TestContainerSync(unittest.TestCase):
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEquals(cs.container_puts, 2)
- self.assertEquals(len(exc), 3)
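+ # retries now happen inside InternalClient, so the fake get_object
+ # is called once rather than once per storage node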
+ self.assertEquals(len(exc), 1)
self.assertEquals(str(exc[-1]), 'test exception')
exc = []
- def fake_direct_get_object(node, part, account, container, obj,
- headers, resp_chunk_size=1):
+ def fake_get_object(acct, con, obj, headers, acceptable_statuses):
+ self.assertEquals(headers['X-Newest'], True)
self.assertEquals(headers['X-Backend-Storage-Policy-Index'],
'0')
- if len(exc) == 0:
- exc.append(Exception('test other exception'))
- else:
- exc.append(ClientException('test client exception'))
+
+ exc.append(ClientException('test client exception'))
raise exc[-1]
- sync.direct_get_object = fake_direct_get_object
+ cs.swift.get_object = fake_get_object
- # Fail due to all direct_get_object calls failing
+ # Fail due to the get_object call failing
self.assertFalse(cs.container_sync_row(
{'deleted': False,
@@ -878,25 +940,22 @@ class TestContainerSync(unittest.TestCase):
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEquals(cs.container_puts, 2)
- self.assertEquals(len(exc), 3)
- self.assertEquals(str(exc[-3]), 'test other exception')
- self.assertEquals(str(exc[-2]), 'test client exception')
+ self.assertEquals(len(exc), 1)
self.assertEquals(str(exc[-1]), 'test client exception')
- def fake_direct_get_object(node, part, account, container, obj,
- headers, resp_chunk_size=1):
+ def fake_get_object(acct, con, obj, headers, acceptable_statuses):
+ self.assertEquals(headers['X-Newest'], True)
self.assertEquals(headers['X-Backend-Storage-Policy-Index'],
'0')
- return ({'other-header': 'other header value',
- 'x-timestamp': '1.2', 'etag': '"etagvalue"'},
+ return (200, {'other-header': 'other header value',
+ 'x-timestamp': '1.2', 'etag': '"etagvalue"'},
iter('contents'))
def fake_put_object(*args, **kwargs):
raise ClientException('test client exception', http_status=401)
- sync.direct_get_object = fake_direct_get_object
+ cs.swift.get_object = fake_get_object
sync.put_object = fake_put_object
- cs.logger = FakeLogger()
# Fail due to 401
self.assertFalse(cs.container_sync_row(
{'deleted': False,
@@ -906,15 +965,13 @@ class TestContainerSync(unittest.TestCase):
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEquals(cs.container_puts, 2)
- self.assert_(re.match('Unauth ',
- cs.logger.log_dict['info'][0][0][0]))
+ self.assertLogMessage('info', 'Unauth')
def fake_put_object(*args, **kwargs):
raise ClientException('test client exception', http_status=404)
sync.put_object = fake_put_object
# Fail due to 404
- cs.logger = FakeLogger()
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
@@ -923,8 +980,7 @@ class TestContainerSync(unittest.TestCase):
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEquals(cs.container_puts, 2)
- self.assert_(re.match('Not found ',
- cs.logger.log_dict['info'][0][0][0]))
+ self.assertLogMessage('info', 'Not found', 1)
def fake_put_object(*args, **kwargs):
raise ClientException('test client exception', http_status=503)
@@ -939,29 +995,32 @@ class TestContainerSync(unittest.TestCase):
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEquals(cs.container_puts, 2)
- self.assertTrue(
- cs.logger.log_dict['exception'][0][0][0].startswith(
- 'ERROR Syncing '))
+ self.assertLogMessage('error', 'ERROR Syncing')
finally:
sync.uuid = orig_uuid
sync.shuffle = orig_shuffle
sync.put_object = orig_put_object
- sync.direct_get_object = orig_direct_get_object
def test_select_http_proxy_None(self):
- cs = sync.ContainerSync(
- {'sync_proxy': ''}, container_ring=FakeRing())
+
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync(
+ {'sync_proxy': ''}, container_ring=FakeRing())
self.assertEqual(cs.select_http_proxy(), None)
def test_select_http_proxy_one(self):
- cs = sync.ContainerSync(
- {'sync_proxy': 'http://one'}, container_ring=FakeRing())
+
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync(
+ {'sync_proxy': 'http://one'}, container_ring=FakeRing())
self.assertEqual(cs.select_http_proxy(), 'http://one')
def test_select_http_proxy_multiple(self):
- cs = sync.ContainerSync(
- {'sync_proxy': 'http://one,http://two,http://three'},
- container_ring=FakeRing())
+
+ with mock.patch('swift.container.sync.InternalClient'):
+ cs = sync.ContainerSync(
+ {'sync_proxy': 'http://one,http://two,http://three'},
+ container_ring=FakeRing())
self.assertEqual(
set(cs.http_proxies),
set(['http://one', 'http://two', 'http://three']))
diff --git a/test/unit/obj/test_auditor.py b/test/unit/obj/test_auditor.py
index e8f8a2b16..3cfcb4757 100644
--- a/test/unit/obj/test_auditor.py
+++ b/test/unit/obj/test_auditor.py
@@ -28,7 +28,7 @@ from swift.obj.diskfile import DiskFile, write_metadata, invalidate_hash, \
get_data_dir, DiskFileManager, AuditLocation
from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \
storage_directory
-from swift.common.storage_policy import StoragePolicy
+from swift.common.storage_policy import StoragePolicy, POLICIES
_mocked_policies = [StoragePolicy(0, 'zero', False),
@@ -48,12 +48,16 @@ class TestAuditor(unittest.TestCase):
os.mkdir(os.path.join(self.devices, 'sdb'))
# policy 0
- self.objects = os.path.join(self.devices, 'sda', get_data_dir(0))
- self.objects_2 = os.path.join(self.devices, 'sdb', get_data_dir(0))
+ self.objects = os.path.join(self.devices, 'sda',
+ get_data_dir(POLICIES[0]))
+ self.objects_2 = os.path.join(self.devices, 'sdb',
+ get_data_dir(POLICIES[0]))
os.mkdir(self.objects)
# policy 1
- self.objects_p1 = os.path.join(self.devices, 'sda', get_data_dir(1))
- self.objects_2_p1 = os.path.join(self.devices, 'sdb', get_data_dir(1))
+ self.objects_p1 = os.path.join(self.devices, 'sda',
+ get_data_dir(POLICIES[1]))
+ self.objects_2_p1 = os.path.join(self.devices, 'sdb',
+ get_data_dir(POLICIES[1]))
os.mkdir(self.objects_p1)
self.parts = self.parts_p1 = {}
@@ -70,9 +74,10 @@ class TestAuditor(unittest.TestCase):
self.df_mgr = DiskFileManager(self.conf, self.logger)
# diskfiles for policy 0, 1
- self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o', 0)
+ self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o',
+ policy=POLICIES[0])
self.disk_file_p1 = self.df_mgr.get_diskfile('sda', '0', 'a', 'c',
- 'o', 1)
+ 'o', policy=POLICIES[1])
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
@@ -125,13 +130,15 @@ class TestAuditor(unittest.TestCase):
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(
- AuditLocation(disk_file._datadir, 'sda', '0'))
+ AuditLocation(disk_file._datadir, 'sda', '0',
+ policy=POLICIES.legacy))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
os.write(writer._fd, 'extra_data')
auditor_worker.object_audit(
- AuditLocation(disk_file._datadir, 'sda', '0'))
+ AuditLocation(disk_file._datadir, 'sda', '0',
+ policy=POLICIES.legacy))
self.assertEquals(auditor_worker.quarantines,
pre_quarantines + 1)
run_tests(self.disk_file)
@@ -156,10 +163,12 @@ class TestAuditor(unittest.TestCase):
pre_quarantines = auditor_worker.quarantines
# remake so it will have metadata
- self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
+ self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
auditor_worker.object_audit(
- AuditLocation(self.disk_file._datadir, 'sda', '0'))
+ AuditLocation(self.disk_file._datadir, 'sda', '0',
+ policy=POLICIES.legacy))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
etag = md5()
etag.update('1' + '0' * 1023)
@@ -171,7 +180,8 @@ class TestAuditor(unittest.TestCase):
writer.put(metadata)
auditor_worker.object_audit(
- AuditLocation(self.disk_file._datadir, 'sda', '0'))
+ AuditLocation(self.disk_file._datadir, 'sda', '0',
+ policy=POLICIES.legacy))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_no_meta(self):
@@ -186,7 +196,8 @@ class TestAuditor(unittest.TestCase):
self.rcache, self.devices)
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(
- AuditLocation(self.disk_file._datadir, 'sda', '0'))
+ AuditLocation(self.disk_file._datadir, 'sda', '0',
+ policy=POLICIES.legacy))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_will_not_swallow_errors_in_tests(self):
@@ -203,7 +214,8 @@ class TestAuditor(unittest.TestCase):
with mock.patch.object(DiskFileManager,
'get_diskfile_from_audit_location', blowup):
self.assertRaises(NameError, auditor_worker.object_audit,
- AuditLocation(os.path.dirname(path), 'sda', '0'))
+ AuditLocation(os.path.dirname(path), 'sda', '0',
+ policy=POLICIES.legacy))
def test_failsafe_object_audit_will_swallow_errors_in_tests(self):
timestamp = str(normalize_timestamp(time.time()))
@@ -216,9 +228,11 @@ class TestAuditor(unittest.TestCase):
def blowup(*args):
raise NameError('tpyo')
- with mock.patch('swift.obj.diskfile.DiskFile', blowup):
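+ # DiskFile is now reached via the manager's diskfile_cls attribute,
+ # so patch that rather than the module-level name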
+ with mock.patch('swift.obj.diskfile.DiskFileManager.diskfile_cls',
+ blowup):
auditor_worker.failsafe_object_audit(
- AuditLocation(os.path.dirname(path), 'sda', '0'))
+ AuditLocation(os.path.dirname(path), 'sda', '0',
+ policy=POLICIES.legacy))
self.assertEquals(auditor_worker.errors, 1)
def test_generic_exception_handling(self):
@@ -240,7 +254,8 @@ class TestAuditor(unittest.TestCase):
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
- with mock.patch('swift.obj.diskfile.DiskFile', lambda *_: 1 / 0):
+ with mock.patch('swift.obj.diskfile.DiskFileManager.diskfile_cls',
+ lambda *_: 1 / 0):
auditor_worker.audit_all_objects()
self.assertEquals(auditor_worker.errors, pre_errors + 1)
@@ -368,7 +383,8 @@ class TestAuditor(unittest.TestCase):
}
writer.put(metadata)
auditor_worker.audit_all_objects()
- self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'ob')
+ self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'ob',
+ policy=POLICIES.legacy)
data = '1' * 10
etag = md5()
with self.disk_file.create() as writer:
@@ -424,7 +440,7 @@ class TestAuditor(unittest.TestCase):
name_hash = hash_path('a', 'c', 'o')
dir_path = os.path.join(
self.devices, 'sda',
- storage_directory(get_data_dir(0), '0', name_hash))
+ storage_directory(get_data_dir(POLICIES[0]), '0', name_hash))
ts_file_path = os.path.join(dir_path, '99999.ts')
if not os.path.exists(dir_path):
mkdirs(dir_path)
@@ -474,9 +490,8 @@ class TestAuditor(unittest.TestCase):
DiskFile._quarantine(self, data_file, msg)
self.setup_bad_zero_byte()
- was_df = auditor.diskfile.DiskFile
- try:
- auditor.diskfile.DiskFile = FakeFile
+ with mock.patch('swift.obj.diskfile.DiskFileManager.diskfile_cls',
+ FakeFile):
kwargs = {'mode': 'once'}
kwargs['zero_byte_fps'] = 50
self.auditor.run_audit(**kwargs)
@@ -484,8 +499,6 @@ class TestAuditor(unittest.TestCase):
'sda', 'quarantined', 'objects')
self.assertTrue(os.path.isdir(quarantine_path))
self.assertTrue(rat[0])
- finally:
- auditor.diskfile.DiskFile = was_df
@mock.patch.object(auditor.ObjectAuditor, 'run_audit')
@mock.patch('os.fork', return_value=0)
diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py
index cc6747555..2ccf3b136 100644
--- a/test/unit/obj/test_diskfile.py
+++ b/test/unit/obj/test_diskfile.py
@@ -19,6 +19,7 @@
import cPickle as pickle
import os
import errno
+import itertools
import mock
import unittest
import email
@@ -26,6 +27,8 @@ import tempfile
import uuid
import xattr
import re
+from collections import defaultdict
+from random import shuffle, randint
from shutil import rmtree
from time import time
from tempfile import mkdtemp
@@ -35,7 +38,7 @@ from gzip import GzipFile
from eventlet import hubs, timeout, tpool
from test.unit import (FakeLogger, mock as unit_mock, temptree,
- patch_policies, debug_logger)
+ patch_policies, debug_logger, EMPTY_ETAG)
from nose import SkipTest
from swift.obj import diskfile
@@ -45,32 +48,61 @@ from swift.common import ring
from swift.common.splice import splice
from swift.common.exceptions import DiskFileNotExist, DiskFileQuarantined, \
DiskFileDeviceUnavailable, DiskFileDeleted, DiskFileNotOpen, \
- DiskFileError, ReplicationLockTimeout, PathNotDir, DiskFileCollision, \
+ DiskFileError, ReplicationLockTimeout, DiskFileCollision, \
DiskFileExpired, SwiftException, DiskFileNoSpace, DiskFileXattrNotSupported
-from swift.common.storage_policy import POLICIES, get_policy_string
-from functools import partial
-
-
-get_data_dir = partial(get_policy_string, diskfile.DATADIR_BASE)
-get_tmp_dir = partial(get_policy_string, diskfile.TMP_BASE)
-
-
-def _create_test_ring(path):
- testgz = os.path.join(path, 'object.ring.gz')
+from swift.common.storage_policy import (
+ POLICIES, get_policy_string, StoragePolicy, ECStoragePolicy,
+ BaseStoragePolicy, REPL_POLICY, EC_POLICY)
+
+
+test_policies = [
+ StoragePolicy(0, name='zero', is_default=True),
+ ECStoragePolicy(1, name='one', is_default=False,
+ ec_type='jerasure_rs_vand',
+ ec_ndata=10, ec_nparity=4),
+]
+
+
+def find_paths_with_matching_suffixes(needed_matches=2, needed_suffixes=3):
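+ # generate random object paths until at least needed_suffixes
+ # distinct suffixes are seen and one suffix has needed_matches paths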
+ paths = defaultdict(list)
+ while True:
+ path = ('a', 'c', uuid.uuid4().hex)
+ hash_ = hash_path(*path)
+ suffix = hash_[-3:]
+ paths[suffix].append(path)
+ if len(paths) < needed_suffixes:
+ # in the extreamly unlikely situation where you land the matches
+ # you need before you get the total suffixes you need - it's
+ # simpler to just ignore this suffix for now
+ continue
+ if len(paths[suffix]) >= needed_matches:
+ break
+ return paths, suffix
+
+
+def _create_test_ring(path, policy):
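+ # each policy gets its own ring file, e.g. object-1.ring.gz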
+ ring_name = get_policy_string('object', policy)
+ testgz = os.path.join(path, ring_name + '.ring.gz')
intended_replica2part2dev_id = [
[0, 1, 2, 3, 4, 5, 6],
[1, 2, 3, 0, 5, 6, 4],
[2, 3, 0, 1, 6, 4, 5]]
intended_devs = [
- {'id': 0, 'device': 'sda', 'zone': 0, 'ip': '127.0.0.0', 'port': 6000},
- {'id': 1, 'device': 'sda', 'zone': 1, 'ip': '127.0.0.1', 'port': 6000},
- {'id': 2, 'device': 'sda', 'zone': 2, 'ip': '127.0.0.2', 'port': 6000},
- {'id': 3, 'device': 'sda', 'zone': 4, 'ip': '127.0.0.3', 'port': 6000},
- {'id': 4, 'device': 'sda', 'zone': 5, 'ip': '127.0.0.4', 'port': 6000},
- {'id': 5, 'device': 'sda', 'zone': 6,
+ {'id': 0, 'device': 'sda1', 'zone': 0, 'ip': '127.0.0.0',
+ 'port': 6000},
+ {'id': 1, 'device': 'sda1', 'zone': 1, 'ip': '127.0.0.1',
+ 'port': 6000},
+ {'id': 2, 'device': 'sda1', 'zone': 2, 'ip': '127.0.0.2',
+ 'port': 6000},
+ {'id': 3, 'device': 'sda1', 'zone': 4, 'ip': '127.0.0.3',
+ 'port': 6000},
+ {'id': 4, 'device': 'sda1', 'zone': 5, 'ip': '127.0.0.4',
+ 'port': 6000},
+ {'id': 5, 'device': 'sda1', 'zone': 6,
'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6000},
- {'id': 6, 'device': 'sda', 'zone': 7,
- 'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'port': 6000}]
+ {'id': 6, 'device': 'sda1', 'zone': 7,
+ 'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334',
+ 'port': 6000}]
intended_part_shift = 30
intended_reload_time = 15
with closing(GzipFile(testgz, 'wb')) as f:
@@ -78,7 +110,7 @@ def _create_test_ring(path):
ring.RingData(intended_replica2part2dev_id, intended_devs,
intended_part_shift),
f)
- return ring.Ring(path, ring_name='object',
+ return ring.Ring(path, ring_name=ring_name,
reload_time=intended_reload_time)
@@ -88,13 +120,13 @@ class TestDiskFileModuleMethods(unittest.TestCase):
def setUp(self):
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = ''
- # Setup a test ring (stolen from common/test_ring.py)
+ # Setup a test ring per policy (stolen from common/test_ring.py)
self.testdir = tempfile.mkdtemp()
self.devices = os.path.join(self.testdir, 'node')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
os.mkdir(self.devices)
- self.existing_device = 'sda'
+ self.existing_device = 'sda1'
os.mkdir(os.path.join(self.devices, self.existing_device))
self.objects = os.path.join(self.devices, self.existing_device,
'objects')
@@ -103,7 +135,7 @@ class TestDiskFileModuleMethods(unittest.TestCase):
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(os.path.join(self.objects, part))
- self.ring = _create_test_ring(self.testdir)
+ self.ring = _create_test_ring(self.testdir, POLICIES.legacy)
self.conf = dict(
swift_dir=self.testdir, devices=self.devices, mount_check='false',
timeout='300', stats_interval='1')
@@ -112,59 +144,58 @@ class TestDiskFileModuleMethods(unittest.TestCase):
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
- def _create_diskfile(self, policy_idx=0):
+ def _create_diskfile(self, policy):
return self.df_mgr.get_diskfile(self.existing_device,
'0', 'a', 'c', 'o',
- policy_idx)
+ policy=policy)
- def test_extract_policy_index(self):
+ def test_extract_policy(self):
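+ # extract_policy returns a policy object, or None for bad paths,
+ # instead of an index that silently fell back to 0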
# good path names
pn = 'objects/0/606/1984527ed7ef6247c78606/1401379842.14643.data'
- self.assertEqual(diskfile.extract_policy_index(pn), 0)
+ self.assertEqual(diskfile.extract_policy(pn), POLICIES[0])
pn = 'objects-1/0/606/198452b6ef6247c78606/1401379842.14643.data'
- self.assertEqual(diskfile.extract_policy_index(pn), 1)
+ self.assertEqual(diskfile.extract_policy(pn), POLICIES[1])
+
+ # leading slash
+ pn = '/objects/0/606/1984527ed7ef6247c78606/1401379842.14643.data'
+ self.assertEqual(diskfile.extract_policy(pn), POLICIES[0])
+ pn = '/objects-1/0/606/198452b6ef6247c78606/1401379842.14643.data'
+ self.assertEqual(diskfile.extract_policy(pn), POLICIES[1])
+
+ # full paths
good_path = '/srv/node/sda1/objects-1/1/abc/def/1234.data'
- self.assertEquals(1, diskfile.extract_policy_index(good_path))
+ self.assertEqual(diskfile.extract_policy(good_path), POLICIES[1])
good_path = '/srv/node/sda1/objects/1/abc/def/1234.data'
- self.assertEquals(0, diskfile.extract_policy_index(good_path))
+ self.assertEqual(diskfile.extract_policy(good_path), POLICIES[0])
- # short paths still ok
+ # short paths
path = '/srv/node/sda1/objects/1/1234.data'
- self.assertEqual(diskfile.extract_policy_index(path), 0)
+ self.assertEqual(diskfile.extract_policy(path), POLICIES[0])
path = '/srv/node/sda1/objects-1/1/1234.data'
- self.assertEqual(diskfile.extract_policy_index(path), 1)
-
- # leading slash, just in case
- pn = '/objects/0/606/1984527ed7ef6247c78606/1401379842.14643.data'
- self.assertEqual(diskfile.extract_policy_index(pn), 0)
- pn = '/objects-1/0/606/198452b6ef6247c78606/1401379842.14643.data'
- self.assertEqual(diskfile.extract_policy_index(pn), 1)
+ self.assertEqual(diskfile.extract_policy(path), POLICIES[1])
- # bad policy index
+ # well formatted, but unknown policy index
pn = 'objects-2/0/606/198427efcff042c78606/1401379842.14643.data'
- self.assertEqual(diskfile.extract_policy_index(pn), 0)
- bad_path = '/srv/node/sda1/objects-t/1/abc/def/1234.data'
- self.assertRaises(ValueError,
- diskfile.extract_policy_index, bad_path)
+ self.assertEqual(diskfile.extract_policy(pn), None)
- # malformed path (no objects dir or nothing at all)
+ # malformed path
+ self.assertEqual(diskfile.extract_policy(''), None)
+ bad_path = '/srv/node/sda1/objects-t/1/abc/def/1234.data'
+ self.assertEqual(diskfile.extract_policy(bad_path), None)
pn = 'XXXX/0/606/1984527ed42b6ef6247c78606/1401379842.14643.data'
- self.assertEqual(diskfile.extract_policy_index(pn), 0)
- self.assertEqual(diskfile.extract_policy_index(''), 0)
-
- # no datadir base in path
+ self.assertEqual(diskfile.extract_policy(pn), None)
bad_path = '/srv/node/sda1/foo-1/1/abc/def/1234.data'
- self.assertEqual(diskfile.extract_policy_index(bad_path), 0)
+ self.assertEqual(diskfile.extract_policy(bad_path), None)
bad_path = '/srv/node/sda1/obj1/1/abc/def/1234.data'
- self.assertEqual(diskfile.extract_policy_index(bad_path), 0)
+ self.assertEqual(diskfile.extract_policy(bad_path), None)
def test_quarantine_renamer(self):
for policy in POLICIES:
# we use this for convenience, not really about a diskfile layout
- df = self._create_diskfile(policy_idx=policy.idx)
+ df = self._create_diskfile(policy=policy)
mkdirs(df._datadir)
exp_dir = os.path.join(self.devices, 'quarantined',
- get_data_dir(policy.idx),
+ diskfile.get_data_dir(policy),
os.path.basename(df._datadir))
qbit = os.path.join(df._datadir, 'qbit')
with open(qbit, 'w') as f:
@@ -174,38 +205,28 @@ class TestDiskFileModuleMethods(unittest.TestCase):
self.assertRaises(OSError, diskfile.quarantine_renamer,
self.devices, qbit)
- def test_hash_suffix_enoent(self):
- self.assertRaises(PathNotDir, diskfile.hash_suffix,
- os.path.join(self.testdir, "doesnotexist"), 101)
-
- def test_hash_suffix_oserror(self):
- mocked_os_listdir = mock.Mock(
- side_effect=OSError(errno.EACCES, os.strerror(errno.EACCES)))
- with mock.patch("os.listdir", mocked_os_listdir):
- self.assertRaises(OSError, diskfile.hash_suffix,
- os.path.join(self.testdir, "doesnotexist"), 101)
-
def test_get_data_dir(self):
- self.assertEquals(diskfile.get_data_dir(0), diskfile.DATADIR_BASE)
- self.assertEquals(diskfile.get_data_dir(1),
+ self.assertEquals(diskfile.get_data_dir(POLICIES[0]),
+ diskfile.DATADIR_BASE)
+ self.assertEquals(diskfile.get_data_dir(POLICIES[1]),
diskfile.DATADIR_BASE + "-1")
self.assertRaises(ValueError, diskfile.get_data_dir, 'junk')
self.assertRaises(ValueError, diskfile.get_data_dir, 99)
def test_get_async_dir(self):
- self.assertEquals(diskfile.get_async_dir(0),
+ self.assertEquals(diskfile.get_async_dir(POLICIES[0]),
diskfile.ASYNCDIR_BASE)
- self.assertEquals(diskfile.get_async_dir(1),
+ self.assertEquals(diskfile.get_async_dir(POLICIES[1]),
diskfile.ASYNCDIR_BASE + "-1")
self.assertRaises(ValueError, diskfile.get_async_dir, 'junk')
self.assertRaises(ValueError, diskfile.get_async_dir, 99)
def test_get_tmp_dir(self):
- self.assertEquals(diskfile.get_tmp_dir(0),
+ self.assertEquals(diskfile.get_tmp_dir(POLICIES[0]),
diskfile.TMP_BASE)
- self.assertEquals(diskfile.get_tmp_dir(1),
+ self.assertEquals(diskfile.get_tmp_dir(POLICIES[1]),
diskfile.TMP_BASE + "-1")
self.assertRaises(ValueError, diskfile.get_tmp_dir, 'junk')
@@ -221,7 +242,7 @@ class TestDiskFileModuleMethods(unittest.TestCase):
self.devices, self.existing_device, tmp_part)
self.assertFalse(os.path.isdir(tmp_path))
pickle_args = (self.existing_device, 'a', 'c', 'o',
- 'data', 0.0, int(policy))
+ 'data', 0.0, policy)
# async updates don't create their tmpdir on their own
self.assertRaises(OSError, self.df_mgr.pickle_async_update,
*pickle_args)
@@ -231,438 +252,6 @@ class TestDiskFileModuleMethods(unittest.TestCase):
# check tempdir
self.assertTrue(os.path.isdir(tmp_path))
- def test_hash_suffix_hash_dir_is_file_quarantine(self):
- df = self._create_diskfile()
- mkdirs(os.path.dirname(df._datadir))
- open(df._datadir, 'wb').close()
- ohash = hash_path('a', 'c', 'o')
- data_dir = ohash[-3:]
- whole_path_from = os.path.join(self.objects, '0', data_dir)
- orig_quarantine_renamer = diskfile.quarantine_renamer
- called = [False]
-
- def wrapped(*args, **kwargs):
- called[0] = True
- return orig_quarantine_renamer(*args, **kwargs)
-
- try:
- diskfile.quarantine_renamer = wrapped
- diskfile.hash_suffix(whole_path_from, 101)
- finally:
- diskfile.quarantine_renamer = orig_quarantine_renamer
- self.assertTrue(called[0])
-
- def test_hash_suffix_one_file(self):
- df = self._create_diskfile()
- mkdirs(df._datadir)
- f = open(
- os.path.join(df._datadir,
- Timestamp(time() - 100).internal + '.ts'),
- 'wb')
- f.write('1234567890')
- f.close()
- ohash = hash_path('a', 'c', 'o')
- data_dir = ohash[-3:]
- whole_path_from = os.path.join(self.objects, '0', data_dir)
- diskfile.hash_suffix(whole_path_from, 101)
- self.assertEquals(len(os.listdir(self.parts['0'])), 1)
-
- diskfile.hash_suffix(whole_path_from, 99)
- self.assertEquals(len(os.listdir(self.parts['0'])), 0)
-
- def test_hash_suffix_oserror_on_hcl(self):
- df = self._create_diskfile()
- mkdirs(df._datadir)
- f = open(
- os.path.join(df._datadir,
- Timestamp(time() - 100).internal + '.ts'),
- 'wb')
- f.write('1234567890')
- f.close()
- ohash = hash_path('a', 'c', 'o')
- data_dir = ohash[-3:]
- whole_path_from = os.path.join(self.objects, '0', data_dir)
- state = [0]
- orig_os_listdir = os.listdir
-
- def mock_os_listdir(*args, **kwargs):
- # We want the first call to os.listdir() to succeed, which is the
- # one directly from hash_suffix() itself, but then we want to fail
- # the next call to os.listdir() which is from
- # hash_cleanup_listdir()
- if state[0] == 1:
- raise OSError(errno.EACCES, os.strerror(errno.EACCES))
- state[0] = 1
- return orig_os_listdir(*args, **kwargs)
-
- with mock.patch('os.listdir', mock_os_listdir):
- self.assertRaises(OSError, diskfile.hash_suffix, whole_path_from,
- 101)
-
- def test_hash_suffix_multi_file_one(self):
- df = self._create_diskfile()
- mkdirs(df._datadir)
- for tdiff in [1, 50, 100, 500]:
- for suff in ['.meta', '.data', '.ts']:
- f = open(
- os.path.join(
- df._datadir,
- Timestamp(int(time()) - tdiff).internal + suff),
- 'wb')
- f.write('1234567890')
- f.close()
-
- ohash = hash_path('a', 'c', 'o')
- data_dir = ohash[-3:]
- whole_path_from = os.path.join(self.objects, '0', data_dir)
- hsh_path = os.listdir(whole_path_from)[0]
- whole_hsh_path = os.path.join(whole_path_from, hsh_path)
-
- diskfile.hash_suffix(whole_path_from, 99)
- # only the tombstone should be left
- self.assertEquals(len(os.listdir(whole_hsh_path)), 1)
-
- def test_hash_suffix_multi_file_two(self):
- df = self._create_diskfile()
- mkdirs(df._datadir)
- for tdiff in [1, 50, 100, 500]:
- suffs = ['.meta', '.data']
- if tdiff > 50:
- suffs.append('.ts')
- for suff in suffs:
- f = open(
- os.path.join(
- df._datadir,
- Timestamp(int(time()) - tdiff).internal + suff),
- 'wb')
- f.write('1234567890')
- f.close()
-
- ohash = hash_path('a', 'c', 'o')
- data_dir = ohash[-3:]
- whole_path_from = os.path.join(self.objects, '0', data_dir)
- hsh_path = os.listdir(whole_path_from)[0]
- whole_hsh_path = os.path.join(whole_path_from, hsh_path)
-
- diskfile.hash_suffix(whole_path_from, 99)
- # only the meta and data should be left
- self.assertEquals(len(os.listdir(whole_hsh_path)), 2)
-
- def test_hash_suffix_hsh_path_disappearance(self):
- orig_rmdir = os.rmdir
-
- def _rmdir(path):
- # Done twice to recreate what happens when it doesn't exist.
- orig_rmdir(path)
- orig_rmdir(path)
-
- df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
- mkdirs(df._datadir)
- ohash = hash_path('a', 'c', 'o')
- suffix = ohash[-3:]
- suffix_path = os.path.join(self.objects, '0', suffix)
- with mock.patch('os.rmdir', _rmdir):
- # If hash_suffix doesn't handle the exception _rmdir will raise,
- # this test will fail.
- diskfile.hash_suffix(suffix_path, 123)
-
- def test_invalidate_hash(self):
-
- def assertFileData(file_path, data):
- with open(file_path, 'r') as fp:
- fdata = fp.read()
- self.assertEquals(pickle.loads(fdata), pickle.loads(data))
-
- df = self._create_diskfile()
- mkdirs(df._datadir)
- ohash = hash_path('a', 'c', 'o')
- data_dir = ohash[-3:]
- whole_path_from = os.path.join(self.objects, '0', data_dir)
- hashes_file = os.path.join(self.objects, '0',
- diskfile.HASH_FILE)
- # test that non existent file except caught
- self.assertEquals(diskfile.invalidate_hash(whole_path_from),
- None)
- # test that hashes get cleared
- check_pickle_data = pickle.dumps({data_dir: None},
- diskfile.PICKLE_PROTOCOL)
- for data_hash in [{data_dir: None}, {data_dir: 'abcdefg'}]:
- with open(hashes_file, 'wb') as fp:
- pickle.dump(data_hash, fp, diskfile.PICKLE_PROTOCOL)
- diskfile.invalidate_hash(whole_path_from)
- assertFileData(hashes_file, check_pickle_data)
-
- def test_invalidate_hash_bad_pickle(self):
- df = self._create_diskfile()
- mkdirs(df._datadir)
- ohash = hash_path('a', 'c', 'o')
- data_dir = ohash[-3:]
- whole_path_from = os.path.join(self.objects, '0', data_dir)
- hashes_file = os.path.join(self.objects, '0',
- diskfile.HASH_FILE)
- for data_hash in [{data_dir: None}, {data_dir: 'abcdefg'}]:
- with open(hashes_file, 'wb') as fp:
- fp.write('bad hash data')
- try:
- diskfile.invalidate_hash(whole_path_from)
- except Exception as err:
- self.fail("Unexpected exception raised: %s" % err)
- else:
- pass
-
- def test_get_hashes(self):
- df = self._create_diskfile()
- mkdirs(df._datadir)
- with open(
- os.path.join(df._datadir,
- Timestamp(time()).internal + '.ts'),
- 'wb') as f:
- f.write('1234567890')
- part = os.path.join(self.objects, '0')
- hashed, hashes = diskfile.get_hashes(part)
- self.assertEquals(hashed, 1)
- self.assert_('a83' in hashes)
- hashed, hashes = diskfile.get_hashes(part, do_listdir=True)
- self.assertEquals(hashed, 0)
- self.assert_('a83' in hashes)
- hashed, hashes = diskfile.get_hashes(part, recalculate=['a83'])
- self.assertEquals(hashed, 1)
- self.assert_('a83' in hashes)
-
- def test_get_hashes_bad_dir(self):
- df = self._create_diskfile()
- mkdirs(df._datadir)
- with open(os.path.join(self.objects, '0', 'bad'), 'wb') as f:
- f.write('1234567890')
- part = os.path.join(self.objects, '0')
- hashed, hashes = diskfile.get_hashes(part)
- self.assertEquals(hashed, 1)
- self.assert_('a83' in hashes)
- self.assert_('bad' not in hashes)
-
- def test_get_hashes_unmodified(self):
- df = self._create_diskfile()
- mkdirs(df._datadir)
- with open(
- os.path.join(df._datadir,
- Timestamp(time()).internal + '.ts'),
- 'wb') as f:
- f.write('1234567890')
- part = os.path.join(self.objects, '0')
- hashed, hashes = diskfile.get_hashes(part)
- i = [0]
-
- def _getmtime(filename):
- i[0] += 1
- return 1
- with unit_mock({'swift.obj.diskfile.getmtime': _getmtime}):
- hashed, hashes = diskfile.get_hashes(
- part, recalculate=['a83'])
- self.assertEquals(i[0], 2)
-
- def test_get_hashes_unmodified_norecalc(self):
- df = self._create_diskfile()
- mkdirs(df._datadir)
- with open(
- os.path.join(df._datadir,
- Timestamp(time()).internal + '.ts'),
- 'wb') as f:
- f.write('1234567890')
- part = os.path.join(self.objects, '0')
- hashed, hashes_0 = diskfile.get_hashes(part)
- self.assertEqual(hashed, 1)
- self.assertTrue('a83' in hashes_0)
- hashed, hashes_1 = diskfile.get_hashes(part)
- self.assertEqual(hashed, 0)
- self.assertTrue('a83' in hashes_0)
- self.assertEqual(hashes_1, hashes_0)
-
- def test_get_hashes_hash_suffix_error(self):
- df = self._create_diskfile()
- mkdirs(df._datadir)
- with open(
- os.path.join(df._datadir,
- Timestamp(time()).internal + '.ts'),
- 'wb') as f:
- f.write('1234567890')
- part = os.path.join(self.objects, '0')
- mocked_hash_suffix = mock.MagicMock(
- side_effect=OSError(errno.EACCES, os.strerror(errno.EACCES)))
- with mock.patch('swift.obj.diskfile.hash_suffix', mocked_hash_suffix):
- hashed, hashes = diskfile.get_hashes(part)
- self.assertEqual(hashed, 0)
- self.assertEqual(hashes, {'a83': None})
-
- def test_get_hashes_unmodified_and_zero_bytes(self):
- df = self._create_diskfile()
- mkdirs(df._datadir)
- part = os.path.join(self.objects, '0')
- open(os.path.join(part, diskfile.HASH_FILE), 'w')
- # Now the hash file is zero bytes.
- i = [0]
-
- def _getmtime(filename):
- i[0] += 1
- return 1
- with unit_mock({'swift.obj.diskfile.getmtime': _getmtime}):
- hashed, hashes = diskfile.get_hashes(
- part, recalculate=[])
- # getmtime will actually not get called. Initially, the pickle.load
- # will raise an exception first and later, force_rewrite will
- # short-circuit the if clause to determine whether to write out a
- # fresh hashes_file.
- self.assertEquals(i[0], 0)
- self.assertTrue('a83' in hashes)
-
- def test_get_hashes_modified(self):
- df = self._create_diskfile()
- mkdirs(df._datadir)
- with open(
- os.path.join(df._datadir,
- Timestamp(time()).internal + '.ts'),
- 'wb') as f:
- f.write('1234567890')
- part = os.path.join(self.objects, '0')
- hashed, hashes = diskfile.get_hashes(part)
- i = [0]
-
- def _getmtime(filename):
- if i[0] < 3:
- i[0] += 1
- return i[0]
- with unit_mock({'swift.obj.diskfile.getmtime': _getmtime}):
- hashed, hashes = diskfile.get_hashes(
- part, recalculate=['a83'])
- self.assertEquals(i[0], 3)
-
- def check_hash_cleanup_listdir(self, input_files, output_files):
- orig_unlink = os.unlink
- file_list = list(input_files)
-
- def mock_listdir(path):
- return list(file_list)
-
- def mock_unlink(path):
- # timestamp 1 is a special tag to pretend a file disappeared while
- # working.
- if '/0000000001.00000.' in path:
- # Using actual os.unlink to reproduce exactly what OSError it
- # raises.
- orig_unlink(uuid.uuid4().hex)
- file_list.remove(os.path.basename(path))
-
- with unit_mock({'os.listdir': mock_listdir, 'os.unlink': mock_unlink}):
- self.assertEquals(diskfile.hash_cleanup_listdir('/whatever'),
- output_files)
-
- def test_hash_cleanup_listdir_purge_data_newer_ts(self):
- # purge .data if there's a newer .ts
- file1 = Timestamp(time()).internal + '.data'
- file2 = Timestamp(time() + 1).internal + '.ts'
- file_list = [file1, file2]
- self.check_hash_cleanup_listdir(file_list, [file2])
-
- def test_hash_cleanup_listdir_purge_ts_newer_data(self):
- # purge .ts if there's a newer .data
- file1 = Timestamp(time()).internal + '.ts'
- file2 = Timestamp(time() + 1).internal + '.data'
- file_list = [file1, file2]
- self.check_hash_cleanup_listdir(file_list, [file2])
-
- def test_hash_cleanup_listdir_keep_meta_data_purge_ts(self):
- # keep .meta and .data if meta newer than data and purge .ts
- file1 = Timestamp(time()).internal + '.ts'
- file2 = Timestamp(time() + 1).internal + '.data'
- file3 = Timestamp(time() + 2).internal + '.meta'
- file_list = [file1, file2, file3]
- self.check_hash_cleanup_listdir(file_list, [file3, file2])
-
- def test_hash_cleanup_listdir_keep_one_ts(self):
- # keep only latest of multiple .ts files
- file1 = Timestamp(time()).internal + '.ts'
- file2 = Timestamp(time() + 1).internal + '.ts'
- file3 = Timestamp(time() + 2).internal + '.ts'
- file_list = [file1, file2, file3]
- self.check_hash_cleanup_listdir(file_list, [file3])
-
- def test_hash_cleanup_listdir_keep_one_data(self):
- # keep only latest of multiple .data files
- file1 = Timestamp(time()).internal + '.data'
- file2 = Timestamp(time() + 1).internal + '.data'
- file3 = Timestamp(time() + 2).internal + '.data'
- file_list = [file1, file2, file3]
- self.check_hash_cleanup_listdir(file_list, [file3])
-
- def test_hash_cleanup_listdir_keep_one_meta(self):
- # keep only latest of multiple .meta files
- file1 = Timestamp(time()).internal + '.data'
- file2 = Timestamp(time() + 1).internal + '.meta'
- file3 = Timestamp(time() + 2).internal + '.meta'
- file_list = [file1, file2, file3]
- self.check_hash_cleanup_listdir(file_list, [file3, file1])
-
- def test_hash_cleanup_listdir_ignore_orphaned_ts(self):
- # A more recent orphaned .meta file will prevent old .ts files
- # from being cleaned up otherwise
- file1 = Timestamp(time()).internal + '.ts'
- file2 = Timestamp(time() + 1).internal + '.ts'
- file3 = Timestamp(time() + 2).internal + '.meta'
- file_list = [file1, file2, file3]
- self.check_hash_cleanup_listdir(file_list, [file3, file2])
-
- def test_hash_cleanup_listdir_purge_old_data_only(self):
- # Oldest .data will be purge, .meta and .ts won't be touched
- file1 = Timestamp(time()).internal + '.data'
- file2 = Timestamp(time() + 1).internal + '.ts'
- file3 = Timestamp(time() + 2).internal + '.meta'
- file_list = [file1, file2, file3]
- self.check_hash_cleanup_listdir(file_list, [file3, file2])
-
- def test_hash_cleanup_listdir_purge_old_ts(self):
- # A single old .ts file will be removed
- file1 = Timestamp(time() - (diskfile.ONE_WEEK + 1)).internal + '.ts'
- file_list = [file1]
- self.check_hash_cleanup_listdir(file_list, [])
-
- def test_hash_cleanup_listdir_meta_keeps_old_ts(self):
- # An orphaned .meta will not clean up a very old .ts
- file1 = Timestamp(time() - (diskfile.ONE_WEEK + 1)).internal + '.ts'
- file2 = Timestamp(time() + 2).internal + '.meta'
- file_list = [file1, file2]
- self.check_hash_cleanup_listdir(file_list, [file2, file1])
-
- def test_hash_cleanup_listdir_keep_single_old_data(self):
- # A single old .data file will not be removed
- file1 = Timestamp(time() - (diskfile.ONE_WEEK + 1)).internal + '.data'
- file_list = [file1]
- self.check_hash_cleanup_listdir(file_list, [file1])
-
- def test_hash_cleanup_listdir_keep_single_old_meta(self):
- # A single old .meta file will not be removed
- file1 = Timestamp(time() - (diskfile.ONE_WEEK + 1)).internal + '.meta'
- file_list = [file1]
- self.check_hash_cleanup_listdir(file_list, [file1])
-
- def test_hash_cleanup_listdir_disappeared_path(self):
- # Next line listing a non-existent dir used to propagate the OSError;
- # now should mute that.
- self.assertEqual(diskfile.hash_cleanup_listdir(uuid.uuid4().hex), [])
-
- def test_hash_cleanup_listdir_disappeared_before_unlink_1(self):
- # Timestamp 1 makes other test routines pretend the file disappeared
- # while working.
- file1 = '0000000001.00000.ts'
- file_list = [file1]
- self.check_hash_cleanup_listdir(file_list, [])
-
- def test_hash_cleanup_listdir_disappeared_before_unlink_2(self):
- # Timestamp 1 makes other test routines pretend the file disappeared
- # while working.
- file1 = '0000000001.00000.data'
- file2 = '0000000002.00000.ts'
- file_list = [file1, file2]
- self.check_hash_cleanup_listdir(file_list, [file2])
-
@patch_policies
class TestObjectAuditLocationGenerator(unittest.TestCase):
@@ -677,7 +266,8 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
pass
def test_audit_location_class(self):
- al = diskfile.AuditLocation('abc', '123', '_-_')
+ al = diskfile.AuditLocation('abc', '123', '_-_',
+ policy=POLICIES.legacy)
self.assertEqual(str(al), 'abc')
def test_finding_of_hashdirs(self):
@@ -705,6 +295,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
"6c3",
"fcd938702024c25fef6c32fef05298eb"))
os.makedirs(os.path.join(tmpdir, "sdq", "objects-fud", "foo"))
+ os.makedirs(os.path.join(tmpdir, "sdq", "objects-+1", "foo"))
self._make_file(os.path.join(tmpdir, "sdp", "objects", "1519",
"fed"))
@@ -723,7 +314,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
"4f9eee668b66c6f0250bfa3c7ab9e51e"))
logger = debug_logger()
- locations = [(loc.path, loc.device, loc.partition)
+ locations = [(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, mount_check=False,
logger=logger)]
@@ -732,44 +323,42 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
# expect some warnings about those bad dirs
warnings = logger.get_lines_for_level('warning')
self.assertEqual(set(warnings), set([
- 'Directory objects- does not map to a valid policy',
- 'Directory objects-2 does not map to a valid policy',
- 'Directory objects-99 does not map to a valid policy',
- 'Directory objects-fud does not map to a valid policy']))
+ ("Directory 'objects-' does not map to a valid policy "
+ "(Unknown policy, for index '')"),
+ ("Directory 'objects-2' does not map to a valid policy "
+ "(Unknown policy, for index '2')"),
+ ("Directory 'objects-99' does not map to a valid policy "
+ "(Unknown policy, for index '99')"),
+ ("Directory 'objects-fud' does not map to a valid policy "
+ "(Unknown policy, for index 'fud')"),
+ ("Directory 'objects-+1' does not map to a valid policy "
+ "(Unknown policy, for index '+1')"),
+ ]))
expected = \
[(os.path.join(tmpdir, "sdp", "objects-1", "9970", "ca5",
"4a943bc72c2e647c4675923d58cf4ca5"),
- "sdp", "9970"),
+ "sdp", "9970", POLICIES[1]),
(os.path.join(tmpdir, "sdp", "objects", "1519", "aca",
"5c1fdc1ffb12e5eaf84edc30d8b67aca"),
- "sdp", "1519"),
+ "sdp", "1519", POLICIES[0]),
(os.path.join(tmpdir, "sdp", "objects", "1519", "aca",
"fdfd184d39080020bc8b487f8a7beaca"),
- "sdp", "1519"),
+ "sdp", "1519", POLICIES[0]),
(os.path.join(tmpdir, "sdp", "objects", "1519", "df2",
"b0fe7af831cc7b1af5bf486b1c841df2"),
- "sdp", "1519"),
+ "sdp", "1519", POLICIES[0]),
(os.path.join(tmpdir, "sdp", "objects", "9720", "ca5",
"4a943bc72c2e647c4675923d58cf4ca5"),
- "sdp", "9720"),
- (os.path.join(tmpdir, "sdq", "objects-", "1135", "6c3",
- "fcd938702024c25fef6c32fef05298eb"),
- "sdq", "1135"),
- (os.path.join(tmpdir, "sdq", "objects-2", "9971", "8eb",
- "fcd938702024c25fef6c32fef05298eb"),
- "sdq", "9971"),
- (os.path.join(tmpdir, "sdq", "objects-99", "9972", "8eb",
- "fcd938702024c25fef6c32fef05298eb"),
- "sdq", "9972"),
+ "sdp", "9720", POLICIES[0]),
(os.path.join(tmpdir, "sdq", "objects", "3071", "8eb",
"fcd938702024c25fef6c32fef05298eb"),
- "sdq", "3071"),
+ "sdq", "3071", POLICIES[0]),
]
self.assertEqual(locations, expected)
# now without a logger
- locations = [(loc.path, loc.device, loc.partition)
+ locations = [(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, mount_check=False)]
locations.sort()
@@ -789,7 +378,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
"4993d582f41be9771505a8d4cb237a10"))
locations = [
- (loc.path, loc.device, loc.partition)
+ (loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, mount_check=True)]
locations.sort()
@@ -799,12 +388,12 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
[(os.path.join(tmpdir, "sdp", "objects",
"2607", "df3",
"ec2871fe724411f91787462f97d30df3"),
- "sdp", "2607")])
+ "sdp", "2607", POLICIES[0])])
# Do it again, this time with a logger.
ml = mock.MagicMock()
locations = [
- (loc.path, loc.device, loc.partition)
+ (loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, mount_check=True, logger=ml)]
ml.debug.assert_called_once_with(
@@ -817,7 +406,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
# only normal FS corruption should be skipped over silently.
def list_locations(dirname):
- return [(loc.path, loc.device, loc.partition)
+ return [(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=dirname, mount_check=False)]
@@ -843,7 +432,45 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
self.assertRaises(OSError, list_locations, tmpdir)
-class TestDiskFileManager(unittest.TestCase):
+class TestDiskFileRouter(unittest.TestCase):
+
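+    # Assumed contract, per the test below: DiskFileRouter.register maps a
+    # policy_type string to a DiskFileManager subclass, and router[policy]
+    # then resolves any policy of that type to an instance of that class.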
+ def test_register(self):
+ with mock.patch.dict(
+ diskfile.DiskFileRouter.policy_type_to_manager_cls, {}):
+ @diskfile.DiskFileRouter.register('test-policy')
+ class TestDiskFileManager(diskfile.DiskFileManager):
+ pass
+
+ @BaseStoragePolicy.register('test-policy')
+ class TestStoragePolicy(BaseStoragePolicy):
+ pass
+
+ with patch_policies([TestStoragePolicy(0, 'test')]):
+ router = diskfile.DiskFileRouter({}, debug_logger('test'))
+ manager = router[POLICIES.default]
+ self.assertTrue(isinstance(manager, TestDiskFileManager))
+
+
+class BaseDiskFileTestMixin(object):
+ """
+ Bag of helpers that are useful in the per-policy DiskFile test classes.
+ """
+
+ def _manager_mock(self, manager_attribute_name, df=None):
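+        # e.g. with mgr_cls = DiskFileManager and no df given, this returns
+        # 'swift.obj.diskfile.DiskFileManager.diskfile_cls' for attribute
+        # name 'diskfile_cls' - a dotted path ready for mock.patch()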
+ mgr_cls = df._manager.__class__ if df else self.mgr_cls
+ return '.'.join([
+ mgr_cls.__module__, mgr_cls.__name__, manager_attribute_name])
+
+
+class DiskFileManagerMixin(BaseDiskFileTestMixin):
+ """
+ Abstract test method mixin for concrete test cases - this class
+ won't get picked up by test runners because it doesn't subclass
+ unittest.TestCase and doesn't have [Tt]est in the name.
+ """
+
+ # set mgr_cls on subclasses
+ mgr_cls = None
def setUp(self):
self.tmpdir = mkdtemp()
@@ -851,17 +478,111 @@ class TestDiskFileManager(unittest.TestCase):
self.tmpdir, 'tmp_test_obj_server_DiskFile')
self.existing_device1 = 'sda1'
self.existing_device2 = 'sda2'
- mkdirs(os.path.join(self.testdir, self.existing_device1, 'tmp'))
- mkdirs(os.path.join(self.testdir, self.existing_device2, 'tmp'))
+ for policy in POLICIES:
+ mkdirs(os.path.join(self.testdir, self.existing_device1,
+ diskfile.get_tmp_dir(policy)))
+ mkdirs(os.path.join(self.testdir, self.existing_device2,
+ diskfile.get_tmp_dir(policy)))
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
self.conf = dict(devices=self.testdir, mount_check='false',
keep_cache_size=2 * 1024)
- self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
+ self.logger = debug_logger('test-' + self.__class__.__name__)
+ self.df_mgr = self.mgr_cls(self.conf, self.logger)
+ self.df_router = diskfile.DiskFileRouter(self.conf, self.logger)
def tearDown(self):
rmtree(self.tmpdir, ignore_errors=1)
+ def _get_diskfile(self, policy, frag_index=None):
+ df_mgr = self.df_router[policy]
+ return df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
+ policy=policy, frag_index=frag_index)
+
+ def _test_get_ondisk_files(self, scenarios, policy,
+ frag_index=None):
+ class_under_test = self._get_diskfile(policy, frag_index=frag_index)
+ with mock.patch('swift.obj.diskfile.os.listdir',
+ lambda _: []):
+ self.assertEqual((None, None, None),
+ class_under_test._get_ondisk_file())
+
+ returned_ext_order = ('.data', '.meta', '.ts')
+ for test in scenarios:
+ chosen = dict((f[1], os.path.join(class_under_test._datadir, f[0]))
+ for f in test if f[1])
+ expected = tuple(chosen.get(ext) for ext in returned_ext_order)
+ files = list(zip(*test)[0])
+ for _order in ('ordered', 'shuffled', 'shuffled'):
+ class_under_test = self._get_diskfile(policy, frag_index)
+ try:
+ with mock.patch('swift.obj.diskfile.os.listdir',
+ lambda _: files):
+ actual = class_under_test._get_ondisk_file()
+ self.assertEqual(expected, actual,
+ 'Expected %s from %s but got %s'
+ % (expected, files, actual))
+ except AssertionError as e:
+ self.fail('%s with files %s' % (str(e), files))
+ shuffle(files)
+
+ def _test_hash_cleanup_listdir_files(self, scenarios, policy,
+ reclaim_age=None):
+ # check that expected files are left in hashdir after cleanup
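+        # e.g. ('0000000007.00000.data', '.data') must survive cleanup,
+        # while a three-element tuple such as
+        # ('0000000007.00000.meta', False, True) marks survival via its
+        # third element even though no extension is expected to be returned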
+ for test in scenarios:
+ class_under_test = self.df_router[policy]
+ files = list(zip(*test)[0])
+ hashdir = os.path.join(self.testdir, str(uuid.uuid4()))
+ os.mkdir(hashdir)
+ for fname in files:
+ open(os.path.join(hashdir, fname), 'w')
+ expected_after_cleanup = set([f[0] for f in test
+ if (f[2] if len(f) > 2 else f[1])])
+ if reclaim_age:
+ class_under_test.hash_cleanup_listdir(
+ hashdir, reclaim_age=reclaim_age)
+ else:
+ with mock.patch('swift.obj.diskfile.time') as mock_time:
+ # don't reclaim anything
+ mock_time.time.return_value = 0.0
+ class_under_test.hash_cleanup_listdir(hashdir)
+ after_cleanup = set(os.listdir(hashdir))
+ errmsg = "expected %r, got %r for test %r" % (
+ sorted(expected_after_cleanup), sorted(after_cleanup), test
+ )
+ self.assertEqual(expected_after_cleanup, after_cleanup, errmsg)
+
+ def _test_yield_hashes_cleanup(self, scenarios, policy):
+        # opportunistic test to check that yield_hashes cleans up the hash
+        # dir, using the same scenarios as passed to
+        # _test_hash_cleanup_listdir_files
+ for test in scenarios:
+ class_under_test = self.df_router[policy]
+ files = list(zip(*test)[0])
+ dev_path = os.path.join(self.testdir, str(uuid.uuid4()))
+ hashdir = os.path.join(
+ dev_path, diskfile.get_data_dir(policy),
+ '0', 'abc', '9373a92d072897b136b3fc06595b4abc')
+ os.makedirs(hashdir)
+ for fname in files:
+ open(os.path.join(hashdir, fname), 'w')
+ expected_after_cleanup = set([f[0] for f in test
+ if f[1] or len(f) > 2 and f[2]])
+ with mock.patch('swift.obj.diskfile.time') as mock_time:
+ # don't reclaim anything
+ mock_time.time.return_value = 0.0
+ mock_func = 'swift.obj.diskfile.DiskFileManager.get_dev_path'
+ with mock.patch(mock_func) as mock_path:
+ mock_path.return_value = dev_path
+ for _ in class_under_test.yield_hashes(
+ 'ignored', '0', policy, suffixes=['abc']):
+ # return values are tested in test_yield_hashes_*
+ pass
+ after_cleanup = set(os.listdir(hashdir))
+ errmsg = "expected %r, got %r for test %r" % (
+ sorted(expected_after_cleanup), sorted(after_cleanup), test
+ )
+ self.assertEqual(expected_after_cleanup, after_cleanup, errmsg)
+
def test_construct_dev_path(self):
res_path = self.df_mgr.construct_dev_path('abc')
self.assertEqual(os.path.join(self.df_mgr.devices, 'abc'), res_path)
@@ -872,12 +593,13 @@ class TestDiskFileManager(unittest.TestCase):
with mock.patch('swift.obj.diskfile.write_pickle') as wp:
self.df_mgr.pickle_async_update(self.existing_device1,
'a', 'c', 'o',
- dict(a=1, b=2), ts, 0)
+ dict(a=1, b=2), ts, POLICIES[0])
dp = self.df_mgr.construct_dev_path(self.existing_device1)
ohash = diskfile.hash_path('a', 'c', 'o')
wp.assert_called_with({'a': 1, 'b': 2},
- os.path.join(dp, diskfile.get_async_dir(0),
- ohash[-3:], ohash + '-' + ts),
+ os.path.join(
+ dp, diskfile.get_async_dir(POLICIES[0]),
+ ohash[-3:], ohash + '-' + ts),
os.path.join(dp, 'tmp'))
self.df_mgr.logger.increment.assert_called_with('async_pendings')
@@ -885,32 +607,16 @@ class TestDiskFileManager(unittest.TestCase):
locations = list(self.df_mgr.object_audit_location_generator())
self.assertEqual(locations, [])
- def test_get_hashes_bad_dev(self):
- self.df_mgr.mount_check = True
- with mock.patch('swift.obj.diskfile.check_mount',
- mock.MagicMock(side_effect=[False])):
- self.assertRaises(DiskFileDeviceUnavailable,
- self.df_mgr.get_hashes, 'sdb1', '0', '123',
- 'objects')
-
- def test_get_hashes_w_nothing(self):
- hashes = self.df_mgr.get_hashes(self.existing_device1, '0', '123', '0')
- self.assertEqual(hashes, {})
- # get_hashes creates the partition path, so call again for code
- # path coverage, ensuring the result is unchanged
- hashes = self.df_mgr.get_hashes(self.existing_device1, '0', '123', '0')
- self.assertEqual(hashes, {})
-
def test_replication_lock_on(self):
# Double check settings
self.df_mgr.replication_one_per_device = True
self.df_mgr.replication_lock_timeout = 0.1
dev_path = os.path.join(self.testdir, self.existing_device1)
- with self.df_mgr.replication_lock(dev_path):
+ with self.df_mgr.replication_lock(self.existing_device1):
lock_exc = None
exc = None
try:
- with self.df_mgr.replication_lock(dev_path):
+ with self.df_mgr.replication_lock(self.existing_device1):
raise Exception(
'%r was not replication locked!' % dev_path)
except ReplicationLockTimeout as err:
@@ -943,12 +649,10 @@ class TestDiskFileManager(unittest.TestCase):
# Double check settings
self.df_mgr.replication_one_per_device = True
self.df_mgr.replication_lock_timeout = 0.1
- dev_path = os.path.join(self.testdir, self.existing_device1)
- dev_path2 = os.path.join(self.testdir, self.existing_device2)
- with self.df_mgr.replication_lock(dev_path):
+ with self.df_mgr.replication_lock(self.existing_device1):
lock_exc = None
try:
- with self.df_mgr.replication_lock(dev_path2):
+ with self.df_mgr.replication_lock(self.existing_device2):
pass
except ReplicationLockTimeout as err:
lock_exc = err
@@ -965,10 +669,1094 @@ class TestDiskFileManager(unittest.TestCase):
self.assertTrue('splice()' in warnings[-1])
self.assertFalse(mgr.use_splice)
+ def test_get_diskfile_from_hash_dev_path_fail(self):
+ self.df_mgr.get_dev_path = mock.MagicMock(return_value=None)
+ with nested(
+ mock.patch(self._manager_mock('diskfile_cls')),
+ mock.patch(self._manager_mock('hash_cleanup_listdir')),
+ mock.patch('swift.obj.diskfile.read_metadata')) as \
+ (dfclass, hclistdir, readmeta):
+ hclistdir.return_value = ['1381679759.90941.data']
+ readmeta.return_value = {'name': '/a/c/o'}
+ self.assertRaises(
+ DiskFileDeviceUnavailable,
+ self.df_mgr.get_diskfile_from_hash,
+ 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
+
+ def test_get_diskfile_from_hash_not_dir(self):
+ self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
+ with nested(
+ mock.patch(self._manager_mock('diskfile_cls')),
+ mock.patch(self._manager_mock('hash_cleanup_listdir')),
+ mock.patch('swift.obj.diskfile.read_metadata'),
+ mock.patch(self._manager_mock('quarantine_renamer'))) as \
+ (dfclass, hclistdir, readmeta, quarantine_renamer):
+ osexc = OSError()
+ osexc.errno = errno.ENOTDIR
+ hclistdir.side_effect = osexc
+ readmeta.return_value = {'name': '/a/c/o'}
+ self.assertRaises(
+ DiskFileNotExist,
+ self.df_mgr.get_diskfile_from_hash,
+ 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
+ quarantine_renamer.assert_called_once_with(
+ '/srv/dev/',
+ '/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900')
+
+ def test_get_diskfile_from_hash_no_dir(self):
+ self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
+ with nested(
+ mock.patch(self._manager_mock('diskfile_cls')),
+ mock.patch(self._manager_mock('hash_cleanup_listdir')),
+ mock.patch('swift.obj.diskfile.read_metadata')) as \
+ (dfclass, hclistdir, readmeta):
+ osexc = OSError()
+ osexc.errno = errno.ENOENT
+ hclistdir.side_effect = osexc
+ readmeta.return_value = {'name': '/a/c/o'}
+ self.assertRaises(
+ DiskFileNotExist,
+ self.df_mgr.get_diskfile_from_hash,
+ 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
+
+ def test_get_diskfile_from_hash_other_oserror(self):
+ self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
+ with nested(
+ mock.patch(self._manager_mock('diskfile_cls')),
+ mock.patch(self._manager_mock('hash_cleanup_listdir')),
+ mock.patch('swift.obj.diskfile.read_metadata')) as \
+ (dfclass, hclistdir, readmeta):
+ osexc = OSError()
+ hclistdir.side_effect = osexc
+ readmeta.return_value = {'name': '/a/c/o'}
+ self.assertRaises(
+ OSError,
+ self.df_mgr.get_diskfile_from_hash,
+ 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
+
+ def test_get_diskfile_from_hash_no_actual_files(self):
+ self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
+ with nested(
+ mock.patch(self._manager_mock('diskfile_cls')),
+ mock.patch(self._manager_mock('hash_cleanup_listdir')),
+ mock.patch('swift.obj.diskfile.read_metadata')) as \
+ (dfclass, hclistdir, readmeta):
+ hclistdir.return_value = []
+ readmeta.return_value = {'name': '/a/c/o'}
+ self.assertRaises(
+ DiskFileNotExist,
+ self.df_mgr.get_diskfile_from_hash,
+ 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
+
+ def test_get_diskfile_from_hash_read_metadata_problem(self):
+ self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
+ with nested(
+ mock.patch(self._manager_mock('diskfile_cls')),
+ mock.patch(self._manager_mock('hash_cleanup_listdir')),
+ mock.patch('swift.obj.diskfile.read_metadata')) as \
+ (dfclass, hclistdir, readmeta):
+ hclistdir.return_value = ['1381679759.90941.data']
+ readmeta.side_effect = EOFError()
+ self.assertRaises(
+ DiskFileNotExist,
+ self.df_mgr.get_diskfile_from_hash,
+ 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
+
+ def test_get_diskfile_from_hash_no_meta_name(self):
+ self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
+ with nested(
+ mock.patch(self._manager_mock('diskfile_cls')),
+ mock.patch(self._manager_mock('hash_cleanup_listdir')),
+ mock.patch('swift.obj.diskfile.read_metadata')) as \
+ (dfclass, hclistdir, readmeta):
+ hclistdir.return_value = ['1381679759.90941.data']
+ readmeta.return_value = {}
+            exc = None
+            try:
+ self.df_mgr.get_diskfile_from_hash(
+ 'dev', '9', '9a7175077c01a23ade5956b8a2bba900',
+ POLICIES[0])
+ except DiskFileNotExist as err:
+ exc = err
+ self.assertEqual(str(exc), '')
+
+ def test_get_diskfile_from_hash_bad_meta_name(self):
+ self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
+ with nested(
+ mock.patch(self._manager_mock('diskfile_cls')),
+ mock.patch(self._manager_mock('hash_cleanup_listdir')),
+ mock.patch('swift.obj.diskfile.read_metadata')) as \
+ (dfclass, hclistdir, readmeta):
+ hclistdir.return_value = ['1381679759.90941.data']
+ readmeta.return_value = {'name': 'bad'}
+            exc = None
+            try:
+ self.df_mgr.get_diskfile_from_hash(
+ 'dev', '9', '9a7175077c01a23ade5956b8a2bba900',
+ POLICIES[0])
+ except DiskFileNotExist as err:
+ exc = err
+ self.assertEqual(str(exc), '')
+
+ def test_get_diskfile_from_hash(self):
+ self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
+ with nested(
+ mock.patch(self._manager_mock('diskfile_cls')),
+ mock.patch(self._manager_mock('hash_cleanup_listdir')),
+ mock.patch('swift.obj.diskfile.read_metadata')) as \
+ (dfclass, hclistdir, readmeta):
+ hclistdir.return_value = ['1381679759.90941.data']
+ readmeta.return_value = {'name': '/a/c/o'}
+ self.df_mgr.get_diskfile_from_hash(
+ 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
+ dfclass.assert_called_once_with(
+ self.df_mgr, '/srv/dev/', self.df_mgr.threadpools['dev'], '9',
+ 'a', 'c', 'o', policy=POLICIES[0])
+ hclistdir.assert_called_once_with(
+ '/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900',
+ 604800)
+ readmeta.assert_called_once_with(
+ '/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900/'
+ '1381679759.90941.data')
+
+ def test_listdir_enoent(self):
+ oserror = OSError()
+ oserror.errno = errno.ENOENT
+ self.df_mgr.logger.error = mock.MagicMock()
+ with mock.patch('os.listdir', side_effect=oserror):
+ self.assertEqual(self.df_mgr._listdir('path'), [])
+ self.assertEqual(self.df_mgr.logger.error.mock_calls, [])
+
+ def test_listdir_other_oserror(self):
+ oserror = OSError()
+ self.df_mgr.logger.error = mock.MagicMock()
+ with mock.patch('os.listdir', side_effect=oserror):
+ self.assertEqual(self.df_mgr._listdir('path'), [])
+ self.df_mgr.logger.error.assert_called_once_with(
+ 'ERROR: Skipping %r due to error with listdir attempt: %s',
+ 'path', oserror)
+
+ def test_listdir(self):
+ self.df_mgr.logger.error = mock.MagicMock()
+ with mock.patch('os.listdir', return_value=['abc', 'def']):
+ self.assertEqual(self.df_mgr._listdir('path'), ['abc', 'def'])
+ self.assertEqual(self.df_mgr.logger.error.mock_calls, [])
+
+ def test_yield_suffixes_dev_path_fail(self):
+ self.df_mgr.get_dev_path = mock.MagicMock(return_value=None)
+ exc = None
+ try:
+            list(self.df_mgr.yield_suffixes(self.existing_device1, '9',
+                                            POLICIES[0]))
+ except DiskFileDeviceUnavailable as err:
+ exc = err
+ self.assertEqual(str(exc), '')
+
+ def test_yield_suffixes(self):
+ self.df_mgr._listdir = mock.MagicMock(return_value=[
+ 'abc', 'def', 'ghi', 'abcd', '012'])
+ dev = self.existing_device1
+ self.assertEqual(
+ list(self.df_mgr.yield_suffixes(dev, '9', POLICIES[0])),
+ [(self.testdir + '/' + dev + '/objects/9/abc', 'abc'),
+ (self.testdir + '/' + dev + '/objects/9/def', 'def'),
+ (self.testdir + '/' + dev + '/objects/9/012', '012')])
+
+ def test_yield_hashes_dev_path_fail(self):
+ self.df_mgr.get_dev_path = mock.MagicMock(return_value=None)
+ exc = None
+ try:
+ list(self.df_mgr.yield_hashes(self.existing_device1, '9',
+ POLICIES[0]))
+ except DiskFileDeviceUnavailable as err:
+ exc = err
+ self.assertEqual(str(exc), '')
+
+ def test_yield_hashes_empty(self):
+ def _listdir(path):
+ return []
+
+ with mock.patch('os.listdir', _listdir):
+ self.assertEqual(list(self.df_mgr.yield_hashes(
+ self.existing_device1, '9', POLICIES[0])), [])
+
+ def test_yield_hashes_empty_suffixes(self):
+ def _listdir(path):
+ return []
+
+ with mock.patch('os.listdir', _listdir):
+ self.assertEqual(
+ list(self.df_mgr.yield_hashes(self.existing_device1, '9',
+ POLICIES[0],
+ suffixes=['456'])), [])
+
+ def _check_yield_hashes(self, policy, suffix_map, expected, **kwargs):
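+        # suffix_map maps suffix -> {object hash -> [filenames]} and fakes
+        # the on-disk tree via the mocked os.listdir below; expected maps
+        # object hash -> the timestamp that yield_hashes should report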
+ device = self.existing_device1
+ part = '9'
+ part_path = os.path.join(
+ self.testdir, device, diskfile.get_data_dir(policy), part)
+
+ def _listdir(path):
+ if path == part_path:
+ return suffix_map.keys()
+ for suff, hash_map in suffix_map.items():
+ if path == os.path.join(part_path, suff):
+ return hash_map.keys()
+ for hash_, files in hash_map.items():
+ if path == os.path.join(part_path, suff, hash_):
+ return files
+ self.fail('Unexpected listdir of %r' % path)
+ expected_items = [
+ (os.path.join(part_path, hash_[-3:], hash_), hash_,
+ Timestamp(ts).internal)
+ for hash_, ts in expected.items()]
+ with nested(
+ mock.patch('os.listdir', _listdir),
+ mock.patch('os.unlink')):
+ df_mgr = self.df_router[policy]
+ hash_items = list(df_mgr.yield_hashes(
+ device, part, policy, **kwargs))
+ expected = sorted(expected_items)
+ actual = sorted(hash_items)
+ self.assertEqual(actual, expected,
+ 'Expected %s but got %s' % (expected, actual))
+
+ def test_yield_hashes_tombstones(self):
+ ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
+ ts1 = next(ts_iter)
+ ts2 = next(ts_iter)
+ ts3 = next(ts_iter)
+ suffix_map = {
+ '27e': {
+ '1111111111111111111111111111127e': [
+ ts1.internal + '.ts'],
+ '2222222222222222222222222222227e': [
+ ts2.internal + '.ts'],
+ },
+ 'd41': {
+ 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaad41': []
+ },
+ 'd98': {},
+ '00b': {
+ '3333333333333333333333333333300b': [
+ ts1.internal + '.ts',
+ ts2.internal + '.ts',
+ ts3.internal + '.ts',
+ ]
+ },
+ '204': {
+ 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbb204': [
+ ts3.internal + '.ts',
+ ]
+ }
+ }
+ expected = {
+ '1111111111111111111111111111127e': ts1.internal,
+ '2222222222222222222222222222227e': ts2.internal,
+ '3333333333333333333333333333300b': ts3.internal,
+ }
+ for policy in POLICIES:
+ self._check_yield_hashes(policy, suffix_map, expected,
+ suffixes=['27e', '00b'])
+
@patch_policies
-class TestDiskFile(unittest.TestCase):
- """Test swift.obj.diskfile.DiskFile"""
+class TestDiskFileManager(DiskFileManagerMixin, unittest.TestCase):
+
+ mgr_cls = diskfile.DiskFileManager
+
+ def test_get_ondisk_files_with_repl_policy(self):
+ # Each scenario specifies a list of (filename, extension) tuples. If
+ # extension is set then that filename should be returned by the method
+ # under test for that extension type.
+ scenarios = [[('0000000007.00000.data', '.data')],
+
+ [('0000000007.00000.ts', '.ts')],
+
+ # older tombstone is ignored
+ [('0000000007.00000.ts', '.ts'),
+ ('0000000006.00000.ts', False)],
+
+ # older data is ignored
+ [('0000000007.00000.data', '.data'),
+ ('0000000006.00000.data', False),
+ ('0000000004.00000.ts', False)],
+
+ # newest meta trumps older meta
+ [('0000000009.00000.meta', '.meta'),
+ ('0000000008.00000.meta', False),
+ ('0000000007.00000.data', '.data'),
+ ('0000000004.00000.ts', False)],
+
+ # meta older than data is ignored
+ [('0000000007.00000.data', '.data'),
+ ('0000000006.00000.meta', False),
+ ('0000000004.00000.ts', False)],
+
+ # meta without data is ignored
+ [('0000000007.00000.meta', False, True),
+ ('0000000006.00000.ts', '.ts'),
+ ('0000000004.00000.data', False)],
+
+ # tombstone trumps meta and data at same timestamp
+ [('0000000006.00000.meta', False),
+ ('0000000006.00000.ts', '.ts'),
+ ('0000000006.00000.data', False)],
+ ]
+
+ self._test_get_ondisk_files(scenarios, POLICIES[0], None)
+ self._test_hash_cleanup_listdir_files(scenarios, POLICIES[0])
+ self._test_yield_hashes_cleanup(scenarios, POLICIES[0])
+
+ def test_get_ondisk_files_with_stray_meta(self):
+ # get_ondisk_files does not tolerate a stray .meta file
+
+ class_under_test = self._get_diskfile(POLICIES[0])
+ files = ['0000000007.00000.meta']
+
+ self.assertRaises(AssertionError,
+ class_under_test.manager.get_ondisk_files, files,
+ self.testdir)
+
+ def test_yield_hashes(self):
+ old_ts = '1383180000.12345'
+ fresh_ts = Timestamp(time() - 10).internal
+ fresher_ts = Timestamp(time() - 1).internal
+ suffix_map = {
+ 'abc': {
+ '9373a92d072897b136b3fc06595b4abc': [
+ fresh_ts + '.ts'],
+ },
+ '456': {
+ '9373a92d072897b136b3fc06595b0456': [
+ old_ts + '.data'],
+ '9373a92d072897b136b3fc06595b7456': [
+ fresh_ts + '.ts',
+ fresher_ts + '.data'],
+ },
+ 'def': {},
+ }
+ expected = {
+ '9373a92d072897b136b3fc06595b4abc': fresh_ts,
+ '9373a92d072897b136b3fc06595b0456': old_ts,
+ '9373a92d072897b136b3fc06595b7456': fresher_ts,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected)
+
+ def test_yield_hashes_yields_meta_timestamp(self):
+ ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
+ ts1 = next(ts_iter)
+ ts2 = next(ts_iter)
+ ts3 = next(ts_iter)
+ suffix_map = {
+ 'abc': {
+ '9373a92d072897b136b3fc06595b4abc': [
+ ts1.internal + '.ts',
+ ts2.internal + '.meta'],
+ },
+ '456': {
+ '9373a92d072897b136b3fc06595b0456': [
+ ts1.internal + '.data',
+ ts2.internal + '.meta',
+ ts3.internal + '.meta'],
+ '9373a92d072897b136b3fc06595b7456': [
+ ts1.internal + '.data',
+ ts2.internal + '.meta'],
+ },
+ }
+ expected = {
+ '9373a92d072897b136b3fc06595b4abc': ts2,
+ '9373a92d072897b136b3fc06595b0456': ts3,
+ '9373a92d072897b136b3fc06595b7456': ts2,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected)
+
+ def test_yield_hashes_suffix_filter(self):
+ # test again with limited suffixes
+ old_ts = '1383180000.12345'
+ fresh_ts = Timestamp(time() - 10).internal
+ fresher_ts = Timestamp(time() - 1).internal
+ suffix_map = {
+ 'abc': {
+ '9373a92d072897b136b3fc06595b4abc': [
+ fresh_ts + '.ts'],
+ },
+ '456': {
+ '9373a92d072897b136b3fc06595b0456': [
+ old_ts + '.data'],
+ '9373a92d072897b136b3fc06595b7456': [
+ fresh_ts + '.ts',
+ fresher_ts + '.data'],
+ },
+ 'def': {},
+ }
+ expected = {
+ '9373a92d072897b136b3fc06595b0456': old_ts,
+ '9373a92d072897b136b3fc06595b7456': fresher_ts,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ suffixes=['456'])
+
+ def test_yield_hashes_fails_with_bad_ondisk_filesets(self):
+ ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
+ ts1 = next(ts_iter)
+ suffix_map = {
+ '456': {
+ '9373a92d072897b136b3fc06595b0456': [
+ ts1.internal + '.data'],
+ '9373a92d072897b136b3fc06595ba456': [
+ ts1.internal + '.meta'],
+ },
+ }
+ expected = {
+ '9373a92d072897b136b3fc06595b0456': ts1,
+ }
+ try:
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ frag_index=2)
+ self.fail('Expected AssertionError')
+ except AssertionError:
+ pass
+
+
+@patch_policies(with_ec_default=True)
+class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase):
+
+ mgr_cls = diskfile.ECDiskFileManager
+
+ def test_get_ondisk_files_with_ec_policy(self):
+ # Each scenario specifies a list of (filename, extension, [survives])
+ # tuples. If extension is set then that filename should be returned by
+ # the method under test for that extension type. If the optional
+ # 'survives' is True, the filename should still be in the dir after
+ # cleanup.
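+        # In broad terms: an EC .data file only counts once a .durable
+        # exists at the same timestamp, so the scenarios below pair (or
+        # deliberately omit) .durable files to exercise that rule.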
+ scenarios = [[('0000000007.00000.ts', '.ts')],
+
+ [('0000000007.00000.ts', '.ts'),
+ ('0000000006.00000.ts', False)],
+
+ # highest frag index is chosen by default
+ [('0000000007.00000.durable', '.durable'),
+ ('0000000007.00000#1.data', '.data'),
+ ('0000000007.00000#0.data', False, True)],
+
+ # data with no durable is ignored
+ [('0000000007.00000#0.data', False, True)],
+
+ # data newer than durable is ignored
+ [('0000000008.00000#1.data', False, True),
+ ('0000000007.00000.durable', '.durable'),
+ ('0000000007.00000#1.data', '.data'),
+ ('0000000007.00000#0.data', False, True)],
+
+                     # data newer than durable ignored, even if it's the
+                     # only data
+ [('0000000008.00000#1.data', False, True),
+ ('0000000007.00000.durable', False, False)],
+
+ # data older than durable is ignored
+ [('0000000007.00000.durable', '.durable'),
+ ('0000000007.00000#1.data', '.data'),
+ ('0000000006.00000#1.data', False),
+ ('0000000004.00000.ts', False)],
+
+                     # data older than durable ignored, even if it's the
+                     # only data
+ [('0000000007.00000.durable', False, False),
+ ('0000000006.00000#1.data', False),
+ ('0000000004.00000.ts', False)],
+
+ # newer meta trumps older meta
+ [('0000000009.00000.meta', '.meta'),
+ ('0000000008.00000.meta', False),
+ ('0000000007.00000.durable', '.durable'),
+ ('0000000007.00000#14.data', '.data'),
+ ('0000000004.00000.ts', False)],
+
+ # older meta is ignored
+ [('0000000007.00000.durable', '.durable'),
+ ('0000000007.00000#14.data', '.data'),
+ ('0000000006.00000.meta', False),
+ ('0000000004.00000.ts', False)],
+
+ # tombstone trumps meta, data, durable at older timestamp
+ [('0000000006.00000.ts', '.ts'),
+ ('0000000005.00000.meta', False),
+ ('0000000004.00000.durable', False),
+ ('0000000004.00000#0.data', False)],
+
+ # tombstone trumps meta, data, durable at same timestamp
+ [('0000000006.00000.meta', False),
+ ('0000000006.00000.ts', '.ts'),
+ ('0000000006.00000.durable', False),
+ ('0000000006.00000#0.data', False)],
+
+ # missing durable invalidates data
+ [('0000000006.00000.meta', False, True),
+ ('0000000006.00000#0.data', False, True)]
+ ]
+
+ self._test_get_ondisk_files(scenarios, POLICIES.default, None)
+ self._test_hash_cleanup_listdir_files(scenarios, POLICIES.default)
+ self._test_yield_hashes_cleanup(scenarios, POLICIES.default)
+
+ def test_get_ondisk_files_with_ec_policy_and_frag_index(self):
+ # Each scenario specifies a list of (filename, extension) tuples. If
+ # extension is set then that filename should be returned by the method
+ # under test for that extension type.
+ scenarios = [[('0000000007.00000#2.data', False, True),
+ ('0000000007.00000#1.data', '.data'),
+ ('0000000007.00000#0.data', False, True),
+ ('0000000007.00000.durable', '.durable')],
+
+ # specific frag newer than durable is ignored
+ [('0000000007.00000#2.data', False, True),
+ ('0000000007.00000#1.data', False, True),
+ ('0000000007.00000#0.data', False, True),
+ ('0000000006.00000.durable', '.durable')],
+
+ # specific frag older than durable is ignored
+ [('0000000007.00000#2.data', False),
+ ('0000000007.00000#1.data', False),
+ ('0000000007.00000#0.data', False),
+ ('0000000008.00000.durable', '.durable')],
+
+ # specific frag older than newest durable is ignored
+                     # even if it also has a durable
+ [('0000000007.00000#2.data', False),
+ ('0000000007.00000#1.data', False),
+ ('0000000007.00000.durable', False),
+ ('0000000008.00000#0.data', False),
+ ('0000000008.00000.durable', '.durable')],
+
+ # meta included when frag index is specified
+ [('0000000009.00000.meta', '.meta'),
+ ('0000000007.00000#2.data', False, True),
+ ('0000000007.00000#1.data', '.data'),
+ ('0000000007.00000#0.data', False, True),
+ ('0000000007.00000.durable', '.durable')],
+
+ # specific frag older than tombstone is ignored
+ [('0000000009.00000.ts', '.ts'),
+ ('0000000007.00000#2.data', False),
+ ('0000000007.00000#1.data', False),
+ ('0000000007.00000#0.data', False),
+ ('0000000007.00000.durable', False)],
+
+ # no data file returned if specific frag index missing
+ [('0000000007.00000#2.data', False, True),
+ ('0000000007.00000#14.data', False, True),
+ ('0000000007.00000#0.data', False, True),
+ ('0000000007.00000.durable', '.durable')],
+
+ # meta ignored if specific frag index missing
+ [('0000000008.00000.meta', False, True),
+ ('0000000007.00000#14.data', False, True),
+ ('0000000007.00000#0.data', False, True),
+ ('0000000007.00000.durable', '.durable')],
+
+ # meta ignored if no data files
+                     # Note: this is anomalous: when a frag_index is
+                     # specified, get_ondisk_files will tolerate a .meta
+                     # with no .data
+ [('0000000088.00000.meta', False, True),
+ ('0000000077.00000.durable', '.durable')]
+ ]
+
+ self._test_get_ondisk_files(scenarios, POLICIES.default, frag_index=1)
+ # note: not calling self._test_hash_cleanup_listdir_files(scenarios, 0)
+ # here due to the anomalous scenario as commented above
+
+ def test_hash_cleanup_listdir_reclaim(self):
+ # Each scenario specifies a list of (filename, extension, [survives])
+ # tuples. If extension is set or 'survives' is True, the filename
+ # should still be in the dir after cleanup.
+ much_older = Timestamp(time() - 2000).internal
+ older = Timestamp(time() - 1001).internal
+ newer = Timestamp(time() - 900).internal
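+        # with reclaim_age=1000 passed below, 'older' and 'much_older'
+        # files are past the reclaim horizon while 'newer' files are not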
+ scenarios = [[('%s.ts' % older, False, False)],
+
+ # fresh tombstone is preserved
+ [('%s.ts' % newer, '.ts', True)],
+
+ # isolated .durable is cleaned up immediately
+ [('%s.durable' % newer, False, False)],
+
+ # ...even when other older files are in dir
+ [('%s.durable' % older, False, False),
+ ('%s.ts' % much_older, False, False)],
+
+ # isolated .data files are cleaned up when stale
+ [('%s#2.data' % older, False, False),
+ ('%s#4.data' % older, False, False)],
+
+ # ...even when there is an older durable fileset
+ [('%s#2.data' % older, False, False),
+ ('%s#4.data' % older, False, False),
+ ('%s#2.data' % much_older, '.data', True),
+ ('%s#4.data' % much_older, False, True),
+ ('%s.durable' % much_older, '.durable', True)],
+
+ # ... but preserved if still fresh
+ [('%s#2.data' % newer, False, True),
+ ('%s#4.data' % newer, False, True)],
+
+ # ... and we could have a mixture of fresh and stale .data
+ [('%s#2.data' % newer, False, True),
+ ('%s#4.data' % older, False, False)],
+
+                     # TODO: these remaining scenarios exhibit behavior
+                     # that differs from the legacy replication
+                     # DiskFileManager...
+
+ # tombstone reclaimed despite newer non-durable data
+ [('%s#2.data' % newer, False, True),
+ ('%s#4.data' % older, False, False),
+ ('%s.ts' % much_older, '.ts', False)],
+
+                     # tombstone reclaimed despite much older durable
+ [('%s.ts' % older, '.ts', False),
+ ('%s.durable' % much_older, False, False)],
+
+ # tombstone reclaimed despite junk file
+ [('junk', False, True),
+ ('%s.ts' % much_older, '.ts', False)],
+ ]
+
+ self._test_hash_cleanup_listdir_files(scenarios, POLICIES.default,
+ reclaim_age=1000)
+
+ def test_get_ondisk_files_with_stray_meta(self):
+ # get_ondisk_files does not tolerate a stray .meta file
+ scenarios = [['0000000007.00000.meta'],
+
+ ['0000000007.00000.meta',
+ '0000000006.00000.durable'],
+
+ ['0000000007.00000.meta',
+ '0000000006.00000#1.data'],
+
+ ['0000000007.00000.meta',
+ '0000000006.00000.durable',
+ '0000000005.00000#1.data']
+ ]
+        for files in scenarios:
+            class_under_test = self._get_diskfile(POLICIES.default)
+            # fake the datadir listing so each scenario is actually seen
+            with mock.patch('swift.obj.diskfile.os.listdir',
+                            lambda _: files):
+                self.assertRaises(DiskFileNotExist, class_under_test.open)
+
+ def test_parse_on_disk_filename(self):
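+        # EC .data filenames embed the fragment index after a '#', e.g.
+        # '1234567890.00001#2.data'; .meta, .durable and .ts names carry
+        # only the internalized timestamp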
+ mgr = self.df_router[POLICIES.default]
+ for ts in (Timestamp('1234567890.00001'),
+ Timestamp('1234567890.00001', offset=17)):
+ for frag in (0, 2, 14):
+ fname = '%s#%s.data' % (ts.internal, frag)
+ info = mgr.parse_on_disk_filename(fname)
+ self.assertEqual(ts, info['timestamp'])
+ self.assertEqual(frag, info['frag_index'])
+ self.assertEqual(mgr.make_on_disk_filename(**info), fname)
+
+ for ext in ('.meta', '.durable', '.ts'):
+ fname = '%s%s' % (ts.internal, ext)
+ info = mgr.parse_on_disk_filename(fname)
+ self.assertEqual(ts, info['timestamp'])
+ self.assertEqual(None, info['frag_index'])
+ self.assertEqual(mgr.make_on_disk_filename(**info), fname)
+
+ def test_parse_on_disk_filename_errors(self):
+ mgr = self.df_router[POLICIES.default]
+ for ts in (Timestamp('1234567890.00001'),
+ Timestamp('1234567890.00001', offset=17)):
+ fname = '%s.data' % ts.internal
+ try:
+ mgr.parse_on_disk_filename(fname)
+ msg = 'Expected DiskFileError for filename %s' % fname
+ self.fail(msg)
+ except DiskFileError:
+ pass
+
+ expected = {
+ '': 'bad',
+ 'foo': 'bad',
+ '1.314': 'bad',
+ 1.314: 'bad',
+ -2: 'negative',
+ '-2': 'negative',
+ None: 'bad',
+ 'None': 'bad',
+ }
+
+ for frag, msg in expected.items():
+ fname = '%s#%s.data' % (ts.internal, frag)
+ try:
+ mgr.parse_on_disk_filename(fname)
+ except DiskFileError as e:
+ self.assertTrue(msg in str(e).lower())
+ else:
+ msg = 'Expected DiskFileError for filename %s' % fname
+ self.fail(msg)
+
+ def test_make_on_disk_filename(self):
+ mgr = self.df_router[POLICIES.default]
+ for ts in (Timestamp('1234567890.00001'),
+ Timestamp('1234567890.00001', offset=17)):
+ for frag in (0, '0', 2, '2', 14, '14'):
+ expected = '%s#%s.data' % (ts.internal, frag)
+ actual = mgr.make_on_disk_filename(
+ ts, '.data', frag_index=frag)
+ self.assertEqual(expected, actual)
+ parsed = mgr.parse_on_disk_filename(actual)
+ self.assertEqual(parsed, {
+ 'timestamp': ts,
+ 'frag_index': int(frag),
+ 'ext': '.data',
+ })
+ # these functions are inverse
+ self.assertEqual(
+ mgr.make_on_disk_filename(**parsed),
+ expected)
+
+ for ext in ('.meta', '.durable', '.ts'):
+ expected = '%s%s' % (ts.internal, ext)
+ # frag index should not be required
+ actual = mgr.make_on_disk_filename(ts, ext)
+ self.assertEqual(expected, actual)
+ # frag index should be ignored
+ actual = mgr.make_on_disk_filename(
+ ts, ext, frag_index=frag)
+ self.assertEqual(expected, actual)
+ parsed = mgr.parse_on_disk_filename(actual)
+ self.assertEqual(parsed, {
+ 'timestamp': ts,
+ 'frag_index': None,
+ 'ext': ext,
+ })
+ # these functions are inverse
+ self.assertEqual(
+ mgr.make_on_disk_filename(**parsed),
+ expected)
+
+ actual = mgr.make_on_disk_filename(ts)
+ self.assertEqual(ts, actual)
+
+ def test_make_on_disk_filename_with_bad_frag_index(self):
+ mgr = self.df_router[POLICIES.default]
+ ts = Timestamp('1234567890.00001')
+ try:
+ # .data requires a frag_index kwarg
+ mgr.make_on_disk_filename(ts, '.data')
+ self.fail('Expected DiskFileError for missing frag_index')
+ except DiskFileError:
+ pass
+
+ for frag in (None, 'foo', '1.314', 1.314, -2, '-2'):
+ try:
+ mgr.make_on_disk_filename(ts, '.data', frag_index=frag)
+ self.fail('Expected DiskFileError for frag_index %s' % frag)
+ except DiskFileError:
+ pass
+ for ext in ('.meta', '.durable', '.ts'):
+ expected = '%s%s' % (ts.internal, ext)
+ # bad frag index should be ignored
+ actual = mgr.make_on_disk_filename(ts, ext, frag_index=frag)
+ self.assertEqual(expected, actual)
+
+ def test_is_obsolete(self):
+ mgr = self.df_router[POLICIES.default]
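+        # the rule assumed here: is_obsolete(f_1, f_2) is True only when
+        # f_1's timestamp is strictly older than f_2's, regardless of
+        # extension or fragment index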
+ for ts in (Timestamp('1234567890.00001'),
+ Timestamp('1234567890.00001', offset=17)):
+ for ts2 in (Timestamp('1234567890.99999'),
+ Timestamp('1234567890.99999', offset=17),
+ ts):
+ f_2 = mgr.make_on_disk_filename(ts, '.durable')
+ for fi in (0, 2):
+ for ext in ('.data', '.meta', '.durable', '.ts'):
+ f_1 = mgr.make_on_disk_filename(
+ ts2, ext, frag_index=fi)
+ self.assertFalse(mgr.is_obsolete(f_1, f_2),
+ '%s should not be obsolete w.r.t. %s'
+ % (f_1, f_2))
+
+ for ts2 in (Timestamp('1234567890.00000'),
+ Timestamp('1234500000.00000', offset=0),
+ Timestamp('1234500000.00000', offset=17)):
+ f_2 = mgr.make_on_disk_filename(ts, '.durable')
+ for fi in (0, 2):
+ for ext in ('.data', '.meta', '.durable', '.ts'):
+ f_1 = mgr.make_on_disk_filename(
+ ts2, ext, frag_index=fi)
+                    self.assertTrue(mgr.is_obsolete(f_1, f_2),
+                                    '%s should be obsolete w.r.t. %s'
+                                    % (f_1, f_2))
+
+ def test_yield_hashes(self):
+ old_ts = '1383180000.12345'
+ fresh_ts = Timestamp(time() - 10).internal
+ fresher_ts = Timestamp(time() - 1).internal
+ suffix_map = {
+ 'abc': {
+ '9373a92d072897b136b3fc06595b4abc': [
+ fresh_ts + '.ts'],
+ },
+ '456': {
+ '9373a92d072897b136b3fc06595b0456': [
+ old_ts + '#2.data',
+ old_ts + '.durable'],
+ '9373a92d072897b136b3fc06595b7456': [
+ fresh_ts + '.ts',
+ fresher_ts + '#2.data',
+ fresher_ts + '.durable'],
+ },
+ 'def': {},
+ }
+ expected = {
+ '9373a92d072897b136b3fc06595b4abc': fresh_ts,
+ '9373a92d072897b136b3fc06595b0456': old_ts,
+ '9373a92d072897b136b3fc06595b7456': fresher_ts,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ frag_index=2)
+
+ def test_yield_hashes_yields_meta_timestamp(self):
+ ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
+ ts1 = next(ts_iter)
+ ts2 = next(ts_iter)
+ ts3 = next(ts_iter)
+ suffix_map = {
+ 'abc': {
+ '9373a92d072897b136b3fc06595b4abc': [
+ ts1.internal + '.ts',
+ ts2.internal + '.meta'],
+ },
+ '456': {
+ '9373a92d072897b136b3fc06595b0456': [
+ ts1.internal + '#2.data',
+ ts1.internal + '.durable',
+ ts2.internal + '.meta',
+ ts3.internal + '.meta'],
+ '9373a92d072897b136b3fc06595b7456': [
+ ts1.internal + '#2.data',
+ ts1.internal + '.durable',
+ ts2.internal + '.meta'],
+ },
+ }
+ expected = {
+ # TODO: differs from repl DiskFileManager which *will*
+ # return meta timestamp when only meta and ts on disk
+ '9373a92d072897b136b3fc06595b4abc': ts1,
+ '9373a92d072897b136b3fc06595b0456': ts3,
+ '9373a92d072897b136b3fc06595b7456': ts2,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected)
+
+ # but meta timestamp is not returned if specified frag index
+ # is not found
+ expected = {
+ # TODO: differs from repl DiskFileManager which *will*
+ # return meta timestamp when only meta and ts on disk
+ '9373a92d072897b136b3fc06595b4abc': ts1,
+ '9373a92d072897b136b3fc06595b0456': ts3,
+ '9373a92d072897b136b3fc06595b7456': ts2,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ frag_index=3)
+
+ def test_yield_hashes_suffix_filter(self):
+ # test again with limited suffixes
+ old_ts = '1383180000.12345'
+ fresh_ts = Timestamp(time() - 10).internal
+ fresher_ts = Timestamp(time() - 1).internal
+ suffix_map = {
+ 'abc': {
+ '9373a92d072897b136b3fc06595b4abc': [
+ fresh_ts + '.ts'],
+ },
+ '456': {
+ '9373a92d072897b136b3fc06595b0456': [
+ old_ts + '#2.data',
+ old_ts + '.durable'],
+ '9373a92d072897b136b3fc06595b7456': [
+ fresh_ts + '.ts',
+ fresher_ts + '#2.data',
+ fresher_ts + '.durable'],
+ },
+ 'def': {},
+ }
+ expected = {
+ '9373a92d072897b136b3fc06595b0456': old_ts,
+ '9373a92d072897b136b3fc06595b7456': fresher_ts,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ suffixes=['456'], frag_index=2)
+
+ def test_yield_hashes_skips_missing_durable(self):
+ ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
+ ts1 = next(ts_iter)
+ suffix_map = {
+ '456': {
+ '9373a92d072897b136b3fc06595b0456': [
+ ts1.internal + '#2.data',
+ ts1.internal + '.durable'],
+ '9373a92d072897b136b3fc06595b7456': [
+ ts1.internal + '#2.data'],
+ },
+ }
+ expected = {
+ '9373a92d072897b136b3fc06595b0456': ts1,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ frag_index=2)
+
+ # if we add a durable it shows up
+ suffix_map['456']['9373a92d072897b136b3fc06595b7456'].append(
+ ts1.internal + '.durable')
+ expected = {
+ '9373a92d072897b136b3fc06595b0456': ts1,
+ '9373a92d072897b136b3fc06595b7456': ts1,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ frag_index=2)
+
+ def test_yield_hashes_skips_data_without_durable(self):
+ ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
+ ts1 = next(ts_iter)
+ ts2 = next(ts_iter)
+ ts3 = next(ts_iter)
+ suffix_map = {
+ '456': {
+ '9373a92d072897b136b3fc06595b0456': [
+ ts1.internal + '#2.data',
+ ts1.internal + '.durable',
+ ts2.internal + '#2.data',
+ ts3.internal + '#2.data'],
+ },
+ }
+ expected = {
+ '9373a92d072897b136b3fc06595b0456': ts1,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ frag_index=None)
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ frag_index=2)
+
+ # if we add a durable then newer data shows up
+ suffix_map['456']['9373a92d072897b136b3fc06595b0456'].append(
+ ts2.internal + '.durable')
+ expected = {
+ '9373a92d072897b136b3fc06595b0456': ts2,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ frag_index=None)
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ frag_index=2)
+
+ def test_yield_hashes_ignores_bad_ondisk_filesets(self):
+ # this differs from DiskFileManager.yield_hashes which will fail
+ # when encountering a bad on-disk file set
+ ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
+ ts1 = next(ts_iter)
+ ts2 = next(ts_iter)
+ suffix_map = {
+ '456': {
+ '9373a92d072897b136b3fc06595b0456': [
+ ts1.internal + '#2.data',
+ ts1.internal + '.durable'],
+ '9373a92d072897b136b3fc06595b7456': [
+ ts1.internal + '.data'],
+ '9373a92d072897b136b3fc06595b8456': [
+ 'junk_file'],
+ '9373a92d072897b136b3fc06595b9456': [
+ ts1.internal + '.data',
+ ts2.internal + '.meta'],
+ '9373a92d072897b136b3fc06595ba456': [
+ ts1.internal + '.meta'],
+ '9373a92d072897b136b3fc06595bb456': [
+ ts1.internal + '.meta',
+ ts2.internal + '.meta'],
+ },
+ }
+ expected = {
+ '9373a92d072897b136b3fc06595b0456': ts1,
+ '9373a92d072897b136b3fc06595ba456': ts1,
+ '9373a92d072897b136b3fc06595bb456': ts2,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ frag_index=2)
+
+ def test_yield_hashes_filters_frag_index(self):
+ ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
+ ts1 = next(ts_iter)
+ ts2 = next(ts_iter)
+ ts3 = next(ts_iter)
+ suffix_map = {
+ '27e': {
+ '1111111111111111111111111111127e': [
+ ts1.internal + '#2.data',
+ ts1.internal + '#3.data',
+ ts1.internal + '.durable',
+ ],
+ '2222222222222222222222222222227e': [
+ ts1.internal + '#2.data',
+ ts1.internal + '.durable',
+ ts2.internal + '#2.data',
+ ts2.internal + '.durable',
+ ],
+ },
+ 'd41': {
+ 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaad41': [
+ ts1.internal + '#3.data',
+ ts1.internal + '.durable',
+ ],
+ },
+ '00b': {
+ '3333333333333333333333333333300b': [
+ ts1.internal + '#2.data',
+ ts2.internal + '#2.data',
+ ts3.internal + '#2.data',
+ ts3.internal + '.durable',
+ ],
+ },
+ }
+ expected = {
+ '1111111111111111111111111111127e': ts1,
+ '2222222222222222222222222222227e': ts2,
+ '3333333333333333333333333333300b': ts3,
+ }
+ self._check_yield_hashes(POLICIES.default, suffix_map, expected,
+ frag_index=2)
+
+ def test_get_diskfile_from_hash_frag_index_filter(self):
+ df = self._get_diskfile(POLICIES.default)
+ hash_ = os.path.basename(df._datadir)
+ self.assertRaises(DiskFileNotExist,
+ self.df_mgr.get_diskfile_from_hash,
+ self.existing_device1, '0', hash_,
+ POLICIES.default) # sanity
+ timestamp = Timestamp(time())
+ for frag_index in (4, 7):
+ with df.create() as writer:
+ data = 'test_data'
+ writer.write(data)
+ metadata = {
+ 'ETag': md5(data).hexdigest(),
+ 'X-Timestamp': timestamp.internal,
+ 'Content-Length': len(data),
+ 'X-Object-Sysmeta-Ec-Frag-Index': str(frag_index),
+ }
+ writer.put(metadata)
+ writer.commit(timestamp)
+
+ df4 = self.df_mgr.get_diskfile_from_hash(
+ self.existing_device1, '0', hash_, POLICIES.default, frag_index=4)
+ self.assertEqual(df4._frag_index, 4)
+ self.assertEqual(
+ df4.read_metadata()['X-Object-Sysmeta-Ec-Frag-Index'], '4')
+ df7 = self.df_mgr.get_diskfile_from_hash(
+ self.existing_device1, '0', hash_, POLICIES.default, frag_index=7)
+ self.assertEqual(df7._frag_index, 7)
+ self.assertEqual(
+ df7.read_metadata()['X-Object-Sysmeta-Ec-Frag-Index'], '7')
+
+
+class DiskFileMixin(BaseDiskFileTestMixin):
+
+ # set mgr_cls on subclasses
+ mgr_cls = None
def setUp(self):
"""Set up for testing swift.obj.diskfile"""
@@ -978,12 +1766,22 @@ class TestDiskFile(unittest.TestCase):
self.existing_device = 'sda1'
for policy in POLICIES:
mkdirs(os.path.join(self.testdir, self.existing_device,
- get_tmp_dir(policy.idx)))
+ diskfile.get_tmp_dir(policy)))
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
self.conf = dict(devices=self.testdir, mount_check='false',
keep_cache_size=2 * 1024, mb_per_sync=1)
- self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
+ self.logger = debug_logger('test-' + self.__class__.__name__)
+ self.df_mgr = self.mgr_cls(self.conf, self.logger)
+ self.df_router = diskfile.DiskFileRouter(self.conf, self.logger)
+ self._ts_iter = (Timestamp(t) for t in
+ itertools.count(int(time())))
+
+ def ts(self):
+ """
+ Timestamps - forever.
+ """
+ return next(self._ts_iter)
def tearDown(self):
"""Tear down for testing swift.obj.diskfile"""
@@ -995,11 +1793,11 @@ class TestDiskFile(unittest.TestCase):
mkdirs(df._datadir)
if timestamp is None:
timestamp = time()
- timestamp = Timestamp(timestamp).internal
+ timestamp = Timestamp(timestamp)
if not metadata:
metadata = {}
if 'X-Timestamp' not in metadata:
- metadata['X-Timestamp'] = Timestamp(timestamp).internal
+ metadata['X-Timestamp'] = timestamp.internal
if 'ETag' not in metadata:
etag = md5()
etag.update(data)
@@ -1008,17 +1806,24 @@ class TestDiskFile(unittest.TestCase):
metadata['name'] = '/a/c/o'
if 'Content-Length' not in metadata:
metadata['Content-Length'] = str(len(data))
- data_file = os.path.join(df._datadir, timestamp + ext)
+ filename = timestamp.internal + ext
+ if ext == '.data' and df.policy.policy_type == EC_POLICY:
+ filename = '%s#%s.data' % (timestamp.internal, df._frag_index)
+ data_file = os.path.join(df._datadir, filename)
with open(data_file, 'wb') as f:
f.write(data)
xattr.setxattr(f.fileno(), diskfile.METADATA_KEY,
pickle.dumps(metadata, diskfile.PICKLE_PROTOCOL))
def _simple_get_diskfile(self, partition='0', account='a', container='c',
- obj='o', policy_idx=0):
- return self.df_mgr.get_diskfile(self.existing_device,
- partition, account, container, obj,
- policy_idx)
+ obj='o', policy=None, frag_index=None):
+ policy = policy or POLICIES.default
+ df_mgr = self.df_router[policy]
+ if policy.policy_type == EC_POLICY and frag_index is None:
+ frag_index = 2
+ return df_mgr.get_diskfile(self.existing_device, partition,
+ account, container, obj,
+ policy=policy, frag_index=frag_index)
def _create_test_file(self, data, timestamp=None, metadata=None,
account='a', container='c', obj='o'):
@@ -1027,12 +1832,62 @@ class TestDiskFile(unittest.TestCase):
metadata.setdefault('name', '/%s/%s/%s' % (account, container, obj))
df = self._simple_get_diskfile(account=account, container=container,
obj=obj)
- self._create_ondisk_file(df, data, timestamp, metadata)
- df = self._simple_get_diskfile(account=account, container=container,
- obj=obj)
+ if timestamp is None:
+ timestamp = time()
+ timestamp = Timestamp(timestamp)
+ with df.create() as writer:
+ new_metadata = {
+ 'ETag': md5(data).hexdigest(),
+ 'X-Timestamp': timestamp.internal,
+ 'Content-Length': len(data),
+ }
+ new_metadata.update(metadata)
+ writer.write(data)
+ writer.put(new_metadata)
+ writer.commit(timestamp)
df.open()
return df
+ def test_get_dev_path(self):
+ self.df_mgr.devices = '/srv'
+ device = 'sda1'
+ dev_path = os.path.join(self.df_mgr.devices, device)
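+        # an explicit mount_check argument should override the manager's
+        # setting: None defers to self.mount_check (check_mount if set,
+        # check_dir otherwise), True forces check_mount, False skips checks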
+
+ mount_check = None
+ self.df_mgr.mount_check = True
+ with mock.patch('swift.obj.diskfile.check_mount',
+ mock.MagicMock(return_value=False)):
+ self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
+ None)
+ with mock.patch('swift.obj.diskfile.check_mount',
+ mock.MagicMock(return_value=True)):
+ self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
+ dev_path)
+
+ self.df_mgr.mount_check = False
+ with mock.patch('swift.obj.diskfile.check_dir',
+ mock.MagicMock(return_value=False)):
+ self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
+ None)
+ with mock.patch('swift.obj.diskfile.check_dir',
+ mock.MagicMock(return_value=True)):
+ self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
+ dev_path)
+
+ mount_check = True
+ with mock.patch('swift.obj.diskfile.check_mount',
+ mock.MagicMock(return_value=False)):
+ self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
+ None)
+ with mock.patch('swift.obj.diskfile.check_mount',
+ mock.MagicMock(return_value=True)):
+ self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
+ dev_path)
+
+ mount_check = False
+ self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
+ dev_path)
+
def test_open_not_exist(self):
df = self._simple_get_diskfile()
self.assertRaises(DiskFileNotExist, df.open)
@@ -1050,15 +1905,17 @@ class TestDiskFile(unittest.TestCase):
self.fail("Unexpected swift exception raised: %r" % err)
def test_get_metadata(self):
- df = self._create_test_file('1234567890', timestamp=42)
+ timestamp = self.ts().internal
+ df = self._create_test_file('1234567890', timestamp=timestamp)
md = df.get_metadata()
- self.assertEqual(md['X-Timestamp'], Timestamp(42).internal)
+ self.assertEqual(md['X-Timestamp'], timestamp)
def test_read_metadata(self):
- self._create_test_file('1234567890', timestamp=42)
+ timestamp = self.ts().internal
+ self._create_test_file('1234567890', timestamp=timestamp)
df = self._simple_get_diskfile()
md = df.read_metadata()
- self.assertEqual(md['X-Timestamp'], Timestamp(42).internal)
+ self.assertEqual(md['X-Timestamp'], timestamp)
def test_read_metadata_no_xattr(self):
def mock_getxattr(*args, **kargs):
@@ -1086,15 +1943,16 @@ class TestDiskFile(unittest.TestCase):
self.fail("Expected DiskFileNotOpen exception")
def test_disk_file_default_disallowed_metadata(self):
- # build an object with some meta (ts 41)
+        # build an object with some meta (at t0)
orig_metadata = {'X-Object-Meta-Key1': 'Value1',
'Content-Type': 'text/garbage'}
- df = self._get_open_disk_file(ts=41, extra_metadata=orig_metadata)
+ df = self._get_open_disk_file(ts=self.ts().internal,
+ extra_metadata=orig_metadata)
with df.open():
self.assertEquals('1024', df._metadata['Content-Length'])
- # write some new metadata (fast POST, don't send orig meta, ts 42)
+        # write some new metadata (fast POST, don't send orig meta, at t0+1s)
df = self._simple_get_diskfile()
- df.write_metadata({'X-Timestamp': Timestamp(42).internal,
+ df.write_metadata({'X-Timestamp': self.ts().internal,
'X-Object-Meta-Key2': 'Value2'})
df = self._simple_get_diskfile()
with df.open():
@@ -1106,15 +1964,16 @@ class TestDiskFile(unittest.TestCase):
self.assertEquals('Value2', df._metadata['X-Object-Meta-Key2'])
def test_disk_file_preserves_sysmeta(self):
- # build an object with some meta (ts 41)
+ # build an object with some meta (at t0)
orig_metadata = {'X-Object-Sysmeta-Key1': 'Value1',
'Content-Type': 'text/garbage'}
- df = self._get_open_disk_file(ts=41, extra_metadata=orig_metadata)
+ df = self._get_open_disk_file(ts=self.ts().internal,
+ extra_metadata=orig_metadata)
with df.open():
self.assertEquals('1024', df._metadata['Content-Length'])
- # write some new metadata (fast POST, don't send orig meta, ts 42)
+ # write some new metadata (fast POST, don't send orig meta, at t0+1s)
df = self._simple_get_diskfile()
- df.write_metadata({'X-Timestamp': Timestamp(42).internal,
+ df.write_metadata({'X-Timestamp': self.ts().internal,
'X-Object-Sysmeta-Key1': 'Value2',
'X-Object-Meta-Key3': 'Value3'})
df = self._simple_get_diskfile()
@@ -1268,34 +2127,38 @@ class TestDiskFile(unittest.TestCase):
def test_disk_file_mkstemp_creates_dir(self):
for policy in POLICIES:
tmpdir = os.path.join(self.testdir, self.existing_device,
- get_tmp_dir(policy.idx))
+ diskfile.get_tmp_dir(policy))
os.rmdir(tmpdir)
- df = self._simple_get_diskfile(policy_idx=policy.idx)
+ df = self._simple_get_diskfile(policy=policy)
with df.create():
self.assert_(os.path.exists(tmpdir))
def _get_open_disk_file(self, invalid_type=None, obj_name='o', fsize=1024,
csize=8, mark_deleted=False, prealloc=False,
- ts=None, mount_check=False, extra_metadata=None):
+ ts=None, mount_check=False, extra_metadata=None,
+ policy=None, frag_index=None):
'''returns a DiskFile'''
- df = self._simple_get_diskfile(obj=obj_name)
+ policy = policy or POLICIES.legacy
+ df = self._simple_get_diskfile(obj=obj_name, policy=policy,
+ frag_index=frag_index)
data = '0' * fsize
etag = md5()
if ts:
- timestamp = ts
+ timestamp = Timestamp(ts)
else:
- timestamp = Timestamp(time()).internal
+ timestamp = Timestamp(time())
if prealloc:
prealloc_size = fsize
else:
prealloc_size = None
+
with df.create(size=prealloc_size) as writer:
upload_size = writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
- 'X-Timestamp': timestamp,
+ 'X-Timestamp': timestamp.internal,
'Content-Length': str(upload_size),
}
metadata.update(extra_metadata or {})
@@ -1318,6 +2181,7 @@ class TestDiskFile(unittest.TestCase):
elif invalid_type == 'Bad-X-Delete-At':
metadata['X-Delete-At'] = 'bad integer'
diskfile.write_metadata(writer._fd, metadata)
+ writer.commit(timestamp)
if mark_deleted:
df.delete(timestamp)
@@ -1348,9 +2212,16 @@ class TestDiskFile(unittest.TestCase):
self.conf['disk_chunk_size'] = csize
self.conf['mount_check'] = mount_check
- self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
- df = self._simple_get_diskfile(obj=obj_name)
+ self.df_mgr = self.mgr_cls(self.conf, self.logger)
+ self.df_router = diskfile.DiskFileRouter(self.conf, self.logger)
+
+ # actual on disk frag_index may have been set by metadata
+ frag_index = metadata.get('X-Object-Sysmeta-Ec-Frag-Index',
+ frag_index)
+ df = self._simple_get_diskfile(obj=obj_name, policy=policy,
+ frag_index=frag_index)
df.open()
+
if invalid_type == 'Zero-Byte':
fp = open(df._data_file, 'w')
fp.close()
@@ -1576,7 +2447,7 @@ class TestDiskFile(unittest.TestCase):
pass
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
- 'xyz')
+ 'xyz', policy=POLICIES.legacy)
self.assertRaises(DiskFileQuarantined, df.open)
# make sure the right thing got quarantined; the suffix dir should not
@@ -1586,7 +2457,7 @@ class TestDiskFile(unittest.TestCase):
def test_create_prealloc(self):
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
- 'xyz')
+ 'xyz', policy=POLICIES.legacy)
with mock.patch("swift.obj.diskfile.fallocate") as fa:
with df.create(size=200) as writer:
used_fd = writer._fd
@@ -1594,7 +2465,7 @@ class TestDiskFile(unittest.TestCase):
def test_create_prealloc_oserror(self):
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
- 'xyz')
+ 'xyz', policy=POLICIES.legacy)
for e in (errno.ENOSPC, errno.EDQUOT):
with mock.patch("swift.obj.diskfile.fallocate",
mock.MagicMock(side_effect=OSError(
@@ -1621,7 +2492,7 @@ class TestDiskFile(unittest.TestCase):
def test_create_mkstemp_no_space(self):
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
- 'xyz')
+ 'xyz', policy=POLICIES.legacy)
for e in (errno.ENOSPC, errno.EDQUOT):
with mock.patch("swift.obj.diskfile.mkstemp",
mock.MagicMock(side_effect=OSError(
@@ -1648,7 +2519,7 @@ class TestDiskFile(unittest.TestCase):
def test_create_close_oserror(self):
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
- 'xyz')
+ 'xyz', policy=POLICIES.legacy)
with mock.patch("swift.obj.diskfile.os.close",
mock.MagicMock(side_effect=OSError(
errno.EACCES, os.strerror(errno.EACCES)))):
@@ -1662,11 +2533,12 @@ class TestDiskFile(unittest.TestCase):
def test_write_metadata(self):
df = self._create_test_file('1234567890')
+ file_count = len(os.listdir(df._datadir))
timestamp = Timestamp(time()).internal
metadata = {'X-Timestamp': timestamp, 'X-Object-Meta-test': 'data'}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
- self.assertEquals(len(dl), 2)
+ self.assertEquals(len(dl), file_count + 1)
exp_name = '%s.meta' % timestamp
self.assertTrue(exp_name in set(dl))
@@ -1704,14 +2576,135 @@ class TestDiskFile(unittest.TestCase):
DiskFileNoSpace,
diskfile.write_metadata, 'n/a', metadata)
+ def _create_diskfile_dir(self, timestamp, policy):
+ timestamp = Timestamp(timestamp)
+ df = self._simple_get_diskfile(account='a', container='c',
+ obj='o_%s' % policy,
+ policy=policy)
+
+ with df.create() as writer:
+ metadata = {
+ 'ETag': 'bogus_etag',
+ 'X-Timestamp': timestamp.internal,
+ 'Content-Length': '0',
+ }
+ if policy.policy_type == EC_POLICY:
+ metadata['X-Object-Sysmeta-Ec-Frag-Index'] = \
+ df._frag_index or 7
+ writer.put(metadata)
+ writer.commit(timestamp)
+ return writer._datadir
+
+ def test_commit(self):
+ for policy in POLICIES:
+ # create first fileset as starting state
+ timestamp = Timestamp(time()).internal
+ datadir = self._create_diskfile_dir(timestamp, policy)
+ dl = os.listdir(datadir)
+ expected = ['%s.data' % timestamp]
+ if policy.policy_type == EC_POLICY:
+ expected = ['%s#2.data' % timestamp,
+ '%s.durable' % timestamp]
+ self.assertEquals(len(dl), len(expected),
+ 'Unexpected dir listing %s' % dl)
+ self.assertEqual(sorted(expected), sorted(dl))
+
+ def test_write_cleanup(self):
+ for policy in POLICIES:
+ # create first fileset as starting state
+ timestamp_1 = Timestamp(time()).internal
+ datadir_1 = self._create_diskfile_dir(timestamp_1, policy)
+ # second write should clean up first fileset
+ timestamp_2 = Timestamp(time() + 1).internal
+ datadir_2 = self._create_diskfile_dir(timestamp_2, policy)
+ # sanity check
+ self.assertEqual(datadir_1, datadir_2)
+ dl = os.listdir(datadir_2)
+ expected = ['%s.data' % timestamp_2]
+ if policy.policy_type == EC_POLICY:
+ expected = ['%s#2.data' % timestamp_2,
+ '%s.durable' % timestamp_2]
+ self.assertEquals(len(dl), len(expected),
+ 'Unexpected dir listing %s' % dl)
+ self.assertEqual(sorted(expected), sorted(dl))
+
+ def test_commit_fsync(self):
+ for policy in POLICIES:
+ mock_fsync = mock.MagicMock()
+ df = self._simple_get_diskfile(account='a', container='c',
+ obj='o', policy=policy)
+
+ timestamp = Timestamp(time())
+ with df.create() as writer:
+ metadata = {
+ 'ETag': 'bogus_etag',
+ 'X-Timestamp': timestamp.internal,
+ 'Content-Length': '0',
+ }
+ writer.put(metadata)
+ with mock.patch('swift.obj.diskfile.fsync', mock_fsync):
+ writer.commit(timestamp)
+ expected = {
+ EC_POLICY: 1,
+ REPL_POLICY: 0,
+ }[policy.policy_type]
+ self.assertEqual(expected, mock_fsync.call_count)
+ if policy.policy_type == EC_POLICY:
+ durable_file = '%s.durable' % timestamp.internal
+ self.assertTrue(durable_file in str(mock_fsync.call_args[0]))
+
+ def test_commit_ignores_hash_cleanup_listdir_error(self):
+ for policy in POLICIES:
+ # Check OSError from hash_cleanup_listdir is caught and ignored
+ mock_hcl = mock.MagicMock(side_effect=OSError)
+ df = self._simple_get_diskfile(account='a', container='c',
+ obj='o_hcl_error', policy=policy)
+
+ timestamp = Timestamp(time())
+ with df.create() as writer:
+ metadata = {
+ 'ETag': 'bogus_etag',
+ 'X-Timestamp': timestamp.internal,
+ 'Content-Length': '0',
+ }
+ writer.put(metadata)
+ with mock.patch(self._manager_mock(
+ 'hash_cleanup_listdir', df), mock_hcl):
+ writer.commit(timestamp)
+ expected = {
+ EC_POLICY: 1,
+ REPL_POLICY: 0,
+ }[policy.policy_type]
+ self.assertEqual(expected, mock_hcl.call_count)
+ expected = ['%s.data' % timestamp.internal]
+ if policy.policy_type == EC_POLICY:
+ expected = ['%s#2.data' % timestamp.internal,
+ '%s.durable' % timestamp.internal]
+ dl = os.listdir(df._datadir)
+ self.assertEquals(len(dl), len(expected),
+ 'Unexpected dir listing %s' % dl)
+ self.assertEqual(sorted(expected), sorted(dl))
+
def test_delete(self):
- df = self._get_open_disk_file()
- ts = time()
- df.delete(ts)
- exp_name = '%s.ts' % Timestamp(ts).internal
- dl = os.listdir(df._datadir)
- self.assertEquals(len(dl), 1)
- self.assertTrue(exp_name in set(dl))
+ for policy in POLICIES:
+ if policy.policy_type == EC_POLICY:
+ metadata = {'X-Object-Sysmeta-Ec-Frag-Index': '1'}
+ fi = 1
+ else:
+ metadata = {}
+ fi = None
+ df = self._get_open_disk_file(policy=policy, frag_index=fi,
+ extra_metadata=metadata)
+
+ ts = Timestamp(time())
+ df.delete(ts)
+ exp_name = '%s.ts' % ts.internal
+ dl = os.listdir(df._datadir)
+ self.assertEquals(len(dl), 1)
+ self.assertTrue(exp_name in set(dl),
+ 'Expected file %s missing in %s' % (exp_name, dl))
+ # cleanup before next policy
+ os.unlink(os.path.join(df._datadir, exp_name))
def test_open_deleted(self):
df = self._get_open_disk_file()
@@ -1748,7 +2741,8 @@ class TestDiskFile(unittest.TestCase):
'blah blah',
account='three', container='blind', obj='mice')._datadir
df = self.df_mgr.get_diskfile_from_audit_location(
- diskfile.AuditLocation(hashdir, self.existing_device, '0'))
+ diskfile.AuditLocation(hashdir, self.existing_device, '0',
+ policy=POLICIES.default))
df.open()
self.assertEqual(df._name, '/three/blind/mice')
@@ -1756,14 +2750,16 @@ class TestDiskFile(unittest.TestCase):
hashdir = self._create_test_file(
'blah blah',
account='this', container='is', obj='right')._datadir
-
- datafile = os.path.join(hashdir, os.listdir(hashdir)[0])
+ datafilename = [f for f in os.listdir(hashdir)
+ if f.endswith('.data')][0]
+ datafile = os.path.join(hashdir, datafilename)
meta = diskfile.read_metadata(datafile)
meta['name'] = '/this/is/wrong'
diskfile.write_metadata(datafile, meta)
df = self.df_mgr.get_diskfile_from_audit_location(
- diskfile.AuditLocation(hashdir, self.existing_device, '0'))
+ diskfile.AuditLocation(hashdir, self.existing_device, '0',
+ policy=POLICIES.default))
self.assertRaises(DiskFileQuarantined, df.open)
def test_close_error(self):
@@ -1778,7 +2774,10 @@ class TestDiskFile(unittest.TestCase):
pass
# close is called at the end of the iterator
self.assertEquals(reader._fp, None)
- self.assertEquals(len(df._logger.log_dict['error']), 1)
+ error_lines = df._logger.get_lines_for_level('error')
+ self.assertEqual(len(error_lines), 1)
+ self.assertTrue('close failure' in error_lines[0])
+ self.assertTrue('Bad' in error_lines[0])
def test_mount_checking(self):
@@ -1829,6 +2828,9 @@ class TestDiskFile(unittest.TestCase):
self._create_ondisk_file(df, '', ext='.meta', timestamp=9)
self._create_ondisk_file(df, 'B', ext='.data', timestamp=8)
self._create_ondisk_file(df, 'A', ext='.data', timestamp=7)
+ if df.policy.policy_type == EC_POLICY:
+ self._create_ondisk_file(df, '', ext='.durable', timestamp=8)
+ self._create_ondisk_file(df, '', ext='.durable', timestamp=7)
self._create_ondisk_file(df, '', ext='.ts', timestamp=6)
self._create_ondisk_file(df, '', ext='.ts', timestamp=5)
df = self._simple_get_diskfile()
@@ -1842,6 +2844,9 @@ class TestDiskFile(unittest.TestCase):
df = self._simple_get_diskfile()
self._create_ondisk_file(df, 'B', ext='.data', timestamp=10)
self._create_ondisk_file(df, 'A', ext='.data', timestamp=9)
+ if df.policy.policy_type == EC_POLICY:
+ self._create_ondisk_file(df, '', ext='.durable', timestamp=10)
+ self._create_ondisk_file(df, '', ext='.durable', timestamp=9)
self._create_ondisk_file(df, '', ext='.ts', timestamp=8)
self._create_ondisk_file(df, '', ext='.ts', timestamp=7)
self._create_ondisk_file(df, '', ext='.meta', timestamp=6)
@@ -1858,6 +2863,9 @@ class TestDiskFile(unittest.TestCase):
self._create_ondisk_file(df, 'X', ext='.bar', timestamp=11)
self._create_ondisk_file(df, 'B', ext='.data', timestamp=10)
self._create_ondisk_file(df, 'A', ext='.data', timestamp=9)
+ if df.policy.policy_type == EC_POLICY:
+ self._create_ondisk_file(df, '', ext='.durable', timestamp=10)
+ self._create_ondisk_file(df, '', ext='.durable', timestamp=9)
self._create_ondisk_file(df, '', ext='.ts', timestamp=8)
self._create_ondisk_file(df, '', ext='.ts', timestamp=7)
self._create_ondisk_file(df, '', ext='.meta', timestamp=6)
@@ -1879,6 +2887,9 @@ class TestDiskFile(unittest.TestCase):
self._create_ondisk_file(df, 'X', ext='.bar', timestamp=11)
self._create_ondisk_file(df, 'B', ext='.data', timestamp=10)
self._create_ondisk_file(df, 'A', ext='.data', timestamp=9)
+ if df.policy.policy_type == EC_POLICY:
+ self._create_ondisk_file(df, '', ext='.durable', timestamp=10)
+ self._create_ondisk_file(df, '', ext='.durable', timestamp=9)
self._create_ondisk_file(df, '', ext='.ts', timestamp=8)
self._create_ondisk_file(df, '', ext='.ts', timestamp=7)
self._create_ondisk_file(df, '', ext='.meta', timestamp=6)
@@ -1900,300 +2911,6 @@ class TestDiskFile(unittest.TestCase):
log_lines = df._logger.get_lines_for_level('error')
self.assert_('a very special error' in log_lines[-1])
- def test_get_diskfile_from_hash_dev_path_fail(self):
- self.df_mgr.get_dev_path = mock.MagicMock(return_value=None)
- with nested(
- mock.patch('swift.obj.diskfile.DiskFile'),
- mock.patch('swift.obj.diskfile.hash_cleanup_listdir'),
- mock.patch('swift.obj.diskfile.read_metadata')) as \
- (dfclass, hclistdir, readmeta):
- hclistdir.return_value = ['1381679759.90941.data']
- readmeta.return_value = {'name': '/a/c/o'}
- self.assertRaises(
- DiskFileDeviceUnavailable,
- self.df_mgr.get_diskfile_from_hash,
- 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', 0)
-
- def test_get_diskfile_from_hash_not_dir(self):
- self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
- with nested(
- mock.patch('swift.obj.diskfile.DiskFile'),
- mock.patch('swift.obj.diskfile.hash_cleanup_listdir'),
- mock.patch('swift.obj.diskfile.read_metadata'),
- mock.patch('swift.obj.diskfile.quarantine_renamer')) as \
- (dfclass, hclistdir, readmeta, quarantine_renamer):
- osexc = OSError()
- osexc.errno = errno.ENOTDIR
- hclistdir.side_effect = osexc
- readmeta.return_value = {'name': '/a/c/o'}
- self.assertRaises(
- DiskFileNotExist,
- self.df_mgr.get_diskfile_from_hash,
- 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', 0)
- quarantine_renamer.assert_called_once_with(
- '/srv/dev/',
- '/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900')
-
- def test_get_diskfile_from_hash_no_dir(self):
- self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
- with nested(
- mock.patch('swift.obj.diskfile.DiskFile'),
- mock.patch('swift.obj.diskfile.hash_cleanup_listdir'),
- mock.patch('swift.obj.diskfile.read_metadata')) as \
- (dfclass, hclistdir, readmeta):
- osexc = OSError()
- osexc.errno = errno.ENOENT
- hclistdir.side_effect = osexc
- readmeta.return_value = {'name': '/a/c/o'}
- self.assertRaises(
- DiskFileNotExist,
- self.df_mgr.get_diskfile_from_hash,
- 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', 0)
-
- def test_get_diskfile_from_hash_other_oserror(self):
- self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
- with nested(
- mock.patch('swift.obj.diskfile.DiskFile'),
- mock.patch('swift.obj.diskfile.hash_cleanup_listdir'),
- mock.patch('swift.obj.diskfile.read_metadata')) as \
- (dfclass, hclistdir, readmeta):
- osexc = OSError()
- hclistdir.side_effect = osexc
- readmeta.return_value = {'name': '/a/c/o'}
- self.assertRaises(
- OSError,
- self.df_mgr.get_diskfile_from_hash,
- 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', 0)
-
- def test_get_diskfile_from_hash_no_actual_files(self):
- self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
- with nested(
- mock.patch('swift.obj.diskfile.DiskFile'),
- mock.patch('swift.obj.diskfile.hash_cleanup_listdir'),
- mock.patch('swift.obj.diskfile.read_metadata')) as \
- (dfclass, hclistdir, readmeta):
- hclistdir.return_value = []
- readmeta.return_value = {'name': '/a/c/o'}
- self.assertRaises(
- DiskFileNotExist,
- self.df_mgr.get_diskfile_from_hash,
- 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', 0)
-
- def test_get_diskfile_from_hash_read_metadata_problem(self):
- self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
- with nested(
- mock.patch('swift.obj.diskfile.DiskFile'),
- mock.patch('swift.obj.diskfile.hash_cleanup_listdir'),
- mock.patch('swift.obj.diskfile.read_metadata')) as \
- (dfclass, hclistdir, readmeta):
- hclistdir.return_value = ['1381679759.90941.data']
- readmeta.side_effect = EOFError()
- self.assertRaises(
- DiskFileNotExist,
- self.df_mgr.get_diskfile_from_hash,
- 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', 0)
-
- def test_get_diskfile_from_hash_no_meta_name(self):
- self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
- with nested(
- mock.patch('swift.obj.diskfile.DiskFile'),
- mock.patch('swift.obj.diskfile.hash_cleanup_listdir'),
- mock.patch('swift.obj.diskfile.read_metadata')) as \
- (dfclass, hclistdir, readmeta):
- hclistdir.return_value = ['1381679759.90941.data']
- readmeta.return_value = {}
- try:
- self.df_mgr.get_diskfile_from_hash(
- 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', 0)
- except DiskFileNotExist as err:
- exc = err
- self.assertEqual(str(exc), '')
-
- def test_get_diskfile_from_hash_bad_meta_name(self):
- self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
- with nested(
- mock.patch('swift.obj.diskfile.DiskFile'),
- mock.patch('swift.obj.diskfile.hash_cleanup_listdir'),
- mock.patch('swift.obj.diskfile.read_metadata')) as \
- (dfclass, hclistdir, readmeta):
- hclistdir.return_value = ['1381679759.90941.data']
- readmeta.return_value = {'name': 'bad'}
- try:
- self.df_mgr.get_diskfile_from_hash(
- 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', 0)
- except DiskFileNotExist as err:
- exc = err
- self.assertEqual(str(exc), '')
-
- def test_get_diskfile_from_hash(self):
- self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
- with nested(
- mock.patch('swift.obj.diskfile.DiskFile'),
- mock.patch('swift.obj.diskfile.hash_cleanup_listdir'),
- mock.patch('swift.obj.diskfile.read_metadata')) as \
- (dfclass, hclistdir, readmeta):
- hclistdir.return_value = ['1381679759.90941.data']
- readmeta.return_value = {'name': '/a/c/o'}
- self.df_mgr.get_diskfile_from_hash(
- 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', 0)
- dfclass.assert_called_once_with(
- self.df_mgr, '/srv/dev/', self.df_mgr.threadpools['dev'], '9',
- 'a', 'c', 'o', policy_idx=0)
- hclistdir.assert_called_once_with(
- '/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900',
- 604800)
- readmeta.assert_called_once_with(
- '/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900/'
- '1381679759.90941.data')
-
- def test_listdir_enoent(self):
- oserror = OSError()
- oserror.errno = errno.ENOENT
- self.df_mgr.logger.error = mock.MagicMock()
- with mock.patch('os.listdir', side_effect=oserror):
- self.assertEqual(self.df_mgr._listdir('path'), [])
- self.assertEqual(self.df_mgr.logger.error.mock_calls, [])
-
- def test_listdir_other_oserror(self):
- oserror = OSError()
- self.df_mgr.logger.error = mock.MagicMock()
- with mock.patch('os.listdir', side_effect=oserror):
- self.assertEqual(self.df_mgr._listdir('path'), [])
- self.df_mgr.logger.error.assert_called_once_with(
- 'ERROR: Skipping %r due to error with listdir attempt: %s',
- 'path', oserror)
-
- def test_listdir(self):
- self.df_mgr.logger.error = mock.MagicMock()
- with mock.patch('os.listdir', return_value=['abc', 'def']):
- self.assertEqual(self.df_mgr._listdir('path'), ['abc', 'def'])
- self.assertEqual(self.df_mgr.logger.error.mock_calls, [])
-
- def test_yield_suffixes_dev_path_fail(self):
- self.df_mgr.get_dev_path = mock.MagicMock(return_value=None)
- exc = None
- try:
- list(self.df_mgr.yield_suffixes('dev', '9', 0))
- except DiskFileDeviceUnavailable as err:
- exc = err
- self.assertEqual(str(exc), '')
-
- def test_yield_suffixes(self):
- self.df_mgr._listdir = mock.MagicMock(return_value=[
- 'abc', 'def', 'ghi', 'abcd', '012'])
- self.assertEqual(
- list(self.df_mgr.yield_suffixes('dev', '9', 0)),
- [(self.testdir + '/dev/objects/9/abc', 'abc'),
- (self.testdir + '/dev/objects/9/def', 'def'),
- (self.testdir + '/dev/objects/9/012', '012')])
-
- def test_yield_hashes_dev_path_fail(self):
- self.df_mgr.get_dev_path = mock.MagicMock(return_value=None)
- exc = None
- try:
- list(self.df_mgr.yield_hashes('dev', '9', 0))
- except DiskFileDeviceUnavailable as err:
- exc = err
- self.assertEqual(str(exc), '')
-
- def test_yield_hashes_empty(self):
- def _listdir(path):
- return []
-
- with mock.patch('os.listdir', _listdir):
- self.assertEqual(list(self.df_mgr.yield_hashes('dev', '9', 0)), [])
-
- def test_yield_hashes_empty_suffixes(self):
- def _listdir(path):
- return []
-
- with mock.patch('os.listdir', _listdir):
- self.assertEqual(
- list(self.df_mgr.yield_hashes('dev', '9', 0,
- suffixes=['456'])), [])
-
- def test_yield_hashes(self):
- fresh_ts = Timestamp(time() - 10).internal
- fresher_ts = Timestamp(time() - 1).internal
-
- def _listdir(path):
- if path.endswith('/dev/objects/9'):
- return ['abc', '456', 'def']
- elif path.endswith('/dev/objects/9/abc'):
- return ['9373a92d072897b136b3fc06595b4abc']
- elif path.endswith(
- '/dev/objects/9/abc/9373a92d072897b136b3fc06595b4abc'):
- return [fresh_ts + '.ts']
- elif path.endswith('/dev/objects/9/456'):
- return ['9373a92d072897b136b3fc06595b0456',
- '9373a92d072897b136b3fc06595b7456']
- elif path.endswith(
- '/dev/objects/9/456/9373a92d072897b136b3fc06595b0456'):
- return ['1383180000.12345.data']
- elif path.endswith(
- '/dev/objects/9/456/9373a92d072897b136b3fc06595b7456'):
- return [fresh_ts + '.ts',
- fresher_ts + '.data']
- elif path.endswith('/dev/objects/9/def'):
- return []
- else:
- raise Exception('Unexpected listdir of %r' % path)
-
- with nested(
- mock.patch('os.listdir', _listdir),
- mock.patch('os.unlink')):
- self.assertEqual(
- list(self.df_mgr.yield_hashes('dev', '9', 0)),
- [(self.testdir +
- '/dev/objects/9/abc/9373a92d072897b136b3fc06595b4abc',
- '9373a92d072897b136b3fc06595b4abc', fresh_ts),
- (self.testdir +
- '/dev/objects/9/456/9373a92d072897b136b3fc06595b0456',
- '9373a92d072897b136b3fc06595b0456', '1383180000.12345'),
- (self.testdir +
- '/dev/objects/9/456/9373a92d072897b136b3fc06595b7456',
- '9373a92d072897b136b3fc06595b7456', fresher_ts)])
-
- def test_yield_hashes_suffixes(self):
- fresh_ts = Timestamp(time() - 10).internal
- fresher_ts = Timestamp(time() - 1).internal
-
- def _listdir(path):
- if path.endswith('/dev/objects/9'):
- return ['abc', '456', 'def']
- elif path.endswith('/dev/objects/9/abc'):
- return ['9373a92d072897b136b3fc06595b4abc']
- elif path.endswith(
- '/dev/objects/9/abc/9373a92d072897b136b3fc06595b4abc'):
- return [fresh_ts + '.ts']
- elif path.endswith('/dev/objects/9/456'):
- return ['9373a92d072897b136b3fc06595b0456',
- '9373a92d072897b136b3fc06595b7456']
- elif path.endswith(
- '/dev/objects/9/456/9373a92d072897b136b3fc06595b0456'):
- return ['1383180000.12345.data']
- elif path.endswith(
- '/dev/objects/9/456/9373a92d072897b136b3fc06595b7456'):
- return [fresh_ts + '.ts',
- fresher_ts + '.data']
- elif path.endswith('/dev/objects/9/def'):
- return []
- else:
- raise Exception('Unexpected listdir of %r' % path)
-
- with nested(
- mock.patch('os.listdir', _listdir),
- mock.patch('os.unlink')):
- self.assertEqual(
- list(self.df_mgr.yield_hashes(
- 'dev', '9', 0, suffixes=['456'])),
- [(self.testdir +
- '/dev/objects/9/456/9373a92d072897b136b3fc06595b0456',
- '9373a92d072897b136b3fc06595b0456', '1383180000.12345'),
- (self.testdir +
- '/dev/objects/9/456/9373a92d072897b136b3fc06595b7456',
- '9373a92d072897b136b3fc06595b7456', fresher_ts)])
-
def test_diskfile_names(self):
df = self._simple_get_diskfile()
self.assertEqual(df.account, 'a')
@@ -2259,10 +2976,11 @@ class TestDiskFile(unittest.TestCase):
self.assertEqual(str(exc), '')
def test_diskfile_timestamp(self):
- self._get_open_disk_file(ts='1383181759.12345')
+ ts = Timestamp(time())
+ self._get_open_disk_file(ts=ts.internal)
df = self._simple_get_diskfile()
with df.open():
- self.assertEqual(df.timestamp, '1383181759.12345')
+ self.assertEqual(df.timestamp, ts.internal)
def test_error_in_hash_cleanup_listdir(self):
@@ -2270,16 +2988,16 @@ class TestDiskFile(unittest.TestCase):
raise OSError()
df = self._get_open_disk_file()
+ file_count = len(os.listdir(df._datadir))
ts = time()
- with mock.patch("swift.obj.diskfile.hash_cleanup_listdir",
- mock_hcl):
+ with mock.patch(self._manager_mock('hash_cleanup_listdir'), mock_hcl):
try:
df.delete(ts)
except OSError:
self.fail("OSError raised when it should have been swallowed")
exp_name = '%s.ts' % str(Timestamp(ts).internal)
dl = os.listdir(df._datadir)
- self.assertEquals(len(dl), 2)
+ self.assertEquals(len(dl), file_count + 1)
self.assertTrue(exp_name in set(dl))
def _system_can_zero_copy(self):
@@ -2300,7 +3018,6 @@ class TestDiskFile(unittest.TestCase):
self.conf['splice'] = 'on'
self.conf['keep_cache_size'] = 16384
self.conf['disk_chunk_size'] = 4096
- self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
df = self._get_open_disk_file(fsize=16385)
reader = df.reader()
@@ -2314,7 +3031,7 @@ class TestDiskFile(unittest.TestCase):
def test_zero_copy_turns_off_when_md5_sockets_not_supported(self):
if not self._system_can_zero_copy():
raise SkipTest("zero-copy support is missing")
-
+ df_mgr = self.df_router[POLICIES.default]
self.conf['splice'] = 'on'
with mock.patch('swift.obj.diskfile.get_md5_socket') as mock_md5sock:
mock_md5sock.side_effect = IOError(
@@ -2323,7 +3040,7 @@ class TestDiskFile(unittest.TestCase):
reader = df.reader()
self.assertFalse(reader.can_zero_copy_send())
- log_lines = self.df_mgr.logger.get_lines_for_level('warning')
+ log_lines = df_mgr.logger.get_lines_for_level('warning')
self.assert_('MD5 sockets' in log_lines[-1])
def test_tee_to_md5_pipe_length_mismatch(self):
@@ -2420,7 +3137,7 @@ class TestDiskFile(unittest.TestCase):
def test_create_unlink_cleanup_DiskFileNoSpace(self):
# Test cleanup when DiskFileNoSpace() is raised.
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
- 'xyz')
+ 'xyz', policy=POLICIES.legacy)
_m_fallocate = mock.MagicMock(side_effect=OSError(errno.ENOSPC,
os.strerror(errno.ENOSPC)))
_m_unlink = mock.Mock()
@@ -2435,7 +3152,7 @@ class TestDiskFile(unittest.TestCase):
self.fail("Expected exception DiskFileNoSpace")
self.assertTrue(_m_fallocate.called)
self.assertTrue(_m_unlink.called)
- self.assert_(len(self.df_mgr.logger.log_dict['exception']) == 0)
+ self.assertTrue('error' not in self.logger.all_log_lines())
def test_create_unlink_cleanup_renamer_fails(self):
# Test cleanup when renamer fails
@@ -2462,12 +3179,12 @@ class TestDiskFile(unittest.TestCase):
self.assertFalse(writer.put_succeeded)
self.assertTrue(_m_renamer.called)
self.assertTrue(_m_unlink.called)
- self.assert_(len(self.df_mgr.logger.log_dict['exception']) == 0)
+ self.assertTrue('error' not in self.logger.all_log_lines())
def test_create_unlink_cleanup_logging(self):
# Test logging of os.unlink() failures.
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
- 'xyz')
+ 'xyz', policy=POLICIES.legacy)
_m_fallocate = mock.MagicMock(side_effect=OSError(errno.ENOSPC,
os.strerror(errno.ENOSPC)))
_m_unlink = mock.MagicMock(side_effect=OSError(errno.ENOENT,
@@ -2483,8 +3200,1633 @@ class TestDiskFile(unittest.TestCase):
self.fail("Expected exception DiskFileNoSpace")
self.assertTrue(_m_fallocate.called)
self.assertTrue(_m_unlink.called)
- self.assert_(self.df_mgr.logger.log_dict['exception'][0][0][0].
- startswith("Error removing tempfile:"))
+ error_lines = self.logger.get_lines_for_level('error')
+ for line in error_lines:
+ self.assertTrue(line.startswith("Error removing tempfile:"))
+
+
+@patch_policies(test_policies)
+class TestDiskFile(DiskFileMixin, unittest.TestCase):
+
+ mgr_cls = diskfile.DiskFileManager
+
+
+@patch_policies(with_ec_default=True)
+class TestECDiskFile(DiskFileMixin, unittest.TestCase):
+
+ mgr_cls = diskfile.ECDiskFileManager
+
+ def test_commit_raises_DiskFileErrors(self):
+ scenarios = ((errno.ENOSPC, DiskFileNoSpace),
+ (errno.EDQUOT, DiskFileNoSpace),
+ (errno.ENOTDIR, DiskFileError),
+ (errno.EPERM, DiskFileError))
+
+ # Check that IOErrors from open() are handled
+ for err_number, expected_exception in scenarios:
+ io_error = IOError()
+ io_error.errno = err_number
+ mock_open = mock.MagicMock(side_effect=io_error)
+ df = self._simple_get_diskfile(account='a', container='c',
+ obj='o_%s' % err_number,
+ policy=POLICIES.default)
+ timestamp = Timestamp(time())
+ with df.create() as writer:
+ metadata = {
+ 'ETag': 'bogus_etag',
+ 'X-Timestamp': timestamp.internal,
+ 'Content-Length': '0',
+ }
+ writer.put(metadata)
+ with mock.patch('__builtin__.open', mock_open):
+ self.assertRaises(expected_exception,
+ writer.commit,
+ timestamp)
+ dl = os.listdir(df._datadir)
+ self.assertEqual(1, len(dl), dl)
+ rmtree(df._datadir)
+
+ # Check OSError from fsync() is handled
+ mock_fsync = mock.MagicMock(side_effect=OSError)
+ df = self._simple_get_diskfile(account='a', container='c',
+ obj='o_fsync_error')
+
+ timestamp = Timestamp(time())
+ with df.create() as writer:
+ metadata = {
+ 'ETag': 'bogus_etag',
+ 'X-Timestamp': timestamp.internal,
+ 'Content-Length': '0',
+ }
+ writer.put(metadata)
+ with mock.patch('swift.obj.diskfile.fsync', mock_fsync):
+ self.assertRaises(DiskFileError,
+ writer.commit, timestamp)
+
+ def test_data_file_has_frag_index(self):
+ policy = POLICIES.default
+ for good_value in (0, '0', 2, '2', 14, '14'):
+ # frag_index set by constructor arg
+ ts = self.ts().internal
+ expected = ['%s#%s.data' % (ts, good_value), '%s.durable' % ts]
+ df = self._get_open_disk_file(ts=ts, policy=policy,
+ frag_index=good_value)
+ self.assertEqual(expected, sorted(os.listdir(df._datadir)))
+ # frag index should be added to object sysmeta
+ actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
+ self.assertEqual(int(good_value), int(actual))
+
+ # metadata value overrides the constructor arg
+ ts = self.ts().internal
+ expected = ['%s#%s.data' % (ts, good_value), '%s.durable' % ts]
+ meta = {'X-Object-Sysmeta-Ec-Frag-Index': good_value}
+ df = self._get_open_disk_file(ts=ts, policy=policy,
+ frag_index='99',
+ extra_metadata=meta)
+ self.assertEqual(expected, sorted(os.listdir(df._datadir)))
+ actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
+ self.assertEqual(int(good_value), int(actual))
+
+ # metadata value alone is sufficient
+ ts = self.ts().internal
+ expected = ['%s#%s.data' % (ts, good_value), '%s.durable' % ts]
+ meta = {'X-Object-Sysmeta-Ec-Frag-Index': good_value}
+ df = self._get_open_disk_file(ts=ts, policy=policy,
+ frag_index=None,
+ extra_metadata=meta)
+ self.assertEqual(expected, sorted(os.listdir(df._datadir)))
+ actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
+ self.assertEqual(int(good_value), int(actual))
+
+ def test_sysmeta_frag_index_is_immutable(self):
+ # the X-Object-Sysmeta-Ec-Frag-Index should *only* be set when
+ # the .data file is written.
+ policy = POLICIES.default
+ orig_frag_index = 14
+ # frag_index set by constructor arg
+ ts = self.ts().internal
+ expected = ['%s#%s.data' % (ts, orig_frag_index), '%s.durable' % ts]
+ df = self._get_open_disk_file(ts=ts, policy=policy, obj_name='my_obj',
+ frag_index=orig_frag_index)
+ self.assertEqual(expected, sorted(os.listdir(df._datadir)))
+ # frag index should be added to object sysmeta
+ actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
+ self.assertEqual(int(orig_frag_index), int(actual))
+
+ # open the same diskfile with no frag_index passed to constructor
+ df = self.df_router[policy].get_diskfile(
+ self.existing_device, 0, 'a', 'c', 'my_obj', policy=policy,
+ frag_index=None)
+ df.open()
+ actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
+ self.assertEqual(int(orig_frag_index), int(actual))
+
+ # write metadata to a meta file
+ ts = self.ts().internal
+ metadata = {'X-Timestamp': ts,
+ 'X-Object-Meta-Fruit': 'kiwi'}
+ df.write_metadata(metadata)
+ # sanity check we did write a meta file
+ expected.append('%s.meta' % ts)
+ actual_files = sorted(os.listdir(df._datadir))
+ self.assertEqual(expected, actual_files)
+
+ # open the same diskfile, check frag index is unchanged
+ df = self.df_router[policy].get_diskfile(
+ self.existing_device, 0, 'a', 'c', 'my_obj', policy=policy,
+ frag_index=None)
+ df.open()
+ # sanity check we have read the meta file
+ self.assertEqual(ts, df.get_metadata().get('X-Timestamp'))
+ self.assertEqual('kiwi', df.get_metadata().get('X-Object-Meta-Fruit'))
+ # check frag index sysmeta is unchanged
+ actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
+ self.assertEqual(int(orig_frag_index), int(actual))
+
+ # attempt to overwrite frag index sysmeta
+ ts = self.ts().internal
+ metadata = {'X-Timestamp': ts,
+ 'X-Object-Sysmeta-Ec-Frag-Index': 99,
+ 'X-Object-Meta-Fruit': 'apple'}
+ df.write_metadata(metadata)
+
+ # open the same diskfile, check frag index is unchanged
+ df = self.df_router[policy].get_diskfile(
+ self.existing_device, 0, 'a', 'c', 'my_obj', policy=policy,
+ frag_index=None)
+ df.open()
+ # sanity check we have read the meta file
+ self.assertEqual(ts, df.get_metadata().get('X-Timestamp'))
+ self.assertEqual('apple', df.get_metadata().get('X-Object-Meta-Fruit'))
+ actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
+ self.assertEqual(int(orig_frag_index), int(actual))
+
+ def test_data_file_errors_bad_frag_index(self):
+ policy = POLICIES.default
+ df_mgr = self.df_router[policy]
+ for bad_value in ('foo', '-2', -2, '3.14', 3.14):
+ # check that bad frag_index set by constructor arg raises error
+ # as soon as diskfile is constructed, before data is written
+ self.assertRaises(DiskFileError, self._simple_get_diskfile,
+ policy=policy, frag_index=bad_value)
+
+ # bad frag_index set by metadata value
+ # (drive-by check that it is ok for constructor arg to be None)
+ df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
+ policy=policy, frag_index=None)
+ ts = self.ts()
+ meta = {'X-Object-Sysmeta-Ec-Frag-Index': bad_value,
+ 'X-Timestamp': ts.internal,
+ 'Content-Length': 0,
+ 'Etag': EMPTY_ETAG,
+ 'Content-Type': 'plain/text'}
+ with df.create() as writer:
+ try:
+ writer.put(meta)
+ self.fail('Expected DiskFileError for frag_index %s'
+ % bad_value)
+ except DiskFileError:
+ pass
+
+ # bad frag_index set by metadata value overrides ok constructor arg
+ df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
+ policy=policy, frag_index=2)
+ ts = self.ts()
+ meta = {'X-Object-Sysmeta-Ec-Frag-Index': bad_value,
+ 'X-Timestamp': ts.internal,
+ 'Content-Length': 0,
+ 'Etag': EMPTY_ETAG,
+ 'Content-Type': 'plain/text'}
+ with df.create() as writer:
+ try:
+ writer.put(meta)
+ self.fail('Expected DiskFileError for frag_index %s'
+ % bad_value)
+ except DiskFileError:
+ pass
+
+ def test_purge_one_fragment_index(self):
+ ts = self.ts()
+ for frag_index in (1, 2):
+ df = self._simple_get_diskfile(frag_index=frag_index)
+ with df.create() as writer:
+ data = 'test data'
+ writer.write(data)
+ metadata = {
+ 'ETag': md5(data).hexdigest(),
+ 'X-Timestamp': ts.internal,
+ 'Content-Length': len(data),
+ }
+ writer.put(metadata)
+ writer.commit(ts)
+
+ # sanity
+ self.assertEqual(sorted(os.listdir(df._datadir)), [
+ ts.internal + '#1.data',
+ ts.internal + '#2.data',
+ ts.internal + '.durable',
+ ])
+ df.purge(ts, 2)
+ self.assertEqual(sorted(os.listdir(df._datadir)), [
+ ts.internal + '#1.data',
+ ts.internal + '.durable',
+ ])
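+
+ # purge(timestamp, frag_index) is fragment-specific: it only unlinks
+ # files matching that frag index (or a tombstone at that timestamp);
+ # other fragments and the shared .durable marker are left in place,
+ # as the tests below demonstrate.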
+
+ def test_purge_last_fragment_index(self):
+ ts = self.ts()
+ frag_index = 0
+ df = self._simple_get_diskfile(frag_index=frag_index)
+ with df.create() as writer:
+ data = 'test data'
+ writer.write(data)
+ metadata = {
+ 'ETag': md5(data).hexdigest(),
+ 'X-Timestamp': ts.internal,
+ 'Content-Length': len(data),
+ }
+ writer.put(metadata)
+ writer.commit(ts)
+
+ # sanity
+ self.assertEqual(sorted(os.listdir(df._datadir)), [
+ ts.internal + '#0.data',
+ ts.internal + '.durable',
+ ])
+ df.purge(ts, 0)
+ self.assertEqual(sorted(os.listdir(df._datadir)), [
+ ts.internal + '.durable',
+ ])
+
+ def test_purge_non_existent_fragment_index(self):
+ ts = self.ts()
+ frag_index = 7
+ df = self._simple_get_diskfile(frag_index=frag_index)
+ with df.create() as writer:
+ data = 'test data'
+ writer.write(data)
+ metadata = {
+ 'ETag': md5(data).hexdigest(),
+ 'X-Timestamp': ts.internal,
+ 'Content-Length': len(data),
+ }
+ writer.put(metadata)
+ writer.commit(ts)
+
+ # sanity
+ self.assertEqual(sorted(os.listdir(df._datadir)), [
+ ts.internal + '#7.data',
+ ts.internal + '.durable',
+ ])
+ df.purge(ts, 3)
+ # no effect
+ self.assertEqual(sorted(os.listdir(df._datadir)), [
+ ts.internal + '#7.data',
+ ts.internal + '.durable',
+ ])
+
+ def test_purge_old_timestamp_frag_index(self):
+ old_ts = self.ts()
+ ts = self.ts()
+ frag_index = 1
+ df = self._simple_get_diskfile(frag_index=frag_index)
+ with df.create() as writer:
+ data = 'test data'
+ writer.write(data)
+ metadata = {
+ 'ETag': md5(data).hexdigest(),
+ 'X-Timestamp': ts.internal,
+ 'Content-Length': len(data),
+ }
+ writer.put(metadata)
+ writer.commit(ts)
+
+ # sanity
+ self.assertEqual(sorted(os.listdir(df._datadir)), [
+ ts.internal + '#1.data',
+ ts.internal + '.durable',
+ ])
+ df.purge(old_ts, 1)
+ # no effect
+ self.assertEqual(sorted(os.listdir(df._datadir)), [
+ ts.internal + '#1.data',
+ ts.internal + '.durable',
+ ])
+
+ def test_purge_tombstone(self):
+ ts = self.ts()
+ df = self._simple_get_diskfile(frag_index=3)
+ df.delete(ts)
+
+ # sanity
+ self.assertEqual(sorted(os.listdir(df._datadir)), [
+ ts.internal + '.ts',
+ ])
+ df.purge(ts, 3)
+ self.assertEqual(sorted(os.listdir(df._datadir)), [])
+
+ def test_purge_old_tombstone(self):
+ old_ts = self.ts()
+ ts = self.ts()
+ df = self._simple_get_diskfile(frag_index=5)
+ df.delete(ts)
+
+ # sanity
+ self.assertEqual(sorted(os.listdir(df._datadir)), [
+ ts.internal + '.ts',
+ ])
+ df.purge(old_ts, 5)
+ # no effect
+ self.assertEqual(sorted(os.listdir(df._datadir)), [
+ ts.internal + '.ts',
+ ])
+
+ def test_purge_already_removed(self):
+ df = self._simple_get_diskfile(frag_index=6)
+
+ df.purge(self.ts(), 6) # no errors
+
+ # sanity
+ os.makedirs(df._datadir)
+ self.assertEqual(sorted(os.listdir(df._datadir)), [])
+ df.purge(self.ts(), 6)
+ # no effect
+ self.assertEqual(sorted(os.listdir(df._datadir)), [])
+
+ def test_open_most_recent_durable(self):
+ policy = POLICIES.default
+ df_mgr = self.df_router[policy]
+
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ 'a', 'c', 'o', policy=policy)
+
+ ts = self.ts()
+ with df.create() as writer:
+ data = 'test data'
+ writer.write(data)
+ metadata = {
+ 'ETag': md5(data).hexdigest(),
+ 'X-Timestamp': ts.internal,
+ 'Content-Length': len(data),
+ 'X-Object-Sysmeta-Ec-Frag-Index': 3,
+ }
+ writer.put(metadata)
+ writer.commit(ts)
+
+ # add some .meta stuff
+ extra_meta = {
+ 'X-Object-Meta-Foo': 'Bar',
+ 'X-Timestamp': self.ts().internal,
+ }
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ 'a', 'c', 'o', policy=policy)
+ df.write_metadata(extra_meta)
+
+ # sanity
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ 'a', 'c', 'o', policy=policy)
+ metadata.update(extra_meta)
+ self.assertEqual(metadata, df.read_metadata())
+
+ # add a newer datafile
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ 'a', 'c', 'o', policy=policy)
+ ts = self.ts()
+ with df.create() as writer:
+ data = 'test data'
+ writer.write(data)
+ new_metadata = {
+ 'ETag': md5(data).hexdigest(),
+ 'X-Timestamp': ts.internal,
+ 'Content-Length': len(data),
+ 'X-Object-Sysmeta-Ec-Frag-Index': 3,
+ }
+ writer.put(new_metadata)
+ # N.B. don't make it durable
+
+ # and we still get the old metadata (same as if no .data!)
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ 'a', 'c', 'o', policy=policy)
+ self.assertEqual(metadata, df.read_metadata())
+
+ def test_open_most_recent_missing_durable(self):
+ policy = POLICIES.default
+ df_mgr = self.df_router[policy]
+
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ 'a', 'c', 'o', policy=policy)
+
+ self.assertRaises(DiskFileNotExist, df.read_metadata)
+
+ # now create a datafile missing durable
+ ts = self.ts()
+ with df.create() as writer:
+ data = 'test data'
+ writer.write(data)
+ new_metadata = {
+ 'ETag': md5(data).hexdigest(),
+ 'X-Timestamp': ts.internal,
+ 'Content-Length': len(data),
+ 'X-Object-Sysmeta-Ec-Frag-Index': 3,
+ }
+ writer.put(new_metadata)
+ # N.B. don't make it durable
+
+ # add some .meta stuff
+ extra_meta = {
+ 'X-Object-Meta-Foo': 'Bar',
+ 'X-Timestamp': self.ts().internal,
+ }
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ 'a', 'c', 'o', policy=policy)
+ df.write_metadata(extra_meta)
+
+ # we still get the DiskFileNotExist (same as if no .data!)
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ 'a', 'c', 'o', policy=policy,
+ frag_index=3)
+ self.assertRaises(DiskFileNotExist, df.read_metadata)
+
+ # sanity, without the frag_index kwarg
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ 'a', 'c', 'o', policy=policy)
+ self.assertRaises(DiskFileNotExist, df.read_metadata)
+
+
+@patch_policies(with_ec_default=True)
+class TestSuffixHashes(unittest.TestCase):
+ """
+ This tests all things related to hashing suffixes, and therefore
+ also includes a few test methods for hash_cleanup_listdir
+ (because it's used by hash_suffix).
+
+ The public interface to suffix hashing is on the Manager::
+
+ * hash_cleanup_listdir(hsh_path)
+ * get_hashes(device, partition, suffixes, policy)
+ * invalidate_hash(suffix_dir)
+
+ The Manager.get_hashes method (used by the REPLICATION verb)
+ calls Manager._get_hashes (which may be an alias to the module
+ method get_hashes), which calls hash_suffix, which calls
+ hash_cleanup_listdir.
+
+ Outside of that, hash_cleanup_listdir and invalidate_hash are
+ used mostly after writing new files via PUT or DELETE.
+
+ Test methods are organized by::
+
+ * hash_cleanup_listdir tests - behaviors
+ * hash_cleanup_listdir tests - error handling
+ * invalidate_hash tests - behavior
+ * invalidate_hash tests - error handling
+ * get_hashes tests - hash_suffix behaviors
+ * get_hashes tests - hash_suffix error handling
+ * get_hashes tests - behaviors
+ * get_hashes tests - error handling
+
+ """
+
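+ # As a rough sketch of the shapes asserted below (hash values are
+ # illustrative, not real md5 hexdigests), get_hashes returns one
+ # entry per suffix, keyed by frag index for EC:
+ #
+ #   REPL: {'abc': 'd41d8cd9...'}
+ #   EC:   {'abc': {None: '<hash of .meta/.durable names>',
+ #                  7: '<hash of frag 7 .data timestamps>'}}
+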
+ def setUp(self):
+ self.testdir = tempfile.mkdtemp()
+ self.logger = debug_logger('suffix-hash-test')
+ self.devices = os.path.join(self.testdir, 'node')
+ os.mkdir(self.devices)
+ self.existing_device = 'sda1'
+ os.mkdir(os.path.join(self.devices, self.existing_device))
+ self.conf = {
+ 'swift_dir': self.testdir,
+ 'devices': self.devices,
+ 'mount_check': False,
+ }
+ self.df_router = diskfile.DiskFileRouter(self.conf, self.logger)
+ self._ts_iter = (Timestamp(t) for t in
+ itertools.count(int(time())))
+ self.policy = None
+
+ def ts(self):
+ """
+ Return the next of an ever-increasing sequence of timestamps.
+ """
+ return next(self._ts_iter)
+
+ def fname_to_ts_hash(self, fname):
+ """
+ EC datafiles are hashed by their timestamp only (the frag index is stripped)
+ """
+ return md5(fname.split('#', 1)[0]).hexdigest()
+
+ def tearDown(self):
+ rmtree(self.testdir, ignore_errors=1)
+
+ def iter_policies(self):
+ for policy in POLICIES:
+ self.policy = policy
+ yield policy
+
+ def assertEqual(self, *args):
+ try:
+ unittest.TestCase.assertEqual(self, *args)
+ except AssertionError as err:
+ if not self.policy:
+ raise
+ policy_trailer = '\n\n... for policy %r' % self.policy
+ raise AssertionError(str(err) + policy_trailer)
+
+ def _datafilename(self, timestamp, policy, frag_index=None):
+ if frag_index is None:
+ frag_index = randint(0, 9)
+ filename = timestamp.internal
+ if policy.policy_type == EC_POLICY:
+ filename += '#%d' % frag_index
+ filename += '.data'
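+ # e.g. (illustrative) for a timestamp of 1383180000.12345:
+ #   EC with frag_index=2: '1383180000.12345#2.data'
+ #   REPL:                 '1383180000.12345.data'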
+ return filename
+
+ def check_hash_cleanup_listdir(self, policy, input_files, output_files):
+ orig_unlink = os.unlink
+ file_list = list(input_files)
+
+ def mock_listdir(path):
+ return list(file_list)
+
+ def mock_unlink(path):
+ # timestamp 1 is a special tag to pretend a file disappeared
+ # between the listdir and unlink.
+ if '/0000000001.00000.' in path:
+ # Using actual os.unlink for a non-existent name to reproduce
+ # exactly what OSError it raises in order to prove that
+ # common.utils.remove_file is squelching the error - but any
+ # OSError would do.
+ orig_unlink(uuid.uuid4().hex)
+ file_list.remove(os.path.basename(path))
+
+ df_mgr = self.df_router[policy]
+ with unit_mock({'os.listdir': mock_listdir, 'os.unlink': mock_unlink}):
+ if isinstance(output_files, Exception):
+ path = os.path.join(self.testdir, 'does-not-matter')
+ self.assertRaises(output_files.__class__,
+ df_mgr.hash_cleanup_listdir, path)
+ return
+ files = df_mgr.hash_cleanup_listdir('/whatever')
+ self.assertEquals(files, output_files)
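+
+ # A typical check (timestamps illustrative) passes the on-disk names
+ # and the names expected to survive the cleanup:
+ #
+ #   self.check_hash_cleanup_listdir(
+ #       policy,
+ #       ['0000000002.00000.ts', '0000000003.00000.ts'],
+ #       ['0000000003.00000.ts'])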
+
+ # hash_cleanup_listdir tests - behaviors
+
+ def test_hash_cleanup_listdir_purge_data_newer_ts(self):
+ for policy in self.iter_policies():
+ # purge .data if there's a newer .ts
+ file1 = self._datafilename(self.ts(), policy)
+ file2 = self.ts().internal + '.ts'
+ file_list = [file1, file2]
+ self.check_hash_cleanup_listdir(policy, file_list, [file2])
+
+ def test_hash_cleanup_listdir_purge_expired_ts(self):
+ for policy in self.iter_policies():
+ # purge older .ts files if there's a newer .data
+ file1 = self.ts().internal + '.ts'
+ file2 = self.ts().internal + '.ts'
+ timestamp = self.ts()
+ file3 = self._datafilename(timestamp, policy)
+ file_list = [file1, file2, file3]
+ expected = {
+ # no durable datafile means you can't get rid of the
+ # latest tombstone even if the datafile is newer
+ EC_POLICY: [file3, file2],
+ REPL_POLICY: [file3],
+ }[policy.policy_type]
+ self.check_hash_cleanup_listdir(policy, file_list, expected)
+
+ def test_hash_cleanup_listdir_purge_ts_newer_data(self):
+ for policy in self.iter_policies():
+ # purge .ts if there's a newer .data
+ file1 = self.ts().internal + '.ts'
+ timestamp = self.ts()
+ file2 = self._datafilename(timestamp, policy)
+ file_list = [file1, file2]
+ if policy.policy_type == EC_POLICY:
+ durable_file = timestamp.internal + '.durable'
+ file_list.append(durable_file)
+ expected = {
+ EC_POLICY: [durable_file, file2],
+ REPL_POLICY: [file2],
+ }[policy.policy_type]
+ self.check_hash_cleanup_listdir(policy, file_list, expected)
+
+ def test_hash_cleanup_listdir_purge_older_ts(self):
+ for policy in self.iter_policies():
+ file1 = self.ts().internal + '.ts'
+ file2 = self.ts().internal + '.ts'
+ file3 = self._datafilename(self.ts(), policy)
+ file4 = self.ts().internal + '.meta'
+ expected = {
+ # no durable means we can only throw out things before
+ # the latest tombstone
+ EC_POLICY: [file4, file3, file2],
+ # keep .meta and .data and purge all .ts files
+ REPL_POLICY: [file4, file3],
+ }[policy.policy_type]
+ file_list = [file1, file2, file3, file4]
+ self.check_hash_cleanup_listdir(policy, file_list, expected)
+
+ def test_hash_cleanup_listdir_keep_meta_data_purge_ts(self):
+ for policy in self.iter_policies():
+ file1 = self.ts().internal + '.ts'
+ file2 = self.ts().internal + '.ts'
+ timestamp = self.ts()
+ file3 = self._datafilename(timestamp, policy)
+ file_list = [file1, file2, file3]
+ if policy.policy_type == EC_POLICY:
+ durable_filename = timestamp.internal + '.durable'
+ file_list.append(durable_filename)
+ file4 = self.ts().internal + '.meta'
+ file_list.append(file4)
+ # keep .meta and .data if meta newer than data and purge .ts
+ expected = {
+ EC_POLICY: [file4, durable_filename, file3],
+ REPL_POLICY: [file4, file3],
+ }[policy.policy_type]
+ self.check_hash_cleanup_listdir(policy, file_list, expected)
+
+ def test_hash_cleanup_listdir_keep_one_ts(self):
+ for policy in self.iter_policies():
+ file1, file2, file3 = [self.ts().internal + '.ts'
+ for i in range(3)]
+ file_list = [file1, file2, file3]
+ # keep only latest of multiple .ts files
+ self.check_hash_cleanup_listdir(policy, file_list, [file3])
+
+ def test_hash_cleanup_listdir_multi_data_file(self):
+ for policy in self.iter_policies():
+ file1 = self._datafilename(self.ts(), policy, 1)
+ file2 = self._datafilename(self.ts(), policy, 2)
+ file3 = self._datafilename(self.ts(), policy, 3)
+ expected = {
+ # keep all non-durable datafiles
+ EC_POLICY: [file3, file2, file1],
+ # keep only latest of multiple .data files
+ REPL_POLICY: [file3]
+ }[policy.policy_type]
+ file_list = [file1, file2, file3]
+ self.check_hash_cleanup_listdir(policy, file_list, expected)
+
+ def test_hash_cleanup_listdir_keeps_one_datafile(self):
+ for policy in self.iter_policies():
+ timestamps = [self.ts() for i in range(3)]
+ file1 = self._datafilename(timestamps[0], policy, 1)
+ file2 = self._datafilename(timestamps[1], policy, 2)
+ file3 = self._datafilename(timestamps[2], policy, 3)
+ file_list = [file1, file2, file3]
+ if policy.policy_type == EC_POLICY:
+ for t in timestamps:
+ file_list.append(t.internal + '.durable')
+ latest_durable = file_list[-1]
+ expected = {
+ # keep latest durable and datafile
+ EC_POLICY: [latest_durable, file3],
+ # keep only latest of multiple .data files
+ REPL_POLICY: [file3]
+ }[policy.policy_type]
+ self.check_hash_cleanup_listdir(policy, file_list, expected)
+
+ def test_hash_cleanup_listdir_keep_one_meta(self):
+ for policy in self.iter_policies():
+ # keep only latest of multiple .meta files
+ t_data = self.ts()
+ file1 = self._datafilename(t_data, policy)
+ file2, file3 = [self.ts().internal + '.meta' for i in range(2)]
+ file_list = [file1, file2, file3]
+ if policy.policy_type == EC_POLICY:
+ durable_file = t_data.internal + '.durable'
+ file_list.append(durable_file)
+ expected = {
+ EC_POLICY: [file3, durable_file, file1],
+ REPL_POLICY: [file3, file1]
+ }[policy.policy_type]
+ self.check_hash_cleanup_listdir(policy, file_list, expected)
+
+ def test_hash_cleanup_listdir_only_meta(self):
+ for policy in self.iter_policies():
+ file1, file2 = [self.ts().internal + '.meta' for i in range(2)]
+ file_list = [file1, file2]
+ if policy.policy_type == EC_POLICY:
+ # the EC policy tolerates a dir holding only .meta files when cleaning up
+ expected = [file2]
+ else:
+ # the get_ondisk_files contract validation doesn't allow a
+ # directory with only .meta files
+ expected = AssertionError()
+ self.check_hash_cleanup_listdir(policy, file_list, expected)
+
+ def test_hash_cleanup_listdir_ignore_orphaned_ts(self):
+ for policy in self.iter_policies():
+ # A more recent orphaned .meta file will prevent old .ts files
+ # from being cleaned up as they otherwise would be
+ file1, file2 = [self.ts().internal + '.ts' for i in range(2)]
+ file3 = self.ts().internal + '.meta'
+ file_list = [file1, file2, file3]
+ self.check_hash_cleanup_listdir(policy, file_list, [file3, file2])
+
+ def test_hash_cleanup_listdir_purge_old_data_only(self):
+ for policy in self.iter_policies():
+ # The oldest .data will be purged; .meta and .ts won't be touched
+ file1 = self._datafilename(self.ts(), policy)
+ file2 = self.ts().internal + '.ts'
+ file3 = self.ts().internal + '.meta'
+ file_list = [file1, file2, file3]
+ self.check_hash_cleanup_listdir(policy, file_list, [file3, file2])
+
+ def test_hash_cleanup_listdir_purge_old_ts(self):
+ for policy in self.iter_policies():
+ # A single old .ts file will be removed
+ old_float = time() - (diskfile.ONE_WEEK + 1)
+ file1 = Timestamp(old_float).internal + '.ts'
+ file_list = [file1]
+ self.check_hash_cleanup_listdir(policy, file_list, [])
+
+ def test_hash_cleanup_listdir_meta_keeps_old_ts(self):
+ for policy in self.iter_policies():
+ old_float = time() - (diskfile.ONE_WEEK + 1)
+ file1 = Timestamp(old_float).internal + '.ts'
+ file2 = Timestamp(time() + 2).internal + '.meta'
+ file_list = [file1, file2]
+ if policy.policy_type == EC_POLICY:
+ # EC will clean up old .ts despite a .meta
+ expected = [file2]
+ else:
+ # for REPL an orphaned .meta keeps even a very old .ts from being reclaimed
+ expected = [file2, file1]
+ self.check_hash_cleanup_listdir(policy, file_list, expected)
+
+ def test_hash_cleanup_listdir_keep_single_old_data(self):
+ for policy in self.iter_policies():
+ old_float = time() - (diskfile.ONE_WEEK + 1)
+ file1 = self._datafilename(Timestamp(old_float), policy)
+ file_list = [file1]
+ if policy.policy_type == EC_POLICY:
+ # for EC an isolated old .data file is removed; it's useless
+ # without a .durable
+ expected = []
+ else:
+ # A single old .data file will not be removed
+ expected = file_list
+ self.check_hash_cleanup_listdir(policy, file_list, expected)
+
+ def test_hash_cleanup_listdir_drops_isolated_durable(self):
+ for policy in self.iter_policies():
+ if policy.policy_type == EC_POLICY:
+ file1 = Timestamp(time()).internal + '.durable'
+ file_list = [file1]
+ self.check_hash_cleanup_listdir(policy, file_list, [])
+
+ def test_hash_cleanup_listdir_keep_single_old_meta(self):
+ for policy in self.iter_policies():
+ # A single old .meta file will not be removed
+ old_float = time() - (diskfile.ONE_WEEK + 1)
+ file1 = Timestamp(old_float).internal + '.meta'
+ file_list = [file1]
+ self.check_hash_cleanup_listdir(policy, file_list, [file1])
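+ # (The reclaim threshold in these tests is diskfile.ONE_WEEK: e.g. a
+ # lone .ts past it is removed, while a lone .meta of any age survives.)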
+
+ # hash_cleanup_listdir tests - error handling
+
+ def test_hash_cleanup_listdir_hsh_path_enoent(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ # common.utils.listdir *completely* mutes ENOENT
+ path = os.path.join(self.testdir, 'does-not-exist')
+ self.assertEqual(df_mgr.hash_cleanup_listdir(path), [])
+
+ def test_hash_cleanup_listdir_hsh_path_other_oserror(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ with mock.patch('os.listdir') as mock_listdir:
+ mock_listdir.side_effect = OSError('kaboom!')
+ # but it will raise other OSErrors
+ path = os.path.join(self.testdir, 'does-not-matter')
+ self.assertRaises(OSError, df_mgr.hash_cleanup_listdir,
+ path)
+
+ def test_hash_cleanup_listdir_reclaim_tombstone_remove_file_error(self):
+ for policy in self.iter_policies():
+ # Timestamp 1 makes the check routine pretend the file
+ # disappeared after listdir before unlink.
+ file1 = '0000000001.00000.ts'
+ file_list = [file1]
+ self.check_hash_cleanup_listdir(policy, file_list, [])
+
+ def test_hash_cleanup_listdir_older_remove_file_error(self):
+ for policy in self.iter_policies():
+ # Timestamp 1 makes the check routine pretend the file
+ # disappeared after listdir before unlink.
+ file1 = self._datafilename(Timestamp(1), policy)
+ file2 = '0000000002.00000.ts'
+ file_list = [file1, file2]
+ if policy.policy_type == EC_POLICY:
+ # the .ts gets reclaimed despite the failed .data delete
+ expected = []
+ else:
+ # the .ts isn't reclaimed because there were two files in dir
+ expected = [file2]
+ self.check_hash_cleanup_listdir(policy, file_list, expected)
+
+ # invalidate_hash tests - behavior
+
+ def test_invalidate_hash_file_does_not_exist(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
+ policy=policy)
+ suffix_dir = os.path.dirname(df._datadir)
+ part_path = os.path.join(self.devices, 'sda1',
+ diskfile.get_data_dir(policy), '0')
+ hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
+ self.assertFalse(os.path.exists(hashes_file)) # sanity
+ with mock.patch('swift.obj.diskfile.lock_path') as mock_lock:
+ df_mgr.invalidate_hash(suffix_dir)
+ self.assertFalse(mock_lock.called)
+ # does not create file
+ self.assertFalse(os.path.exists(hashes_file))
+
+ def test_invalidate_hash_file_exists(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ # create something to hash
+ df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
+ policy=policy)
+ df.delete(self.ts())
+ suffix_dir = os.path.dirname(df._datadir)
+ suffix = os.path.basename(suffix_dir)
+ hashes = df_mgr.get_hashes('sda1', '0', [], policy)
+ self.assertTrue(suffix in hashes) # sanity
+ # sanity check hashes file
+ part_path = os.path.join(self.devices, 'sda1',
+ diskfile.get_data_dir(policy), '0')
+ hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
+ with open(hashes_file, 'rb') as f:
+ self.assertEqual(hashes, pickle.load(f))
+ # invalidate the hash
+ with mock.patch('swift.obj.diskfile.lock_path') as mock_lock:
+ df_mgr.invalidate_hash(suffix_dir)
+ self.assertTrue(mock_lock.called)
+ with open(hashes_file, 'rb') as f:
+ self.assertEqual({suffix: None}, pickle.load(f))
+
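+ # A sketch of the hashes pickle lifecycle exercised above (suffix and
+ # digest illustrative):
+ #
+ #   pickle.load(open(hashes_file, 'rb'))   # {'abc': '<md5 hexdigest>'}
+ #   df_mgr.invalidate_hash(suffix_dir)
+ #   pickle.load(open(hashes_file, 'rb'))   # {'abc': None}
+ #   df_mgr.get_hashes('sda1', '0', [], policy)  # recomputes 'abc'
+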
+ # invalidate_hash tests - error handling
+
+ def test_invalidate_hash_bad_pickle(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ # make some valid data
+ df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
+ policy=policy)
+ suffix_dir = os.path.dirname(df._datadir)
+ suffix = os.path.basename(suffix_dir)
+ df.delete(self.ts())
+ # sanity check hashes file
+ part_path = os.path.join(self.devices, 'sda1',
+ diskfile.get_data_dir(policy), '0')
+ hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
+ self.assertFalse(os.path.exists(hashes_file))
+ # write some garbage in hashes file
+ with open(hashes_file, 'w') as f:
+ f.write('asdf')
+ # invalidate_hash silently does *NOT* repair invalid data
+ df_mgr.invalidate_hash(suffix_dir)
+ with open(hashes_file) as f:
+ self.assertEqual(f.read(), 'asdf')
+ # ... but get_hashes will
+ hashes = df_mgr.get_hashes('sda1', '0', [], policy)
+ self.assertTrue(suffix in hashes)
+
+ # get_hashes tests - hash_suffix behaviors
+
+ def test_hash_suffix_one_tombstone(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ df = df_mgr.get_diskfile(
+ 'sda1', '0', 'a', 'c', 'o', policy=policy)
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+ # write a tombstone
+ timestamp = self.ts()
+ df.delete(timestamp)
+ tombstone_hash = md5(timestamp.internal + '.ts').hexdigest()
+ hashes = df_mgr.get_hashes('sda1', '0', [], policy)
+ expected = {
+ REPL_POLICY: {suffix: tombstone_hash},
+ EC_POLICY: {suffix: {
+ # fi is None here because we have a tombstone
+ None: tombstone_hash}},
+ }[policy.policy_type]
+ self.assertEqual(hashes, expected)
+
+ def test_hash_suffix_one_reclaim_tombstone(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ df = df_mgr.get_diskfile(
+ 'sda1', '0', 'a', 'c', 'o', policy=policy)
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+ # scale back the reclaim age of this test's manager a bit
+ df_mgr.reclaim_age = 1000
+ # write a tombstone that's just a *little* older
+ old_time = time() - 1001
+ timestamp = Timestamp(old_time)
+ df.delete(timestamp.internal)
+ tombstone_hash = md5(timestamp.internal + '.ts').hexdigest()
+ hashes = df_mgr.get_hashes('sda1', '0', [], policy)
+ expected = {
+ # repl is broken, it doesn't use self.reclaim_age
+ REPL_POLICY: tombstone_hash,
+ EC_POLICY: {},
+ }[policy.policy_type]
+ self.assertEqual(hashes, {suffix: expected})
+
+ def test_hash_suffix_one_datafile(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ df = df_mgr.get_diskfile(
+ 'sda1', '0', 'a', 'c', 'o', policy=policy, frag_index=7)
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+ # write a datafile
+ timestamp = self.ts()
+ with df.create() as writer:
+ test_data = 'test file'
+ writer.write(test_data)
+ metadata = {
+ 'X-Timestamp': timestamp.internal,
+ 'ETag': md5(test_data).hexdigest(),
+ 'Content-Length': len(test_data),
+ }
+ writer.put(metadata)
+ hashes = df_mgr.get_hashes('sda1', '0', [], policy)
+ datafile_hash = md5({
+ EC_POLICY: timestamp.internal,
+ REPL_POLICY: timestamp.internal + '.data',
+ }[policy.policy_type]).hexdigest()
+ expected = {
+ REPL_POLICY: {suffix: datafile_hash},
+ EC_POLICY: {suffix: {
+ # because there's no .durable file, we have no hash for
+ # the None key - only the frag index for the data file
+ 7: datafile_hash}},
+ }[policy.policy_type]
+ msg = 'expected %r != %r for policy %r' % (
+ expected, hashes, policy)
+ self.assertEqual(hashes, expected, msg)
+
+ def test_hash_suffix_multi_file_ends_in_tombstone(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o', policy=policy,
+ frag_index=4)
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+ mkdirs(df._datadir)
+ now = time()
+ # go behind the scenes and set up a bunch of weird file names
+ for tdiff in [500, 100, 10, 1]:
+ for suff in ['.meta', '.data', '.ts']:
+ timestamp = Timestamp(now - tdiff)
+ filename = timestamp.internal
+ if policy.policy_type == EC_POLICY and suff == '.data':
+ filename += '#%s' % df._frag_index
+ filename += suff
+ open(os.path.join(df._datadir, filename), 'w').close()
+ tombstone_hash = md5(filename).hexdigest()
+ # call get_hashes and it should clean things up
+ hashes = df_mgr.get_hashes('sda1', '0', [], policy)
+ expected = {
+ REPL_POLICY: {suffix: tombstone_hash},
+ EC_POLICY: {suffix: {
+ # fi is None here because we have a tombstone
+ None: tombstone_hash}},
+ }[policy.policy_type]
+ self.assertEqual(hashes, expected)
+ # only the tombstone should be left
+ found_files = os.listdir(df._datadir)
+ self.assertEqual(found_files, [filename])
+
+ def test_hash_suffix_multi_file_ends_in_datafile(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o', policy=policy,
+ frag_index=4)
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+ mkdirs(df._datadir)
+ now = time()
+ timestamp = None
+ # go behind the scenes and set up a bunch of weird file names
+ for tdiff in [500, 100, 10, 1]:
+ suffs = ['.meta', '.data']
+ if tdiff > 50:
+ suffs.append('.ts')
+ if policy.policy_type == EC_POLICY:
+ suffs.append('.durable')
+ for suff in suffs:
+ timestamp = Timestamp(now - tdiff)
+ filename = timestamp.internal
+ if policy.policy_type == EC_POLICY and suff == '.data':
+ filename += '#%s' % df._frag_index
+ filename += suff
+ open(os.path.join(df._datadir, filename), 'w').close()
+ # call get_hashes and it should clean things up
+ hashes = df_mgr.get_hashes('sda1', '0', [], policy)
+ data_filename = timestamp.internal
+ if policy.policy_type == EC_POLICY:
+ data_filename += '#%s' % df._frag_index
+ data_filename += '.data'
+ metadata_filename = timestamp.internal + '.meta'
+ durable_filename = timestamp.internal + '.durable'
+ if policy.policy_type == EC_POLICY:
+ hasher = md5()
+ hasher.update(metadata_filename)
+ hasher.update(durable_filename)
+ expected = {
+ suffix: {
+ # metadata & durable updates are hashed separately
+ None: hasher.hexdigest(),
+ 4: self.fname_to_ts_hash(data_filename),
+ }
+ }
+ expected_files = [data_filename, durable_filename,
+ metadata_filename]
+ elif policy.policy_type == REPL_POLICY:
+ hasher = md5()
+ hasher.update(metadata_filename)
+ hasher.update(data_filename)
+ expected = {suffix: hasher.hexdigest()}
+ expected_files = [data_filename, metadata_filename]
+ else:
+ self.fail('unknown policy type %r' % policy.policy_type)
+ msg = 'expected %r != %r for policy %r' % (
+ expected, hashes, policy)
+ self.assertEqual(hashes, expected, msg)
+ # only the latest meta and data (and durable, for EC) should be left
+ self.assertEqual(sorted(os.listdir(df._datadir)),
+ sorted(expected_files))
+
+ def test_hash_suffix_removes_empty_hashdir_and_suffix(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
+ policy=policy, frag_index=2)
+ os.makedirs(df._datadir)
+ self.assertTrue(os.path.exists(df._datadir)) # sanity
+ df_mgr.get_hashes('sda1', '0', [], policy)
+ suffix_dir = os.path.dirname(df._datadir)
+ self.assertFalse(os.path.exists(suffix_dir))
+
+ def test_hash_suffix_removes_empty_hashdirs_in_valid_suffix(self):
+ paths, suffix = find_paths_with_matching_suffixes(needed_matches=3,
+ needed_suffixes=0)
+ matching_paths = paths.pop(suffix)
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ df = df_mgr.get_diskfile('sda1', '0', *matching_paths[0],
+ policy=policy, frag_index=2)
+ # create a real, valid hsh_path
+ df.delete(Timestamp(time()))
+ # and a couple of empty hsh_paths
+ empty_hsh_paths = []
+ for path in matching_paths[1:]:
+ fake_df = df_mgr.get_diskfile('sda1', '0', *path,
+ policy=policy)
+ os.makedirs(fake_df._datadir)
+ empty_hsh_paths.append(fake_df._datadir)
+ for hsh_path in empty_hsh_paths:
+ self.assertTrue(os.path.exists(hsh_path)) # sanity
+ # get_hashes will clean up the empty hsh_paths and leave the valid one
+ hashes = df_mgr.get_hashes('sda1', '0', [], policy)
+ self.assertTrue(suffix in hashes)
+ self.assertTrue(os.path.exists(df._datadir))
+ for hsh_path in empty_hsh_paths:
+ self.assertFalse(os.path.exists(hsh_path))
+
+ # get_hashes tests - hash_suffix error handling
+
+ def test_hash_suffix_listdir_enotdir(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ suffix = '123'
+ suffix_path = os.path.join(self.devices, 'sda1',
+ diskfile.get_data_dir(policy), '0',
+ suffix)
+ os.makedirs(suffix_path)
+ self.assertTrue(os.path.exists(suffix_path)) # sanity
+ hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
+ # suffix dir cleaned up by get_hashes
+ self.assertFalse(os.path.exists(suffix_path))
+ expected = {
+ EC_POLICY: {'123': {}},
+ REPL_POLICY: {'123': EMPTY_ETAG},
+ }[policy.policy_type]
+ msg = 'expected %r != %r for policy %r' % (expected, hashes,
+ policy)
+ self.assertEqual(hashes, expected, msg)
+
+ # now make the suffix path a file
+ open(suffix_path, 'w').close()
+ hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
+ expected = {}
+ msg = 'expected %r != %r for policy %r' % (expected, hashes,
+ policy)
+ self.assertEqual(hashes, expected, msg)
+
+ def test_hash_suffix_listdir_enoent(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ orig_listdir = os.listdir
+ listdir_calls = []
+
+ def mock_listdir(path):
+ success = False
+ try:
+ rv = orig_listdir(path)
+ success = True
+ return rv
+ finally:
+ listdir_calls.append((path, success))
+
+ with mock.patch('swift.obj.diskfile.os.listdir',
+ mock_listdir):
+ # recalc always forces hash_suffix even if the suffix
+ # does not exist!
+ df_mgr.get_hashes('sda1', '0', ['123'], policy)
+
+ part_path = os.path.join(self.devices, 'sda1',
+ diskfile.get_data_dir(policy), '0')
+
+ self.assertEqual(listdir_calls, [
+ # part path gets created automatically
+ (part_path, True),
+ # this one blows up
+ (os.path.join(part_path, '123'), False),
+ ])
+
+ def test_hash_suffix_hash_cleanup_listdir_enotdir_quarantined(self):
+ for policy in self.iter_policies():
+ df = self.df_router[policy].get_diskfile(
+ self.existing_device, '0', 'a', 'c', 'o', policy=policy)
+ # make the suffix directory
+ suffix_path = os.path.dirname(df._datadir)
+ os.makedirs(suffix_path)
+ suffix = os.path.basename(suffix_path)
+
+ # make the df hash path a file
+ open(df._datadir, 'wb').close()
+ df_mgr = self.df_router[policy]
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
+ policy)
+ expected = {
+ REPL_POLICY: {suffix: EMPTY_ETAG},
+ EC_POLICY: {suffix: {}},
+ }[policy.policy_type]
+ self.assertEqual(hashes, expected)
+ # and hash path is quarantined
+ self.assertFalse(os.path.exists(df._datadir))
+ # each device has a quarantined directory
+ quarantine_base = os.path.join(self.devices,
+ self.existing_device, 'quarantined')
+ # the quarantine path is...
+ quarantine_path = os.path.join(
+ quarantine_base, # quarantine root
+ diskfile.get_data_dir(policy), # per-policy data dir
+ suffix, # first dir from which quarantined file was removed
+ os.path.basename(df._datadir) # name of quarantined file
+ )
+ self.assertTrue(os.path.exists(quarantine_path))
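+ # e.g. (illustrative), a hash path quarantined as a file lands at:
+ # <devices>/sda1/quarantined/objects[-N]/<suffix>/<object hash>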
+
+ def test_hash_suffix_hash_cleanup_listdir_other_oserror(self):
+ for policy in self.iter_policies():
+ timestamp = self.ts()
+ df_mgr = self.df_router[policy]
+ df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
+ 'o', policy=policy,
+ frag_index=7)
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+ with df.create() as writer:
+ test_data = 'test_data'
+ writer.write(test_data)
+ metadata = {
+ 'X-Timestamp': timestamp.internal,
+ 'ETag': md5(test_data).hexdigest(),
+ 'Content-Length': len(test_data),
+ }
+ writer.put(metadata)
+
+ orig_os_listdir = os.listdir
+ listdir_calls = []
+
+ part_path = os.path.join(self.devices, self.existing_device,
+ diskfile.get_data_dir(policy), '0')
+ suffix_path = os.path.join(part_path, suffix)
+ datadir_path = os.path.join(suffix_path, hash_path('a', 'c', 'o'))
+
+ def mock_os_listdir(path):
+ listdir_calls.append(path)
+ if path == datadir_path:
+ # we want the part and suffix listdir calls to pass and
+ # make the hash_cleanup_listdir raise an exception
+ raise OSError(errno.EACCES, os.strerror(errno.EACCES))
+ return orig_os_listdir(path)
+
+ with mock.patch('os.listdir', mock_os_listdir):
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [],
+ policy)
+
+ self.assertEqual(listdir_calls, [
+ part_path,
+ suffix_path,
+ datadir_path,
+ ])
+ expected = {suffix: None}
+ msg = 'expected %r != %r for policy %r' % (
+ expected, hashes, policy)
+ self.assertEqual(hashes, expected, msg)
+
+ def test_hash_suffix_rmdir_hsh_path_oserror(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ # make an empty hsh_path to be removed
+ df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
+ 'o', policy=policy)
+ os.makedirs(df._datadir)
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+ with mock.patch('os.rmdir', side_effect=OSError()):
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [],
+ policy)
+ expected = {
+ EC_POLICY: {},
+ REPL_POLICY: md5().hexdigest(),
+ }[policy.policy_type]
+ self.assertEqual(hashes, {suffix: expected})
+ self.assertTrue(os.path.exists(df._datadir))
+
+ def test_hash_suffix_rmdir_suffix_oserror(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ # make an empty hsh_path to be removed
+ df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
+ 'o', policy=policy)
+ os.makedirs(df._datadir)
+ suffix_path = os.path.dirname(df._datadir)
+ suffix = os.path.basename(suffix_path)
+
+ captured_paths = []
+
+ def mock_rmdir(path):
+ captured_paths.append(path)
+ if path == suffix_path:
+ raise OSError('kaboom!')
+
+ with mock.patch('os.rmdir', mock_rmdir):
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [],
+ policy)
+ expected = {
+ EC_POLICY: {},
+ REPL_POLICY: md5().hexdigest(),
+ }[policy.policy_type]
+ self.assertEqual(hashes, {suffix: expected})
+ self.assertTrue(os.path.exists(suffix_path))
+ self.assertEqual([
+ df._datadir,
+ suffix_path,
+ ], captured_paths)
+
+ # get_hashes tests - behaviors
+
+ def test_get_hashes_creates_partition_and_pkl(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [],
+ policy)
+ self.assertEqual(hashes, {})
+ part_path = os.path.join(
+ self.devices, 'sda1', diskfile.get_data_dir(policy), '0')
+ self.assertTrue(os.path.exists(part_path))
+ hashes_file = os.path.join(part_path,
+ diskfile.HASH_FILE)
+ self.assertTrue(os.path.exists(hashes_file))
+
+ # and double check the hashes
+ new_hashes = df_mgr.get_hashes(self.existing_device, '0', [],
+ policy)
+ self.assertEqual(hashes, new_hashes)
+
+ def test_get_hashes_new_pkl_finds_new_suffix_dirs(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ part_path = os.path.join(
+ self.devices, self.existing_device,
+ diskfile.get_data_dir(policy), '0')
+ hashes_file = os.path.join(part_path,
+ diskfile.HASH_FILE)
+ # add something to find
+ df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
+ 'o', policy=policy, frag_index=4)
+ timestamp = self.ts()
+ df.delete(timestamp)
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+ # get_hashes will find the untracked suffix dir
+ self.assertFalse(os.path.exists(hashes_file)) # sanity
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
+ self.assertTrue(suffix in hashes)
+ # ... and create a hashes pickle for it
+ self.assertTrue(os.path.exists(hashes_file))
+
+ def test_get_hashes_old_pickle_does_not_find_new_suffix_dirs(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ # create an empty stale pickle
+ part_path = os.path.join(
+ self.devices, 'sda1', diskfile.get_data_dir(policy), '0')
+ hashes_file = os.path.join(part_path,
+ diskfile.HASH_FILE)
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
+ self.assertEqual(hashes, {})
+ self.assertTrue(os.path.exists(hashes_file)) # sanity
+ # add something to find
+ df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
+ policy=policy, frag_index=4)
+ os.makedirs(df._datadir)
+ filename = Timestamp(time()).internal + '.ts'
+ open(os.path.join(df._datadir, filename), 'w').close()
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+ # but get_hashes has no reason to find it (because we didn't
+ # call invalidate_hash)
+ new_hashes = df_mgr.get_hashes(self.existing_device, '0', [],
+ policy)
+ self.assertEqual(new_hashes, hashes)
+ # ... unless remote end asks for a recalc
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
+ policy)
+ self.assertTrue(suffix in hashes)
+
+ def test_get_hashes_does_not_rehash_known_suffix_dirs(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
+ 'o', policy=policy, frag_index=4)
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+ timestamp = self.ts()
+ df.delete(timestamp)
+ # create the baseline hashes file
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
+ self.assertTrue(suffix in hashes)
+ # now change the contents of the suffix w/o calling
+ # invalidate_hash
+ rmtree(df._datadir)
+ suffix_path = os.path.dirname(df._datadir)
+ self.assertTrue(os.path.exists(suffix_path)) # sanity
+ new_hashes = df_mgr.get_hashes(self.existing_device, '0', [],
+ policy)
+ # ... and get_hashes is none the wiser
+ self.assertEqual(new_hashes, hashes)
+
+ # ... unless remote end asks for a recalc
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
+ policy)
+ self.assertNotEqual(new_hashes, hashes)
+ # and the empty suffix path is removed
+ self.assertFalse(os.path.exists(suffix_path))
+ # ... but is hashed as "empty"
+ expected = {
+ EC_POLICY: {},
+ REPL_POLICY: md5().hexdigest(),
+ }[policy.policy_type]
+ self.assertEqual({suffix: expected}, hashes)
+
+ def test_get_hashes_multi_file_multi_suffix(self):
+ paths, suffix = find_paths_with_matching_suffixes(needed_matches=2,
+ needed_suffixes=3)
+ matching_paths = paths.pop(suffix)
+ matching_paths.sort(key=lambda path: hash_path(*path))
+ other_paths = []
+ for suffix, paths in paths.items():
+ other_paths.append(paths[0])
+ if len(other_paths) >= 2:
+ break
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ # first we'll make a tombstone
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ *other_paths[0], policy=policy,
+ frag_index=4)
+ timestamp = self.ts()
+ df.delete(timestamp)
+ tombstone_hash = md5(timestamp.internal + '.ts').hexdigest()
+ tombstone_suffix = os.path.basename(os.path.dirname(df._datadir))
+ # second object, in another suffix, has a .data file
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ *other_paths[1], policy=policy,
+ frag_index=5)
+ timestamp = self.ts()
+ with df.create() as writer:
+ test_data = 'test_file'
+ writer.write(test_data)
+ metadata = {
+ 'X-Timestamp': timestamp.internal,
+ 'ETag': md5(test_data).hexdigest(),
+ 'Content-Length': len(test_data),
+ }
+ writer.put(metadata)
+ writer.commit(timestamp)
+ datafile_name = timestamp.internal
+ if policy.policy_type == EC_POLICY:
+ datafile_name += '#%d' % df._frag_index
+ datafile_name += '.data'
+ durable_hash = md5(timestamp.internal + '.durable').hexdigest()
+ datafile_suffix = os.path.basename(os.path.dirname(df._datadir))
+ # in the *third* suffix - two datafiles for different hashes
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ *matching_paths[0], policy=policy,
+ frag_index=6)
+ matching_suffix = os.path.basename(os.path.dirname(df._datadir))
+ timestamp = self.ts()
+ with df.create() as writer:
+ test_data = 'test_file'
+ writer.write(test_data)
+ metadata = {
+ 'X-Timestamp': timestamp.internal,
+ 'ETag': md5(test_data).hexdigest(),
+ 'Content-Length': len(test_data),
+ }
+ writer.put(metadata)
+ writer.commit(timestamp)
+ # we'll keep track of file names for hash calculations
+ filename = timestamp.internal
+ if policy.policy_type == EC_POLICY:
+ filename += '#%d' % df._frag_index
+ filename += '.data'
+ filenames = {
+ 'data': {
+ 6: filename
+ },
+ 'durable': [timestamp.internal + '.durable'],
+ }
+ df = df_mgr.get_diskfile(self.existing_device, '0',
+ *matching_paths[1], policy=policy,
+ frag_index=7)
+ self.assertEqual(os.path.basename(os.path.dirname(df._datadir)),
+ matching_suffix) # sanity
+ timestamp = self.ts()
+ with df.create() as writer:
+ test_data = 'test_file'
+ writer.write(test_data)
+ metadata = {
+ 'X-Timestamp': timestamp.internal,
+ 'ETag': md5(test_data).hexdigest(),
+ 'Content-Length': len(test_data),
+ }
+ writer.put(metadata)
+ writer.commit(timestamp)
+ filename = timestamp.internal
+ if policy.policy_type == EC_POLICY:
+ filename += '#%d' % df._frag_index
+ filename += '.data'
+ filenames['data'][7] = filename
+ filenames['durable'].append(timestamp.internal + '.durable')
+ # now make up the expected suffixes!
+ if policy.policy_type == EC_POLICY:
+ hasher = md5()
+ for filename in filenames['durable']:
+ hasher.update(filename)
+ expected = {
+ tombstone_suffix: {
+ None: tombstone_hash,
+ },
+ datafile_suffix: {
+ None: durable_hash,
+ 5: self.fname_to_ts_hash(datafile_name),
+ },
+ matching_suffix: {
+ None: hasher.hexdigest(),
+ 6: self.fname_to_ts_hash(filenames['data'][6]),
+ 7: self.fname_to_ts_hash(filenames['data'][7]),
+ },
+ }
+ elif policy.policy_type == REPL_POLICY:
+ hasher = md5()
+ for filename in filenames['data'].values():
+ hasher.update(filename)
+ expected = {
+ tombstone_suffix: tombstone_hash,
+ datafile_suffix: md5(datafile_name).hexdigest(),
+ matching_suffix: hasher.hexdigest(),
+ }
+ else:
+ self.fail('unknown policy type %r' % policy.policy_type)
+ hashes = df_mgr.get_hashes('sda1', '0', [], policy)
+ self.assertEqual(hashes, expected)
+
+ # get_hashes tests - error handling
+
+ def test_get_hashes_bad_dev(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ df_mgr.mount_check = True
+ with mock.patch('swift.obj.diskfile.check_mount',
+ mock.MagicMock(side_effect=[False])):
+ self.assertRaises(
+ DiskFileDeviceUnavailable,
+ df_mgr.get_hashes, self.existing_device, '0', ['123'],
+ policy)
+
+ def test_get_hashes_zero_bytes_pickle(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ part_path = os.path.join(self.devices, self.existing_device,
+ diskfile.get_data_dir(policy), '0')
+ os.makedirs(part_path)
+ # create a pre-existing zero-byte file
+ open(os.path.join(part_path, diskfile.HASH_FILE), 'w').close()
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [],
+ policy)
+ self.assertEqual(hashes, {})
+
+ def test_get_hashes_hash_suffix_enotdir(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ # create a real suffix dir
+ df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
+ 'o', policy=policy, frag_index=3)
+ df.delete(Timestamp(time()))
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+ # touch a bad suffix dir
+ part_dir = os.path.join(self.devices, self.existing_device,
+ diskfile.get_data_dir(policy), '0')
+ open(os.path.join(part_dir, 'bad'), 'w').close()
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
+ self.assertTrue(suffix in hashes)
+ self.assertFalse('bad' in hashes)
+
+ def test_get_hashes_hash_suffix_other_oserror(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ suffix = '123'
+ suffix_path = os.path.join(self.devices, self.existing_device,
+ diskfile.get_data_dir(policy), '0',
+ suffix)
+ os.makedirs(suffix_path)
+ self.assertTrue(os.path.exists(suffix_path)) # sanity
+ hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
+ policy)
+ expected = {
+ EC_POLICY: {'123': {}},
+ REPL_POLICY: {'123': EMPTY_ETAG},
+ }[policy.policy_type]
+ msg = 'expected %r != %r for policy %r' % (expected, hashes,
+ policy)
+ self.assertEqual(hashes, expected, msg)
+
+ # this OSError does *not* raise PathNotDir, and is allowed to leak
+ # from hash_suffix into get_hashes
+ mocked_os_listdir = mock.Mock(
+ side_effect=OSError(errno.EACCES, os.strerror(errno.EACCES)))
+ with mock.patch("os.listdir", mocked_os_listdir):
+ with mock.patch('swift.obj.diskfile.logging') as mock_logging:
+ hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
+ self.assertEqual(mock_logging.method_calls,
+ [mock.call.exception('Error hashing suffix')])
+ # recalc always causes a suffix to get reset to None; the listdir
+ # error prevents the suffix from being rehashed
+ expected = {'123': None}
+ msg = 'expected %r != %r for policy %r' % (expected, hashes,
+ policy)
+ self.assertEqual(hashes, expected, msg)
+
+ def test_get_hashes_modified_recursive_retry(self):
+ for policy in self.iter_policies():
+ df_mgr = self.df_router[policy]
+ # first create an empty pickle
+ df_mgr.get_hashes(self.existing_device, '0', [], policy)
+ hashes_file = os.path.join(
+ self.devices, self.existing_device,
+ diskfile.get_data_dir(policy), '0', diskfile.HASH_FILE)
+ mtime = os.path.getmtime(hashes_file)
+ non_local = {'mtime': mtime}
+
+ calls = []
+
+ def mock_getmtime(filename):
+ t = non_local['mtime']
+ if len(calls) <= 3:
+ # this will make the *next* call get a slightly
+ # newer mtime than the last
+ non_local['mtime'] += 1
+ # record the exact value of every return
+ calls.append(t)
+ return t
+ with mock.patch('swift.obj.diskfile.getmtime',
+ mock_getmtime):
+ df_mgr.get_hashes(self.existing_device, '0', ['123'],
+ policy)
+
+ self.assertEqual(calls, [
+ mtime + 0, # read
+ mtime + 1, # modified
+ mtime + 2, # read
+ mtime + 3, # modified
+ mtime + 4, # read
+ mtime + 4, # not modified
+ ])
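+ # A rough sketch (assumed shape, not the real implementation) of the
+ # retry loop those getmtime calls trace: hash, then re-check the pkl's
+ # mtime, and start over if another writer modified it in the meantime:
+ #
+ #   mtime = getmtime(hashes_file)       # read
+ #   hashes = do_hashing()
+ #   if getmtime(hashes_file) != mtime:  # modified -> recurse and retry
+ #       return get_hashes(...)
+ #   write_pickle(hashes, hashes_file)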
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/unit/obj/test_expirer.py b/test/unit/obj/test_expirer.py
index 7c174f251..ca815d358 100644
--- a/test/unit/obj/test_expirer.py
+++ b/test/unit/obj/test_expirer.py
@@ -16,7 +16,7 @@
import urllib
from time import time
from unittest import main, TestCase
-from test.unit import FakeLogger, FakeRing, mocked_http_conn
+from test.unit import FakeRing, mocked_http_conn, debug_logger
from copy import deepcopy
from tempfile import mkdtemp
from shutil import rmtree
@@ -53,7 +53,8 @@ class TestObjectExpirer(TestCase):
internal_client.sleep = not_sleep
self.rcache = mkdtemp()
- self.logger = FakeLogger()
+ self.conf = {'recon_cache_path': self.rcache}
+ self.logger = debug_logger('test-recon')
def tearDown(self):
rmtree(self.rcache)
@@ -167,7 +168,7 @@ class TestObjectExpirer(TestCase):
'2': set('5-five 6-six'.split()),
'3': set(u'7-seven\u2661'.split()),
}
- x = ObjectExpirer({})
+ x = ObjectExpirer(self.conf)
x.swift = InternalClient(containers)
deleted_objects = {}
@@ -233,31 +234,32 @@ class TestObjectExpirer(TestCase):
x = expirer.ObjectExpirer({}, logger=self.logger)
x.report()
- self.assertEqual(x.logger.log_dict['info'], [])
+ self.assertEqual(x.logger.get_lines_for_level('info'), [])
x.logger._clear()
x.report(final=True)
- self.assertTrue('completed' in x.logger.log_dict['info'][-1][0][0],
- x.logger.log_dict['info'])
- self.assertTrue('so far' not in x.logger.log_dict['info'][-1][0][0],
- x.logger.log_dict['info'])
+ self.assertTrue(
+ 'completed' in str(x.logger.get_lines_for_level('info')))
+ self.assertTrue(
+ 'so far' not in str(x.logger.get_lines_for_level('info')))
x.logger._clear()
x.report_last_time = time() - x.report_interval
x.report()
- self.assertTrue('completed' not in x.logger.log_dict['info'][-1][0][0],
- x.logger.log_dict['info'])
- self.assertTrue('so far' in x.logger.log_dict['info'][-1][0][0],
- x.logger.log_dict['info'])
+ self.assertTrue(
+ 'completed' not in str(x.logger.get_lines_for_level('info')))
+ self.assertTrue(
+ 'so far' in str(x.logger.get_lines_for_level('info')))
def test_run_once_nothing_to_do(self):
- x = expirer.ObjectExpirer({}, logger=self.logger)
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = 'throw error because a string does not have needed methods'
x.run_once()
- self.assertEqual(x.logger.log_dict['exception'],
- [(("Unhandled exception",), {},
- "'str' object has no attribute "
- "'get_account_info'")])
+ self.assertEqual(x.logger.get_lines_for_level('error'),
+ ["Unhandled exception: "])
+ log_args, log_kwargs = x.logger.log_dict['error'][0]
+ self.assertEqual(str(log_kwargs['exc_info'][1]),
+ "'str' object has no attribute 'get_account_info'")
def test_run_once_calls_report(self):
class InternalClient(object):
@@ -267,14 +269,14 @@ class TestObjectExpirer(TestCase):
def iter_containers(*a, **kw):
return []
- x = expirer.ObjectExpirer({}, logger=self.logger)
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = InternalClient()
x.run_once()
self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
+ x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
def test_run_once_unicode_problem(self):
class InternalClient(object):
@@ -296,7 +298,7 @@ class TestObjectExpirer(TestCase):
def delete_container(*a, **kw):
pass
- x = expirer.ObjectExpirer({}, logger=self.logger)
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = InternalClient()
requests = []
@@ -323,27 +325,28 @@ class TestObjectExpirer(TestCase):
def iter_objects(*a, **kw):
raise Exception('This should not have been called')
- x = expirer.ObjectExpirer({'recon_cache_path': self.rcache},
+ x = expirer.ObjectExpirer(self.conf,
logger=self.logger)
x.swift = InternalClient([{'name': str(int(time() + 86400))}])
x.run_once()
- for exccall in x.logger.log_dict['exception']:
- self.assertTrue(
- 'This should not have been called' not in exccall[0][0])
- self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
+ logs = x.logger.all_log_lines()
+ self.assertEqual(logs['info'], [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
+ self.assertTrue('error' not in logs)
# Reverse test to be sure it still would blow up the way expected.
fake_swift = InternalClient([{'name': str(int(time() - 86400))}])
- x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.run_once()
self.assertEqual(
- x.logger.log_dict['exception'],
- [(('Unhandled exception',), {},
- str(Exception('This should not have been called')))])
+ x.logger.get_lines_for_level('error'), [
+ 'Unhandled exception: '])
+ log_args, log_kwargs = x.logger.log_dict['error'][-1]
+ self.assertEqual(str(log_kwargs['exc_info'][1]),
+ 'This should not have been called')
def test_object_timestamp_break(self):
class InternalClient(object):
@@ -369,33 +372,27 @@ class TestObjectExpirer(TestCase):
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % int(time() + 86400)}])
- x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.run_once()
- for exccall in x.logger.log_dict['exception']:
- self.assertTrue(
- 'This should not have been called' not in exccall[0][0])
- self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
-
+ self.assertTrue('error' not in x.logger.all_log_lines())
+ self.assertEqual(x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
# Reverse test to be sure it still would blow up the way expected.
ts = int(time() - 86400)
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
- x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.delete_actual_object = should_not_be_called
x.run_once()
- excswhiledeleting = []
- for exccall in x.logger.log_dict['exception']:
- if exccall[0][0].startswith('Exception while deleting '):
- excswhiledeleting.append(exccall[0][0])
self.assertEqual(
- excswhiledeleting,
+ x.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-actual-obj '
- 'This should not have been called' % (ts, ts)])
+ 'This should not have been called: ' % (ts, ts)])
def test_failed_delete_keeps_entry(self):
class InternalClient(object):
@@ -428,24 +425,22 @@ class TestObjectExpirer(TestCase):
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
- x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.iter_containers = lambda: [str(int(time() - 86400))]
x.delete_actual_object = deliberately_blow_up
x.pop_queue = should_not_get_called
x.run_once()
- excswhiledeleting = []
- for exccall in x.logger.log_dict['exception']:
- if exccall[0][0].startswith('Exception while deleting '):
- excswhiledeleting.append(exccall[0][0])
+ error_lines = x.logger.get_lines_for_level('error')
self.assertEqual(
- excswhiledeleting,
+ error_lines,
['Exception while deleting object %d %d-actual-obj '
- 'failed to delete actual object' % (ts, ts)])
+ 'failed to delete actual object: ' % (ts, ts)])
self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
+ x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
# Reverse test to be sure it still would blow up the way expected.
ts = int(time() - 86400)
@@ -453,18 +448,15 @@ class TestObjectExpirer(TestCase):
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
self.logger._clear()
- x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.delete_actual_object = lambda o, t: None
x.pop_queue = should_not_get_called
x.run_once()
- excswhiledeleting = []
- for exccall in x.logger.log_dict['exception']:
- if exccall[0][0].startswith('Exception while deleting '):
- excswhiledeleting.append(exccall[0][0])
self.assertEqual(
- excswhiledeleting,
+ self.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-actual-obj This should '
- 'not have been called' % (ts, ts)])
+ 'not have been called: ' % (ts, ts)])
def test_success_gets_counted(self):
class InternalClient(object):
@@ -493,7 +485,8 @@ class TestObjectExpirer(TestCase):
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-acc/c/actual-obj' % int(time() - 86400)}])
- x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.delete_actual_object = lambda o, t: None
x.pop_queue = lambda c, o: None
self.assertEqual(x.report_objects, 0)
@@ -501,10 +494,9 @@ class TestObjectExpirer(TestCase):
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 1 objects expired',), {})])
+ x.logger.get_lines_for_level('info'),
+ ['Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 1 objects expired'])
def test_delete_actual_object_does_not_get_unicode(self):
class InternalClient(object):
@@ -539,17 +531,18 @@ class TestObjectExpirer(TestCase):
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': u'%d-actual-obj' % int(time() - 86400)}])
- x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.delete_actual_object = delete_actual_object_test_for_unicode
x.pop_queue = lambda c, o: None
self.assertEqual(x.report_objects, 0)
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 1 objects expired',), {})])
+ x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 1 objects expired',
+ ])
self.assertFalse(got_unicode[0])
def test_failed_delete_continues_on(self):
@@ -579,7 +572,7 @@ class TestObjectExpirer(TestCase):
def fail_delete_actual_object(actual_obj, timestamp):
raise Exception('failed to delete actual object')
- x = expirer.ObjectExpirer({}, logger=self.logger)
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger)
cts = int(time() - 86400)
ots = int(time() - 86400)
@@ -597,28 +590,24 @@ class TestObjectExpirer(TestCase):
x.swift = InternalClient(containers, objects)
x.delete_actual_object = fail_delete_actual_object
x.run_once()
- excswhiledeleting = []
- for exccall in x.logger.log_dict['exception']:
- if exccall[0][0].startswith('Exception while deleting '):
- excswhiledeleting.append(exccall[0][0])
- self.assertEqual(sorted(excswhiledeleting), sorted([
+ error_lines = x.logger.get_lines_for_level('error')
+ self.assertEqual(sorted(error_lines), sorted([
'Exception while deleting object %d %d-actual-obj failed to '
- 'delete actual object' % (cts, ots),
+ 'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-next-obj failed to '
- 'delete actual object' % (cts, ots),
+ 'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-actual-obj failed to '
- 'delete actual object' % (cts + 1, ots),
+ 'delete actual object: ' % (cts + 1, ots),
'Exception while deleting object %d %d-next-obj failed to '
- 'delete actual object' % (cts + 1, ots),
+ 'delete actual object: ' % (cts + 1, ots),
'Exception while deleting container %d failed to delete '
- 'container' % (cts,),
+ 'container: ' % (cts,),
'Exception while deleting container %d failed to delete '
- 'container' % (cts + 1,)]))
- self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
+ 'container: ' % (cts + 1,)]))
+ self.assertEqual(x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
def test_run_forever_initial_sleep_random(self):
global last_not_sleep
@@ -664,9 +653,11 @@ class TestObjectExpirer(TestCase):
finally:
expirer.sleep = orig_sleep
self.assertEqual(str(err), 'exiting exception 2')
- self.assertEqual(x.logger.log_dict['exception'],
- [(('Unhandled exception',), {},
- 'exception 1')])
+ self.assertEqual(x.logger.get_lines_for_level('error'),
+ ['Unhandled exception: '])
+ log_args, log_kwargs = x.logger.log_dict['error'][0]
+ self.assertEqual(str(log_kwargs['exc_info'][1]),
+ 'exception 1')
def test_delete_actual_object(self):
got_env = [None]
diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py
new file mode 100755
index 000000000..93a50e84d
--- /dev/null
+++ b/test/unit/obj/test_reconstructor.py
@@ -0,0 +1,2484 @@
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import itertools
+import unittest
+import os
+from hashlib import md5
+import mock
+import cPickle as pickle
+import tempfile
+import time
+import shutil
+import re
+import random
+from eventlet import Timeout
+
+from contextlib import closing, nested, contextmanager
+from gzip import GzipFile
+from shutil import rmtree
+from swift.common import utils
+from swift.common.exceptions import DiskFileError
+from swift.obj import diskfile, reconstructor as object_reconstructor
+from swift.common import ring
+from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
+ POLICIES, EC_POLICY)
+from swift.obj.reconstructor import REVERT
+
+from test.unit import (patch_policies, debug_logger, mocked_http_conn,
+ FabricatedRing, make_timestamp_iter)
+
+
+@contextmanager
+def mock_ssync_sender(ssync_calls=None, response_callback=None, **kwargs):
+ def fake_ssync(daemon, node, job, suffixes):
+ if ssync_calls is not None:
+ ssync_calls.append(
+ {'node': node, 'job': job, 'suffixes': suffixes})
+
+ def fake_call():
+ if response_callback:
+ response = response_callback(node, job, suffixes)
+ else:
+ response = True, {}
+ return response
+ return fake_call
+
+ with mock.patch('swift.obj.reconstructor.ssync_sender', fake_ssync):
+ yield fake_ssync
+
+
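+# A typical use of mock_ssync_sender (sketch): capture the calls the
+# reconstructor would hand to ssync without running a real sender:
+#
+#   calls = []
+#   with mock_ssync_sender(ssync_calls=calls):
+#       reconstructor.reconstruct()
+#   # each entry in calls has 'node', 'job' and 'suffixes' keys
+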
+def make_ec_archive_bodies(policy, test_body):
+ segment_size = policy.ec_segment_size
+ # split up the body into buffers
+ chunks = [test_body[x:x + segment_size]
+ for x in range(0, len(test_body), segment_size)]
+ # encode the buffers into fragment payloads
+ fragment_payloads = []
+ for chunk in chunks:
+ fragments = policy.pyeclib_driver.encode(chunk)
+ if not fragments:
+ break
+ fragment_payloads.append(fragments)
+
+ # join up the fragment payloads per node
+ ec_archive_bodies = [''.join(fragments)
+ for fragments in zip(*fragment_payloads)]
+ return ec_archive_bodies
+
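+# For example (illustrative; with the EC policy patched in below,
+# ec_ndata=2 and ec_nparity=1 yield one archive body per node):
+#
+#   bodies = make_ec_archive_bodies(POLICIES[1], 'x' * 10000)
+#   assert len(bodies) == 3  # ndata + nparity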
+
+def _ips():
+ return ['127.0.0.1']
+object_reconstructor.whataremyips = _ips
+
+
+def _create_test_rings(path):
+ testgz = os.path.join(path, 'object.ring.gz')
+ intended_replica2part2dev_id = [
+ [0, 1, 2],
+ [1, 2, 3],
+ [2, 3, 0]
+ ]
+
+ intended_devs = [
+ {'id': 0, 'device': 'sda1', 'zone': 0, 'ip': '127.0.0.0',
+ 'port': 6000},
+ {'id': 1, 'device': 'sda1', 'zone': 1, 'ip': '127.0.0.1',
+ 'port': 6000},
+ {'id': 2, 'device': 'sda1', 'zone': 2, 'ip': '127.0.0.2',
+ 'port': 6000},
+ {'id': 3, 'device': 'sda1', 'zone': 4, 'ip': '127.0.0.3',
+ 'port': 6000}
+ ]
+ intended_part_shift = 30
+ with closing(GzipFile(testgz, 'wb')) as f:
+ pickle.dump(
+ ring.RingData(intended_replica2part2dev_id,
+ intended_devs, intended_part_shift),
+ f)
+
+ testgz = os.path.join(path, 'object-1.ring.gz')
+ with closing(GzipFile(testgz, 'wb')) as f:
+ pickle.dump(
+ ring.RingData(intended_replica2part2dev_id,
+ intended_devs, intended_part_shift),
+ f)
+
+
+def count_stats(logger, key, metric):
+ count = 0
+ for record in logger.log_dict[key]:
+ log_args, log_kwargs = record
+ m = log_args[0]
+ if re.match(metric, m):
+ count += 1
+ return count
+
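+# Illustrative use (see assertStatCount below): count how many
+# 'partition.delete.count.*' increments the fake logger recorded:
+#
+#   n = count_stats(logger, 'increment', 'partition.delete.count')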
+
+@patch_policies([StoragePolicy(0, name='zero', is_default=True),
+ ECStoragePolicy(1, name='one', ec_type='jerasure_rs_vand',
+ ec_ndata=2, ec_nparity=1)])
+class TestGlobalSetupObjectReconstructor(unittest.TestCase):
+
+ def setUp(self):
+ self.testdir = tempfile.mkdtemp()
+ _create_test_rings(self.testdir)
+ POLICIES[0].object_ring = ring.Ring(self.testdir, ring_name='object')
+ POLICIES[1].object_ring = ring.Ring(self.testdir, ring_name='object-1')
+ utils.HASH_PATH_SUFFIX = 'endcap'
+ utils.HASH_PATH_PREFIX = ''
+ self.devices = os.path.join(self.testdir, 'node')
+ os.makedirs(self.devices)
+ os.mkdir(os.path.join(self.devices, 'sda1'))
+ self.objects = os.path.join(self.devices, 'sda1',
+ diskfile.get_data_dir(POLICIES[0]))
+ self.objects_1 = os.path.join(self.devices, 'sda1',
+ diskfile.get_data_dir(POLICIES[1]))
+ os.mkdir(self.objects)
+ os.mkdir(self.objects_1)
+ self.parts = {}
+ self.parts_1 = {}
+ self.part_nums = ['0', '1', '2']
+ for part in self.part_nums:
+ self.parts[part] = os.path.join(self.objects, part)
+ os.mkdir(self.parts[part])
+ self.parts_1[part] = os.path.join(self.objects_1, part)
+ os.mkdir(self.parts_1[part])
+
+ self.conf = dict(
+ swift_dir=self.testdir, devices=self.devices, mount_check='false',
+ timeout='300', stats_interval='1')
+ self.logger = debug_logger('test-reconstructor')
+ self.reconstructor = object_reconstructor.ObjectReconstructor(
+ self.conf, logger=self.logger)
+
+ self.policy = POLICIES[1]
+
+ # most of the reconstructor test methods require that there be
+ # real objects in place, not just part dirs, so we'll create them
+ # all here....
+ # part 0: 3C1/hash/xxx-1.data <-- job: sync_only - partners (FI 1)
+ # /xxx.durable <-- included in earlier job (FI 1)
+ # 061/hash/xxx-1.data <-- included in earlier job (FI 1)
+ # /xxx.durable <-- included in earlier job (FI 1)
+ # /xxx-2.data <-- job: sync_revert to index 2
+
+ # part 1: 3C1/hash/xxx-0.data <-- job: sync_only - partners (FI 0)
+ # /xxx-1.data <-- job: sync_revert to index 1
+ # /xxx.durable <-- included in earlier jobs (FI 0, 1)
+ # 061/hash/xxx-1.data <-- included in earlier job (FI 1)
+ # /xxx.durable <-- included in earlier job (FI 1)
+
+ # part 2: 3C1/hash/xxx-2.data <-- job: sync_revert to index 2
+ # /xxx.durable <-- included in earlier job (FI 2)
+ # 061/hash/xxx-0.data <-- job: sync_revert to index 0
+ # /xxx.durable <-- included in earlier job (FI 0)
+
+ def _create_frag_archives(policy, obj_path, local_id, obj_set):
+ # we'll create 2 sets of objects in different suffix dirs
+ # so we cover all the scenarios we want (3 of them)
+ # 1) part dir with all FI's matching the local node index
+ # 2) part dir with one local and mix of others
+ # 3) part dir with no local FI and one or more others
+ def part_0(set):
+ if set == 0:
+ # just the local
+ return local_id
+ else:
+ # one local and all of another
+ if obj_num == 0:
+ return local_id
+ else:
+ return (local_id + 1) % 3
+
+ def part_1(set):
+ if set == 0:
+ # one local and all of another
+ if obj_num == 0:
+ return local_id
+ else:
+ return (local_id + 2) % 3
+ else:
+ # just the local node
+ return local_id
+
+ def part_2(set):
+ # this part is a handoff in our config (always)
+ # so let's do a set with indices from different nodes
+ if set == 0:
+ return (local_id + 1) % 3
+ else:
+ return (local_id + 2) % 3
+
+ # function dictionary for defining test scenarios based on set #
+ scenarios = {'0': part_0,
+ '1': part_1,
+ '2': part_2}
+
+ def _create_df(obj_num, part_num):
+ self._create_diskfile(
+ part=part_num, object_name='o' + str(obj_set),
+ policy=policy, frag_index=scenarios[part_num](obj_set),
+ timestamp=utils.Timestamp(t))
+
+ for part_num in self.part_nums:
+ # create 3 unique objects per part; each part
+ # will then have a unique mix of FIs for the
+ # possible scenarios
+ for obj_num in range(0, 3):
+ _create_df(obj_num, part_num)
+
+ ips = utils.whataremyips()
+ for policy in [p for p in POLICIES if p.policy_type == EC_POLICY]:
+ self.ec_policy = policy
+ self.ec_obj_ring = self.reconstructor.load_object_ring(
+ self.ec_policy)
+ data_dir = diskfile.get_data_dir(self.ec_policy)
+ for local_dev in [dev for dev in self.ec_obj_ring.devs
+ if dev and dev['replication_ip'] in ips and
+ dev['replication_port'] ==
+ self.reconstructor.port]:
+ self.ec_local_dev = local_dev
+ dev_path = os.path.join(self.reconstructor.devices_dir,
+ self.ec_local_dev['device'])
+ self.ec_obj_path = os.path.join(dev_path, data_dir)
+
+ # create a bunch of FA's to test
+ t = 1421181937.70054 # time.time()
+ with mock.patch('swift.obj.diskfile.time') as mock_time:
+ # since (a) we are using a fixed time here to create
+ # frags which correspond to all the hardcoded hashes and
+ # (b) the EC diskfile will delete its .data file right
+ # after creating it if it has expired, use this horrible hack
+ # to prevent the reclaim from happening
+ mock_time.time.return_value = 0.0
+ _create_frag_archives(self.ec_policy, self.ec_obj_path,
+ self.ec_local_dev['id'], 0)
+ _create_frag_archives(self.ec_policy, self.ec_obj_path,
+ self.ec_local_dev['id'], 1)
+ break
+ break
+
+ def tearDown(self):
+ rmtree(self.testdir, ignore_errors=1)
+
+ def _create_diskfile(self, policy=None, part=0, object_name='o',
+ frag_index=0, timestamp=None, test_data=None):
+ policy = policy or self.policy
+ df_mgr = self.reconstructor._df_router[policy]
+ df = df_mgr.get_diskfile('sda1', part, 'a', 'c', object_name,
+ policy=policy)
+ with df.create() as writer:
+ timestamp = timestamp or utils.Timestamp(time.time())
+ test_data = test_data or 'test data'
+ writer.write(test_data)
+ metadata = {
+ 'X-Timestamp': timestamp.internal,
+ 'Content-Length': len(test_data),
+ 'Etag': md5(test_data).hexdigest(),
+ 'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
+ }
+ writer.put(metadata)
+ writer.commit(timestamp)
+ return df
+
+ def debug_wtf(self):
+ # won't include this in the final version; just a handy reminder of
+ # where things are...
+ for pol in [p for p in POLICIES if p.policy_type == EC_POLICY]:
+ obj_ring = pol.object_ring
+ for part_num in self.part_nums:
+ print "\n part_num %s " % part_num
+ part_nodes = obj_ring.get_part_nodes(int(part_num))
+ print "\n part_nodes %s " % part_nodes
+ for local_dev in obj_ring.devs:
+ partners = self.reconstructor._get_partners(
+ local_dev['id'], obj_ring, part_num)
+ if partners:
+ print "\n local_dev %s \n partners %s " % (local_dev,
+ partners)
+
+ def assert_expected_jobs(self, part_num, jobs):
+ for job in jobs:
+ del job['path']
+ del job['policy']
+ if 'local_index' in job:
+ del job['local_index']
+ job['suffixes'].sort()
+
+ expected = []
+ # part num 0
+ expected.append(
+ [{
+ 'sync_to': [{
+ 'index': 2,
+ 'replication_port': 6000,
+ 'zone': 2,
+ 'ip': '127.0.0.2',
+ 'region': 1,
+ 'port': 6000,
+ 'replication_ip': '127.0.0.2',
+ 'device': 'sda1',
+ 'id': 2,
+ }],
+ 'job_type': object_reconstructor.REVERT,
+ 'suffixes': ['061'],
+ 'partition': 0,
+ 'frag_index': 2,
+ 'device': 'sda1',
+ 'local_dev': {
+ 'replication_port': 6000,
+ 'zone': 1,
+ 'ip': '127.0.0.1',
+ 'region': 1,
+ 'id': 1,
+ 'replication_ip': '127.0.0.1',
+ 'device': 'sda1', 'port': 6000,
+ },
+ 'hashes': {
+ '061': {
+ None: '85b02a5283704292a511078a5c483da5',
+ 2: '0e6e8d48d801dc89fd31904ae3b31229',
+ 1: '0e6e8d48d801dc89fd31904ae3b31229',
+ },
+ '3c1': {
+ None: '85b02a5283704292a511078a5c483da5',
+ 1: '0e6e8d48d801dc89fd31904ae3b31229',
+ },
+ },
+ }, {
+ 'sync_to': [{
+ 'index': 0,
+ 'replication_port': 6000,
+ 'zone': 0,
+ 'ip': '127.0.0.0',
+ 'region': 1,
+ 'port': 6000,
+ 'replication_ip': '127.0.0.0',
+ 'device': 'sda1', 'id': 0,
+ }, {
+ 'index': 2,
+ 'replication_port': 6000,
+ 'zone': 2,
+ 'ip': '127.0.0.2',
+ 'region': 1,
+ 'port': 6000,
+ 'replication_ip': '127.0.0.2',
+ 'device': 'sda1',
+ 'id': 2,
+ }],
+ 'job_type': object_reconstructor.SYNC,
+ 'sync_diskfile_builder': self.reconstructor.reconstruct_fa,
+ 'suffixes': ['061', '3c1'],
+ 'partition': 0,
+ 'frag_index': 1,
+ 'device': 'sda1',
+ 'local_dev': {
+ 'replication_port': 6000,
+ 'zone': 1,
+ 'ip': '127.0.0.1',
+ 'region': 1,
+ 'id': 1,
+ 'replication_ip': '127.0.0.1',
+ 'device': 'sda1',
+ 'port': 6000,
+ },
+ 'hashes':
+ {
+ '061': {
+ None: '85b02a5283704292a511078a5c483da5',
+ 2: '0e6e8d48d801dc89fd31904ae3b31229',
+ 1: '0e6e8d48d801dc89fd31904ae3b31229'
+ },
+ '3c1': {
+ None: '85b02a5283704292a511078a5c483da5',
+ 1: '0e6e8d48d801dc89fd31904ae3b31229',
+ },
+ },
+ }]
+ )
+ # part num 1
+ expected.append(
+ [{
+ 'sync_to': [{
+ 'index': 1,
+ 'replication_port': 6000,
+ 'zone': 2,
+ 'ip': '127.0.0.2',
+ 'region': 1,
+ 'port': 6000,
+ 'replication_ip': '127.0.0.2',
+ 'device': 'sda1',
+ 'id': 2,
+ }],
+ 'job_type': object_reconstructor.REVERT,
+ 'suffixes': ['061', '3c1'],
+ 'partition': 1,
+ 'frag_index': 1,
+ 'device': 'sda1',
+ 'local_dev': {
+ 'replication_port': 6000,
+ 'zone': 1,
+ 'ip': '127.0.0.1',
+ 'region': 1,
+ 'id': 1,
+ 'replication_ip': '127.0.0.1',
+ 'device': 'sda1',
+ 'port': 6000,
+ },
+ 'hashes':
+ {
+ '061': {
+ None: '85b02a5283704292a511078a5c483da5',
+ 1: '0e6e8d48d801dc89fd31904ae3b31229',
+ },
+ '3c1': {
+ 0: '0e6e8d48d801dc89fd31904ae3b31229',
+ None: '85b02a5283704292a511078a5c483da5',
+ 1: '0e6e8d48d801dc89fd31904ae3b31229',
+ },
+ },
+ }, {
+ 'sync_to': [{
+ 'index': 2,
+ 'replication_port': 6000,
+ 'zone': 4,
+ 'ip': '127.0.0.3',
+ 'region': 1,
+ 'port': 6000,
+ 'replication_ip': '127.0.0.3',
+ 'device': 'sda1', 'id': 3,
+ }, {
+ 'index': 1,
+ 'replication_port': 6000,
+ 'zone': 2,
+ 'ip': '127.0.0.2',
+ 'region': 1,
+ 'port': 6000,
+ 'replication_ip': '127.0.0.2',
+ 'device': 'sda1',
+ 'id': 2,
+ }],
+ 'job_type': object_reconstructor.SYNC,
+ 'sync_diskfile_builder': self.reconstructor.reconstruct_fa,
+ 'suffixes': ['3c1'],
+ 'partition': 1,
+ 'frag_index': 0,
+ 'device': 'sda1',
+ 'local_dev': {
+ 'replication_port': 6000,
+ 'zone': 1,
+ 'ip': '127.0.0.1',
+ 'region': 1,
+ 'id': 1,
+ 'replication_ip': '127.0.0.1',
+ 'device': 'sda1',
+ 'port': 6000,
+ },
+ 'hashes': {
+ '061': {
+ None: '85b02a5283704292a511078a5c483da5',
+ 1: '0e6e8d48d801dc89fd31904ae3b31229',
+ },
+ '3c1': {
+ 0: '0e6e8d48d801dc89fd31904ae3b31229',
+ None: '85b02a5283704292a511078a5c483da5',
+ 1: '0e6e8d48d801dc89fd31904ae3b31229',
+ },
+ },
+ }]
+ )
+ # part num 2
+ expected.append(
+ [{
+ 'sync_to': [{
+ 'index': 0,
+ 'replication_port': 6000,
+ 'zone': 2,
+ 'ip': '127.0.0.2',
+ 'region': 1,
+ 'port': 6000,
+ 'replication_ip': '127.0.0.2',
+ 'device': 'sda1', 'id': 2,
+ }],
+ 'job_type': object_reconstructor.REVERT,
+ 'suffixes': ['061'],
+ 'partition': 2,
+ 'frag_index': 0,
+ 'device': 'sda1',
+ 'local_dev': {
+ 'replication_port': 6000,
+ 'zone': 1,
+ 'ip': '127.0.0.1',
+ 'region': 1,
+ 'id': 1,
+ 'replication_ip': '127.0.0.1',
+ 'device': 'sda1',
+ 'port': 6000,
+ },
+ 'hashes': {
+ '061': {
+ 0: '0e6e8d48d801dc89fd31904ae3b31229',
+ None: '85b02a5283704292a511078a5c483da5'
+ },
+ '3c1': {
+ None: '85b02a5283704292a511078a5c483da5',
+ 2: '0e6e8d48d801dc89fd31904ae3b31229'
+ },
+ },
+ }, {
+ 'sync_to': [{
+ 'index': 2,
+ 'replication_port': 6000,
+ 'zone': 0,
+ 'ip': '127.0.0.0',
+ 'region': 1,
+ 'port': 6000,
+ 'replication_ip': '127.0.0.0',
+ 'device': 'sda1',
+ 'id': 0,
+ }],
+ 'job_type': object_reconstructor.REVERT,
+ 'suffixes': ['3c1'],
+ 'partition': 2,
+ 'frag_index': 2,
+ 'device': 'sda1',
+ 'local_dev': {
+ 'replication_port': 6000,
+ 'zone': 1,
+ 'ip': '127.0.0.1',
+ 'region': 1,
+ 'id': 1,
+ 'replication_ip': '127.0.0.1',
+ 'device': 'sda1',
+ 'port': 6000
+ },
+ 'hashes': {
+ '061': {
+ 0: '0e6e8d48d801dc89fd31904ae3b31229',
+ None: '85b02a5283704292a511078a5c483da5'
+ },
+ '3c1': {
+ None: '85b02a5283704292a511078a5c483da5',
+ 2: '0e6e8d48d801dc89fd31904ae3b31229'
+ },
+ },
+ }]
+ )
+
+ def check_jobs(part_num):
+ try:
+ expected_jobs = expected[int(part_num)]
+ except (IndexError, ValueError):
+ self.fail('Unknown part number %r' % part_num)
+ expected_by_part_frag_index = dict(
+ ((j['partition'], j['frag_index']), j) for j in expected_jobs)
+ for job in jobs:
+ job_key = (job['partition'], job['frag_index'])
+ if job_key in expected_by_part_frag_index:
+ for k, value in job.items():
+ expected_value = \
+ expected_by_part_frag_index[job_key][k]
+ try:
+ if isinstance(value, list):
+ value.sort()
+ expected_value.sort()
+ self.assertEqual(value, expected_value)
+ except AssertionError as e:
+ extra_info = \
+ '\n\n... for %r in part num %s job %r' % (
+ k, part_num, job_key)
+ raise AssertionError(str(e) + extra_info)
+ else:
+ self.fail(
+ 'Unexpected job %r for part num %s - '
+ 'expected jobs were %r' % (
+ job_key, part_num,
+ expected_by_part_frag_index.keys()))
+ for expected_job in expected_jobs:
+ if expected_job in jobs:
+ jobs.remove(expected_job)
+ self.assertFalse(jobs) # that should be all of them
+ check_jobs(part_num)
+
+ def test_run_once(self):
+ with mocked_http_conn(*[200] * 12, body=pickle.dumps({})):
+ with mock_ssync_sender():
+ self.reconstructor.run_once()
+
+ def test_get_response(self):
+ part = self.part_nums[0]
+ node = POLICIES[0].object_ring.get_part_nodes(int(part))[0]
+ for stat_code in (200, 400):
+ with mocked_http_conn(stat_code):
+ resp = self.reconstructor._get_response(node, part,
+ path='nada',
+ headers={},
+ policy=POLICIES[0])
+ if resp:
+ self.assertEqual(resp.status, 200)
+ else:
+ self.assertEqual(
+ len(self.reconstructor.logger.log_dict['warning']), 1)
+
+ def test_reconstructor_skips_bogus_partition_dirs(self):
+ # A directory in the wrong place shouldn't crash the reconstructor
+ rmtree(self.objects_1)
+ os.mkdir(self.objects_1)
+
+ os.mkdir(os.path.join(self.objects_1, "burrito"))
+ jobs = []
+ for part_info in self.reconstructor.collect_parts():
+ jobs += self.reconstructor.build_reconstruction_jobs(part_info)
+ self.assertEqual(len(jobs), 0)
+
+ def test_check_ring(self):
+ testring = tempfile.mkdtemp()
+ _create_test_rings(testring)
+ obj_ring = ring.Ring(testring, ring_name='object') # noqa
+ self.assertTrue(self.reconstructor.check_ring(obj_ring))
+ orig_check = self.reconstructor.next_check
+ self.reconstructor.next_check = orig_check - 30
+ self.assertTrue(self.reconstructor.check_ring(obj_ring))
+ self.reconstructor.next_check = orig_check
+ orig_ring_time = obj_ring._mtime
+ obj_ring._mtime = orig_ring_time - 30
+ self.assertTrue(self.reconstructor.check_ring(obj_ring))
+ self.reconstructor.next_check = orig_check - 30
+ self.assertFalse(self.reconstructor.check_ring(obj_ring))
+ rmtree(testring, ignore_errors=1)
+
+ def test_build_reconstruction_jobs(self):
+ self.reconstructor.handoffs_first = False
+ self.reconstructor._reset_stats()
+ for part_info in self.reconstructor.collect_parts():
+ jobs = self.reconstructor.build_reconstruction_jobs(part_info)
+ self.assertTrue(jobs[0]['job_type'] in
+ (object_reconstructor.SYNC,
+ object_reconstructor.REVERT))
+ self.assert_expected_jobs(part_info['partition'], jobs)
+
+ self.reconstructor.handoffs_first = True
+ self.reconstructor._reset_stats()
+ for part_info in self.reconstructor.collect_parts():
+ jobs = self.reconstructor.build_reconstruction_jobs(part_info)
+ self.assertTrue(jobs[0]['job_type'] ==
+ object_reconstructor.REVERT)
+ self.assert_expected_jobs(part_info['partition'], jobs)
+
+ def test_get_partners(self):
+ # we're going to perform an exhaustive test of every possible
+ # combination of partitions and nodes in our custom test ring
+
+ # format: [dev_id in question, 'part_num',
+ # [part_nodes for the given part], left id, right id...]
+ expected_partners = sorted([
+ (0, '0', [0, 1, 2], 2, 1), (0, '2', [2, 3, 0], 3, 2),
+ (1, '0', [0, 1, 2], 0, 2), (1, '1', [1, 2, 3], 3, 2),
+ (2, '0', [0, 1, 2], 1, 0), (2, '1', [1, 2, 3], 1, 3),
+ (2, '2', [2, 3, 0], 0, 3), (3, '1', [1, 2, 3], 2, 1),
+ (3, '2', [2, 3, 0], 2, 0), (0, '0', [0, 1, 2], 2, 1),
+ (0, '2', [2, 3, 0], 3, 2), (1, '0', [0, 1, 2], 0, 2),
+ (1, '1', [1, 2, 3], 3, 2), (2, '0', [0, 1, 2], 1, 0),
+ (2, '1', [1, 2, 3], 1, 3), (2, '2', [2, 3, 0], 0, 3),
+ (3, '1', [1, 2, 3], 2, 1), (3, '2', [2, 3, 0], 2, 0),
+ ])
+
+ got_partners = []
+ for pol in POLICIES:
+ obj_ring = pol.object_ring
+ for part_num in self.part_nums:
+ part_nodes = obj_ring.get_part_nodes(int(part_num))
+ primary_ids = [n['id'] for n in part_nodes]
+ for node in part_nodes:
+ partners = self.reconstructor._get_partners(
+ node['index'], part_nodes)
+ left = partners[0]['id']
+ right = partners[1]['id']
+ got_partners.append((
+ node['id'], part_num, primary_ids, left, right))
+
+ self.assertEqual(expected_partners, sorted(got_partners))
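+ # Conceptually (a sketch, not necessarily the exact implementation),
+ # a node's partners are its ring neighbors among the part's primaries:
+ #
+ #   def partners(index, part_nodes):
+ #       return [part_nodes[index - 1],
+ #               part_nodes[(index + 1) % len(part_nodes)]]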
+
+ def test_collect_parts(self):
+ parts = []
+ for part_info in self.reconstructor.collect_parts():
+ parts.append(part_info['partition'])
+ self.assertEqual(sorted(parts), [0, 1, 2])
+
+ def test_collect_parts_mkdirs_error(self):
+
+ def blowup_mkdirs(path):
+ raise OSError('Ow!')
+
+ with mock.patch.object(object_reconstructor, 'mkdirs', blowup_mkdirs):
+ rmtree(self.objects_1, ignore_errors=1)
+ parts = []
+ for part_info in self.reconstructor.collect_parts():
+ parts.append(part_info['partition'])
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(len(error_lines), 1)
+ log_args, log_kwargs = self.logger.log_dict['error'][0]
+ self.assertEqual(str(log_kwargs['exc_info'][1]), 'Ow!')
+
+ def test_removes_zbf(self):
+ # After running xfs_repair, a partition directory could become a
+ # zero-byte file. If this happens, the reconstructor should clean it
+ # up, log something, and move on to the next partition.
+
+ # Surprise! Partition dir 1 is actually a zero-byte file.
+ pol_1_part_1_path = os.path.join(self.objects_1, '1')
+ rmtree(pol_1_part_1_path)
+ with open(pol_1_part_1_path, 'w'):
+ pass
+ self.assertTrue(os.path.isfile(pol_1_part_1_path)) # sanity check
+
+ # since our collect_parts job is a generator that yields directly
+ # into build_jobs and then spawns, it's safe to do the remove_files
+ # without making reconstructor startup slow
+ for part_info in self.reconstructor.collect_parts():
+ self.assertNotEqual(pol_1_part_1_path, part_info['part_path'])
+ self.assertFalse(os.path.exists(pol_1_part_1_path))
+ warnings = self.reconstructor.logger.get_lines_for_level('warning')
+ self.assertEqual(1, len(warnings))
+ self.assertTrue('Unexpected entity in data dir:' in warnings[0],
+ 'Warning not found in %s' % warnings)
+
+ def _make_fake_ssync(self, ssync_calls):
+ class _fake_ssync(object):
+ def __init__(self, daemon, node, job, suffixes, **kwargs):
+ # capture context and generate an available_map of objs
+ context = {}
+ context['node'] = node
+ context['job'] = job
+ context['suffixes'] = suffixes
+ self.suffixes = suffixes
+ self.daemon = daemon
+ self.job = job
+ hash_gen = self.daemon._diskfile_mgr.yield_hashes(
+ self.job['device'], self.job['partition'],
+ self.job['policy'], self.suffixes,
+ frag_index=self.job.get('frag_index'))
+ self.available_map = {}
+ for path, hash_, ts in hash_gen:
+ self.available_map[hash_] = ts
+ context['available_map'] = self.available_map
+ ssync_calls.append(context)
+
+ def __call__(self, *args, **kwargs):
+ return True, self.available_map
+
+ return _fake_ssync
+
+ def test_delete_reverted(self):
+ # verify reconstructor deletes reverted frag indexes after ssync'ing
+
+ def visit_obj_dirs(context):
+ for suff in context['suffixes']:
+ suff_dir = os.path.join(
+ context['job']['path'], suff)
+ for root, dirs, files in os.walk(suff_dir):
+ for d in dirs:
+ dirpath = os.path.join(root, d)
+ files = os.listdir(dirpath)
+ yield dirpath, files
+
+ n_files = n_files_after = 0
+
+ # run reconstructor with delete function mocked out to check calls
+ ssync_calls = []
+ delete_func =\
+ 'swift.obj.reconstructor.ObjectReconstructor.delete_reverted_objs'
+ with mock.patch('swift.obj.reconstructor.ssync_sender',
+ self._make_fake_ssync(ssync_calls)):
+ with mocked_http_conn(*[200] * 12, body=pickle.dumps({})):
+ with mock.patch(delete_func) as mock_delete:
+ self.reconstructor.reconstruct()
+ expected_calls = []
+ for context in ssync_calls:
+ if context['job']['job_type'] == REVERT:
+ for dirpath, files in visit_obj_dirs(context):
+                        # sanity check - expect some files to be in the
+                        # dir, though they may not be for the reverted
+                        # frag index
+ self.assertTrue(files)
+ n_files += len(files)
+ expected_calls.append(mock.call(context['job'],
+ context['available_map'],
+ context['node']['index']))
+ mock_delete.assert_has_calls(expected_calls, any_order=True)
+
+ ssync_calls = []
+ with mock.patch('swift.obj.reconstructor.ssync_sender',
+ self._make_fake_ssync(ssync_calls)):
+ with mocked_http_conn(*[200] * 12, body=pickle.dumps({})):
+ self.reconstructor.reconstruct()
+ for context in ssync_calls:
+ if context['job']['job_type'] == REVERT:
+ data_file_tail = ('#%s.data'
+ % context['node']['index'])
+ for dirpath, files in visit_obj_dirs(context):
+ n_files_after += len(files)
+ for filename in files:
+ self.assertFalse(
+ filename.endswith(data_file_tail))
+
+        # sanity check that some files were deleted
+ self.assertTrue(n_files > n_files_after)
+
+ def test_get_part_jobs(self):
+ # yeah, this test code expects a specific setup
+ self.assertEqual(len(self.part_nums), 3)
+
+        # OK, at this point the parts are loaded; check each one's jobs
+ jobs = []
+ for partition in os.listdir(self.ec_obj_path):
+ part_path = os.path.join(self.ec_obj_path, partition)
+ jobs = self.reconstructor._get_part_jobs(
+ self.ec_local_dev, part_path, int(partition), self.ec_policy)
+ self.assert_expected_jobs(partition, jobs)
+
+ def assertStatCount(self, stat_method, stat_prefix, expected_count):
+ count = count_stats(self.logger, stat_method, stat_prefix)
+ msg = 'expected %s != %s for %s %s' % (
+ expected_count, count, stat_method, stat_prefix)
+ self.assertEqual(expected_count, count, msg)
+
+ def test_delete_partition(self):
+ # part 2 is predefined to have all revert jobs
+ part_path = os.path.join(self.objects_1, '2')
+ self.assertTrue(os.access(part_path, os.F_OK))
+
+ ssync_calls = []
+ status = [200] * 2
+ body = pickle.dumps({})
+ with mocked_http_conn(*status, body=body) as request_log:
+ with mock.patch('swift.obj.reconstructor.ssync_sender',
+ self._make_fake_ssync(ssync_calls)):
+ self.reconstructor.reconstruct(override_partitions=[2])
+        expected_replicate_calls = set([
+ ('127.0.0.0', '/sda1/2/3c1'),
+ ('127.0.0.2', '/sda1/2/061'),
+ ])
+ found_calls = set((r['ip'], r['path'])
+ for r in request_log.requests)
+        self.assertEqual(expected_replicate_calls, found_calls)
+
+ expected_ssync_calls = sorted([
+ ('127.0.0.0', REVERT, 2, ['3c1']),
+ ('127.0.0.2', REVERT, 2, ['061']),
+ ])
+ self.assertEqual(expected_ssync_calls, sorted((
+ c['node']['ip'],
+ c['job']['job_type'],
+ c['job']['partition'],
+ c['suffixes'],
+ ) for c in ssync_calls))
+
+ expected_stats = {
+ ('increment', 'partition.delete.count.'): 2,
+ ('timing_since', 'partition.delete.timing'): 2,
+ }
+ for stat_key, expected in expected_stats.items():
+ stat_method, stat_prefix = stat_key
+ self.assertStatCount(stat_method, stat_prefix, expected)
+ # part 2 should be totally empty
+ policy = POLICIES[1]
+ hash_gen = self.reconstructor._df_router[policy].yield_hashes(
+ 'sda1', '2', policy)
+ for path, hash_, ts in hash_gen:
+            self.fail('found %s with %s in %s' % (hash_, ts, path))
+ # but the partition directory and hashes pkl still exist
+ self.assertTrue(os.access(part_path, os.F_OK))
+ hashes_path = os.path.join(self.objects_1, '2', diskfile.HASH_FILE)
+ self.assertTrue(os.access(hashes_path, os.F_OK))
+
+ # ... but on next pass
+ ssync_calls = []
+ with mocked_http_conn() as request_log:
+ with mock.patch('swift.obj.reconstructor.ssync_sender',
+ self._make_fake_ssync(ssync_calls)):
+ self.reconstructor.reconstruct(override_partitions=[2])
+ # reconstruct won't generate any replicate or ssync_calls
+ self.assertFalse(request_log.requests)
+ self.assertFalse(ssync_calls)
+ # and the partition will get removed!
+ self.assertFalse(os.access(part_path, os.F_OK))
+
+ def test_process_job_all_success(self):
+ self.reconstructor._reset_stats()
+ with mock_ssync_sender():
+ with mocked_http_conn(*[200] * 12, body=pickle.dumps({})):
+ found_jobs = []
+ for part_info in self.reconstructor.collect_parts():
+ jobs = self.reconstructor.build_reconstruction_jobs(
+ part_info)
+ found_jobs.extend(jobs)
+ for job in jobs:
+ self.logger._clear()
+ node_count = len(job['sync_to'])
+ self.reconstructor.process_job(job)
+ if job['job_type'] == object_reconstructor.REVERT:
+ self.assertEqual(0, count_stats(
+ self.logger, 'update_stats', 'suffix.hashes'))
+ else:
+ self.assertStatCount('update_stats',
+ 'suffix.hashes',
+ node_count)
+ self.assertEqual(node_count, count_stats(
+ self.logger, 'update_stats', 'suffix.hashes'))
+ self.assertEqual(node_count, count_stats(
+ self.logger, 'update_stats', 'suffix.syncs'))
+ self.assertFalse('error' in
+ self.logger.all_log_lines())
+ self.assertEqual(self.reconstructor.suffix_sync, 8)
+ self.assertEqual(self.reconstructor.suffix_count, 8)
+ self.assertEqual(len(found_jobs), 6)
+
+ def test_process_job_all_insufficient_storage(self):
+ self.reconstructor._reset_stats()
+ with mock_ssync_sender():
+ with mocked_http_conn(*[507] * 10):
+ found_jobs = []
+ for part_info in self.reconstructor.collect_parts():
+ jobs = self.reconstructor.build_reconstruction_jobs(
+ part_info)
+ found_jobs.extend(jobs)
+ for job in jobs:
+ self.logger._clear()
+ self.reconstructor.process_job(job)
+ for line in self.logger.get_lines_for_level('error'):
+ self.assertTrue('responded as unmounted' in line)
+ self.assertEqual(0, count_stats(
+ self.logger, 'update_stats', 'suffix.hashes'))
+ self.assertEqual(0, count_stats(
+ self.logger, 'update_stats', 'suffix.syncs'))
+ self.assertEqual(self.reconstructor.suffix_sync, 0)
+ self.assertEqual(self.reconstructor.suffix_count, 0)
+ self.assertEqual(len(found_jobs), 6)
+
+ def test_process_job_all_client_error(self):
+ self.reconstructor._reset_stats()
+ with mock_ssync_sender():
+ with mocked_http_conn(*[400] * 10):
+ found_jobs = []
+ for part_info in self.reconstructor.collect_parts():
+ jobs = self.reconstructor.build_reconstruction_jobs(
+ part_info)
+ found_jobs.extend(jobs)
+ for job in jobs:
+ self.logger._clear()
+ self.reconstructor.process_job(job)
+ for line in self.logger.get_lines_for_level('error'):
+ self.assertTrue('Invalid response 400' in line)
+ self.assertEqual(0, count_stats(
+ self.logger, 'update_stats', 'suffix.hashes'))
+ self.assertEqual(0, count_stats(
+ self.logger, 'update_stats', 'suffix.syncs'))
+ self.assertEqual(self.reconstructor.suffix_sync, 0)
+ self.assertEqual(self.reconstructor.suffix_count, 0)
+ self.assertEqual(len(found_jobs), 6)
+
+ def test_process_job_all_timeout(self):
+ self.reconstructor._reset_stats()
+ with mock_ssync_sender():
+            with mocked_http_conn(*[Timeout()] * 10):
+ found_jobs = []
+ for part_info in self.reconstructor.collect_parts():
+ jobs = self.reconstructor.build_reconstruction_jobs(
+ part_info)
+ found_jobs.extend(jobs)
+ for job in jobs:
+ self.logger._clear()
+ self.reconstructor.process_job(job)
+ for line in self.logger.get_lines_for_level('error'):
+ self.assertTrue('Timeout (Nones)' in line)
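+                            # ('Nones' is presumably '%ss' % None in the
+                            # logged Timeout message)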
+ self.assertStatCount(
+ 'update_stats', 'suffix.hashes', 0)
+ self.assertStatCount(
+ 'update_stats', 'suffix.syncs', 0)
+ self.assertEqual(self.reconstructor.suffix_sync, 0)
+ self.assertEqual(self.reconstructor.suffix_count, 0)
+ self.assertEqual(len(found_jobs), 6)
+
+
+@patch_policies(with_ec_default=True)
+class TestObjectReconstructor(unittest.TestCase):
+
+ def setUp(self):
+ self.policy = POLICIES.default
+ self.testdir = tempfile.mkdtemp()
+ self.devices = os.path.join(self.testdir, 'devices')
+ self.local_dev = self.policy.object_ring.devs[0]
+ self.ip = self.local_dev['replication_ip']
+ self.port = self.local_dev['replication_port']
+ self.conf = {
+ 'devices': self.devices,
+ 'mount_check': False,
+ 'bind_port': self.port,
+ }
+ self.logger = debug_logger('object-reconstructor')
+ self.reconstructor = object_reconstructor.ObjectReconstructor(
+ self.conf, logger=self.logger)
+ self.reconstructor._reset_stats()
+ # some tests bypass build_reconstruction_jobs and go to process_job
+ # directly, so you end up with a /0 when you try to show the
+ # percentage of complete jobs as ratio of the total job count
+ self.reconstructor.job_count = 1
+ self.policy.object_ring.max_more_nodes = \
+ self.policy.object_ring.replicas
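+        # let the ring hand out as many handoff nodes as there are
+        # replicas, so revert-to-handoff tests can walk the full list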
+ self.ts_iter = make_timestamp_iter()
+
+ def tearDown(self):
+ self.reconstructor.stats_line()
+ shutil.rmtree(self.testdir)
+
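+    # convenience helper: each call returns the next (strictly increasing)
+    # Timestamp from the iterator created in setUp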
+ def ts(self):
+ return next(self.ts_iter)
+
+ def test_collect_parts_skips_non_ec_policy_and_device(self):
+ stub_parts = (371, 78, 419, 834)
+ for policy in POLICIES:
+ datadir = diskfile.get_data_dir(policy)
+ for part in stub_parts:
+ utils.mkdirs(os.path.join(
+ self.devices, self.local_dev['device'],
+ datadir, str(part)))
+ with mock.patch('swift.obj.reconstructor.whataremyips',
+ return_value=[self.ip]):
+ part_infos = list(self.reconstructor.collect_parts())
+ found_parts = sorted(int(p['partition']) for p in part_infos)
+ self.assertEqual(found_parts, sorted(stub_parts))
+ for part_info in part_infos:
+ self.assertEqual(part_info['local_dev'], self.local_dev)
+ self.assertEqual(part_info['policy'], self.policy)
+ self.assertEqual(part_info['part_path'],
+ os.path.join(self.devices,
+ self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(part_info['partition'])))
+
+ def test_collect_parts_multi_device_skips_non_ring_devices(self):
+ device_parts = {
+ 'sda': (374,),
+ 'sdb': (179, 807),
+ 'sdc': (363, 468, 843),
+ }
+ for policy in POLICIES:
+ datadir = diskfile.get_data_dir(policy)
+ for dev, parts in device_parts.items():
+ for part in parts:
+ utils.mkdirs(os.path.join(
+ self.devices, dev,
+ datadir, str(part)))
+
+ # we're only going to add sda and sdc into the ring
+ local_devs = ('sda', 'sdc')
+ stub_ring_devs = [{
+ 'device': dev,
+ 'replication_ip': self.ip,
+ 'replication_port': self.port
+ } for dev in local_devs]
+ with nested(mock.patch('swift.obj.reconstructor.whataremyips',
+ return_value=[self.ip]),
+ mock.patch.object(self.policy.object_ring, '_devs',
+ new=stub_ring_devs)):
+ part_infos = list(self.reconstructor.collect_parts())
+ found_parts = sorted(int(p['partition']) for p in part_infos)
+ expected_parts = sorted(itertools.chain(
+ *(device_parts[d] for d in local_devs)))
+ self.assertEqual(found_parts, expected_parts)
+ for part_info in part_infos:
+ self.assertEqual(part_info['policy'], self.policy)
+ self.assertTrue(part_info['local_dev'] in stub_ring_devs)
+ dev = part_info['local_dev']
+ self.assertEqual(part_info['part_path'],
+ os.path.join(self.devices,
+ dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(part_info['partition'])))
+
+ def test_collect_parts_mount_check(self):
+ # each device has one part in it
+ local_devs = ('sda', 'sdb')
+ for i, dev in enumerate(local_devs):
+ datadir = diskfile.get_data_dir(self.policy)
+ utils.mkdirs(os.path.join(
+ self.devices, dev, datadir, str(i)))
+ stub_ring_devs = [{
+ 'device': dev,
+ 'replication_ip': self.ip,
+ 'replication_port': self.port
+ } for dev in local_devs]
+ with nested(mock.patch('swift.obj.reconstructor.whataremyips',
+ return_value=[self.ip]),
+ mock.patch.object(self.policy.object_ring, '_devs',
+ new=stub_ring_devs)):
+ part_infos = list(self.reconstructor.collect_parts())
+ self.assertEqual(2, len(part_infos)) # sanity
+ self.assertEqual(set(int(p['partition']) for p in part_infos),
+ set([0, 1]))
+
+ paths = []
+
+ def fake_ismount(path):
+ paths.append(path)
+ return False
+
+ with nested(mock.patch('swift.obj.reconstructor.whataremyips',
+ return_value=[self.ip]),
+ mock.patch.object(self.policy.object_ring, '_devs',
+ new=stub_ring_devs),
+ mock.patch('swift.obj.reconstructor.ismount',
+ fake_ismount)):
+ part_infos = list(self.reconstructor.collect_parts())
+ self.assertEqual(2, len(part_infos)) # sanity, same jobs
+ self.assertEqual(set(int(p['partition']) for p in part_infos),
+ set([0, 1]))
+
+ # ... because ismount was not called
+ self.assertEqual(paths, [])
+
+ # ... now with mount check
+ self.reconstructor.mount_check = True
+ with nested(mock.patch('swift.obj.reconstructor.whataremyips',
+ return_value=[self.ip]),
+ mock.patch.object(self.policy.object_ring, '_devs',
+ new=stub_ring_devs),
+ mock.patch('swift.obj.reconstructor.ismount',
+ fake_ismount)):
+ part_infos = list(self.reconstructor.collect_parts())
+ self.assertEqual([], part_infos) # sanity, no jobs
+
+ # ... because fake_ismount returned False for both paths
+ self.assertEqual(set(paths), set([
+ os.path.join(self.devices, dev) for dev in local_devs]))
+
+        def fake_ismount(path):
+            return path.endswith('sda')
+
+ with nested(mock.patch('swift.obj.reconstructor.whataremyips',
+ return_value=[self.ip]),
+ mock.patch.object(self.policy.object_ring, '_devs',
+ new=stub_ring_devs),
+ mock.patch('swift.obj.reconstructor.ismount',
+ fake_ismount)):
+ part_infos = list(self.reconstructor.collect_parts())
+ self.assertEqual(1, len(part_infos)) # only sda picked up (part 0)
+ self.assertEqual(part_infos[0]['partition'], 0)
+
+ def test_collect_parts_cleans_tmp(self):
+ local_devs = ('sda', 'sdc')
+ stub_ring_devs = [{
+ 'device': dev,
+ 'replication_ip': self.ip,
+ 'replication_port': self.port
+ } for dev in local_devs]
+ fake_unlink = mock.MagicMock()
+ self.reconstructor.reclaim_age = 1000
+ now = time.time()
+ with nested(mock.patch('swift.obj.reconstructor.whataremyips',
+ return_value=[self.ip]),
+ mock.patch('swift.obj.reconstructor.time.time',
+ return_value=now),
+ mock.patch.object(self.policy.object_ring, '_devs',
+ new=stub_ring_devs),
+ mock.patch('swift.obj.reconstructor.unlink_older_than',
+ fake_unlink)):
+ self.assertEqual([], list(self.reconstructor.collect_parts()))
+        # each local device has unlink_older_than called on it,
+ # with now - self.reclaim_age
+ tmpdir = diskfile.get_tmp_dir(self.policy)
+ expected = now - 1000
+ self.assertEqual(fake_unlink.mock_calls, [
+ mock.call(os.path.join(self.devices, dev, tmpdir), expected)
+ for dev in local_devs])
+
+ def test_collect_parts_creates_datadir(self):
+ # create just the device path
+ dev_path = os.path.join(self.devices, self.local_dev['device'])
+ utils.mkdirs(dev_path)
+ with mock.patch('swift.obj.reconstructor.whataremyips',
+ return_value=[self.ip]):
+ self.assertEqual([], list(self.reconstructor.collect_parts()))
+ datadir_path = os.path.join(dev_path,
+ diskfile.get_data_dir(self.policy))
+ self.assertTrue(os.path.exists(datadir_path))
+
+ def test_collect_parts_creates_datadir_error(self):
+ # create just the device path
+ datadir_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy))
+ utils.mkdirs(os.path.dirname(datadir_path))
+ with nested(mock.patch('swift.obj.reconstructor.whataremyips',
+ return_value=[self.ip]),
+ mock.patch('swift.obj.reconstructor.mkdirs',
+ side_effect=OSError('kaboom!'))):
+ self.assertEqual([], list(self.reconstructor.collect_parts()))
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(len(error_lines), 1)
+ line = error_lines[0]
+ self.assertTrue('Unable to create' in line)
+ self.assertTrue(datadir_path in line)
+
+ def test_collect_parts_skips_invalid_paths(self):
+ datadir_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy))
+ utils.mkdirs(os.path.dirname(datadir_path))
+ with open(datadir_path, 'w') as f:
+ f.write('junk')
+ with mock.patch('swift.obj.reconstructor.whataremyips',
+ return_value=[self.ip]):
+ self.assertEqual([], list(self.reconstructor.collect_parts()))
+ self.assertTrue(os.path.exists(datadir_path))
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(len(error_lines), 1)
+ line = error_lines[0]
+ self.assertTrue('Unable to list partitions' in line)
+ self.assertTrue(datadir_path in line)
+
+ def test_collect_parts_removes_non_partition_files(self):
+ # create some junk next to partitions
+ datadir_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy))
+ num_parts = 3
+ for part in range(num_parts):
+ utils.mkdirs(os.path.join(datadir_path, str(part)))
+ junk_file = os.path.join(datadir_path, 'junk')
+ with open(junk_file, 'w') as f:
+ f.write('junk')
+ with mock.patch('swift.obj.reconstructor.whataremyips',
+ return_value=[self.ip]):
+ part_infos = list(self.reconstructor.collect_parts())
+ # the file is not included in the part_infos map
+ self.assertEqual(sorted(p['part_path'] for p in part_infos),
+ sorted([os.path.join(datadir_path, str(i))
+ for i in range(num_parts)]))
+ # and gets cleaned up
+ self.assertFalse(os.path.exists(junk_file))
+
+ def test_collect_parts_overrides(self):
+ # setup multiple devices, with multiple parts
+ device_parts = {
+ 'sda': (374, 843),
+ 'sdb': (179, 807),
+ 'sdc': (363, 468, 843),
+ }
+ datadir = diskfile.get_data_dir(self.policy)
+ for dev, parts in device_parts.items():
+ for part in parts:
+ utils.mkdirs(os.path.join(
+ self.devices, dev,
+ datadir, str(part)))
+
+ # we're only going to add sda and sdc into the ring
+ local_devs = ('sda', 'sdc')
+ stub_ring_devs = [{
+ 'device': dev,
+ 'replication_ip': self.ip,
+ 'replication_port': self.port
+ } for dev in local_devs]
+
+ expected = (
+ ({}, [
+ ('sda', 374),
+ ('sda', 843),
+ ('sdc', 363),
+ ('sdc', 468),
+ ('sdc', 843),
+ ]),
+ ({'override_devices': ['sda', 'sdc']}, [
+ ('sda', 374),
+ ('sda', 843),
+ ('sdc', 363),
+ ('sdc', 468),
+ ('sdc', 843),
+ ]),
+ ({'override_devices': ['sdc']}, [
+ ('sdc', 363),
+ ('sdc', 468),
+ ('sdc', 843),
+ ]),
+ ({'override_devices': ['sda']}, [
+ ('sda', 374),
+ ('sda', 843),
+ ]),
+ ({'override_devices': ['sdx']}, []),
+ ({'override_partitions': [374]}, [
+ ('sda', 374),
+ ]),
+ ({'override_partitions': [843]}, [
+ ('sda', 843),
+ ('sdc', 843),
+ ]),
+ ({'override_partitions': [843], 'override_devices': ['sda']}, [
+ ('sda', 843),
+ ]),
+ )
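+        # (device and partition overrides combine: a part is yielded only
+        # if it passes both filters)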
+ with nested(mock.patch('swift.obj.reconstructor.whataremyips',
+ return_value=[self.ip]),
+ mock.patch.object(self.policy.object_ring, '_devs',
+ new=stub_ring_devs)):
+ for kwargs, expected_parts in expected:
+ part_infos = list(self.reconstructor.collect_parts(**kwargs))
+ expected_paths = set(
+ os.path.join(self.devices, dev, datadir, str(part))
+ for dev, part in expected_parts)
+ found_paths = set(p['part_path'] for p in part_infos)
+ msg = 'expected %r != %r for %r' % (
+ expected_paths, found_paths, kwargs)
+ self.assertEqual(expected_paths, found_paths, msg)
+
+ def test_build_jobs_creates_empty_hashes(self):
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy), '0')
+ utils.mkdirs(part_path)
+ part_info = {
+ 'local_dev': self.local_dev,
+ 'policy': self.policy,
+ 'partition': 0,
+ 'part_path': part_path,
+ }
+ jobs = self.reconstructor.build_reconstruction_jobs(part_info)
+ self.assertEqual(1, len(jobs))
+ job = jobs[0]
+ self.assertEqual(job['job_type'], object_reconstructor.SYNC)
+ self.assertEqual(job['frag_index'], 0)
+ self.assertEqual(job['suffixes'], [])
+ self.assertEqual(len(job['sync_to']), 2)
+ self.assertEqual(job['partition'], 0)
+ self.assertEqual(job['path'], part_path)
+ self.assertEqual(job['hashes'], {})
+ self.assertEqual(job['policy'], self.policy)
+ self.assertEqual(job['local_dev'], self.local_dev)
+ self.assertEqual(job['device'], self.local_dev['device'])
+ hashes_file = os.path.join(part_path,
+ diskfile.HASH_FILE)
+ self.assertTrue(os.path.exists(hashes_file))
+ suffixes = self.reconstructor._get_hashes(
+ self.policy, part_path, do_listdir=True)
+ self.assertEqual(suffixes, {})
+
+ def test_build_jobs_no_hashes(self):
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy), '0')
+ part_info = {
+ 'local_dev': self.local_dev,
+ 'policy': self.policy,
+ 'partition': 0,
+ 'part_path': part_path,
+ }
+ stub_hashes = {}
+ with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes)):
+ jobs = self.reconstructor.build_reconstruction_jobs(part_info)
+ self.assertEqual(1, len(jobs))
+ job = jobs[0]
+ self.assertEqual(job['job_type'], object_reconstructor.SYNC)
+ self.assertEqual(job['frag_index'], 0)
+ self.assertEqual(job['suffixes'], [])
+ self.assertEqual(len(job['sync_to']), 2)
+ self.assertEqual(job['partition'], 0)
+ self.assertEqual(job['path'], part_path)
+ self.assertEqual(job['hashes'], {})
+ self.assertEqual(job['policy'], self.policy)
+ self.assertEqual(job['local_dev'], self.local_dev)
+ self.assertEqual(job['device'], self.local_dev['device'])
+
+ def test_build_jobs_primary(self):
+ ring = self.policy.object_ring = FabricatedRing()
+ # find a partition for which we're a primary
+ for partition in range(2 ** ring.part_power):
+ part_nodes = ring.get_part_nodes(partition)
+ try:
+ frag_index = [n['id'] for n in part_nodes].index(
+ self.local_dev['id'])
+ except ValueError:
+ pass
+ else:
+ break
+ else:
+ self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ part_info = {
+ 'local_dev': self.local_dev,
+ 'policy': self.policy,
+ 'partition': partition,
+ 'part_path': part_path,
+ }
+ stub_hashes = {
+ '123': {frag_index: 'hash', None: 'hash'},
+ 'abc': {frag_index: 'hash', None: 'hash'},
+ }
+ with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes)):
+ jobs = self.reconstructor.build_reconstruction_jobs(part_info)
+ self.assertEqual(1, len(jobs))
+ job = jobs[0]
+ self.assertEqual(job['job_type'], object_reconstructor.SYNC)
+ self.assertEqual(job['frag_index'], frag_index)
+ self.assertEqual(job['suffixes'], stub_hashes.keys())
+ self.assertEqual(set([n['index'] for n in job['sync_to']]),
+ set([(frag_index + 1) % ring.replicas,
+ (frag_index - 1) % ring.replicas]))
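+        # i.e. a primary's sync job targets only its two ring partners,
+        # the nodes holding the neighboring fragment indexes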
+ self.assertEqual(job['partition'], partition)
+ self.assertEqual(job['path'], part_path)
+ self.assertEqual(job['hashes'], stub_hashes)
+ self.assertEqual(job['policy'], self.policy)
+ self.assertEqual(job['local_dev'], self.local_dev)
+ self.assertEqual(job['device'], self.local_dev['device'])
+
+ def test_build_jobs_handoff(self):
+ ring = self.policy.object_ring = FabricatedRing()
+ # find a partition for which we're a handoff
+ for partition in range(2 ** ring.part_power):
+ part_nodes = ring.get_part_nodes(partition)
+ if self.local_dev['id'] not in [n['id'] for n in part_nodes]:
+ break
+ else:
+ self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ part_info = {
+ 'local_dev': self.local_dev,
+ 'policy': self.policy,
+ 'partition': partition,
+ 'part_path': part_path,
+ }
+ # since this part doesn't belong on us it doesn't matter what
+ # frag_index we have
+ frag_index = random.randint(0, ring.replicas - 1)
+ stub_hashes = {
+ '123': {frag_index: 'hash', None: 'hash'},
+ 'abc': {None: 'hash'},
+ }
+ with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes)):
+ jobs = self.reconstructor.build_reconstruction_jobs(part_info)
+ self.assertEqual(1, len(jobs))
+ job = jobs[0]
+ self.assertEqual(job['job_type'], object_reconstructor.REVERT)
+ self.assertEqual(job['frag_index'], frag_index)
+ self.assertEqual(sorted(job['suffixes']), sorted(stub_hashes.keys()))
+ self.assertEqual(len(job['sync_to']), 1)
+ self.assertEqual(job['sync_to'][0]['index'], frag_index)
+ self.assertEqual(job['path'], part_path)
+ self.assertEqual(job['partition'], partition)
+ self.assertEqual(sorted(job['hashes']), sorted(stub_hashes))
+ self.assertEqual(job['local_dev'], self.local_dev)
+
+ def test_build_jobs_mixed(self):
+ ring = self.policy.object_ring = FabricatedRing()
+ # find a partition for which we're a primary
+ for partition in range(2 ** ring.part_power):
+ part_nodes = ring.get_part_nodes(partition)
+ try:
+ frag_index = [n['id'] for n in part_nodes].index(
+ self.local_dev['id'])
+ except ValueError:
+ pass
+ else:
+ break
+ else:
+ self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ part_info = {
+ 'local_dev': self.local_dev,
+ 'policy': self.policy,
+ 'partition': partition,
+ 'part_path': part_path,
+ }
+ other_frag_index = random.choice([f for f in range(ring.replicas)
+ if f != frag_index])
+ stub_hashes = {
+ '123': {frag_index: 'hash', None: 'hash'},
+ '456': {other_frag_index: 'hash', None: 'hash'},
+ 'abc': {None: 'hash'},
+ }
+ with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes)):
+ jobs = self.reconstructor.build_reconstruction_jobs(part_info)
+ self.assertEqual(2, len(jobs))
+ sync_jobs, revert_jobs = [], []
+ for job in jobs:
+ self.assertEqual(job['partition'], partition)
+ self.assertEqual(job['path'], part_path)
+ self.assertEqual(sorted(job['hashes']), sorted(stub_hashes))
+ self.assertEqual(job['policy'], self.policy)
+ self.assertEqual(job['local_dev'], self.local_dev)
+ self.assertEqual(job['device'], self.local_dev['device'])
+ {
+ object_reconstructor.SYNC: sync_jobs,
+ object_reconstructor.REVERT: revert_jobs,
+ }[job['job_type']].append(job)
+ self.assertEqual(1, len(sync_jobs))
+ job = sync_jobs[0]
+ self.assertEqual(job['frag_index'], frag_index)
+ self.assertEqual(sorted(job['suffixes']), sorted(['123', 'abc']))
+ self.assertEqual(len(job['sync_to']), 2)
+ self.assertEqual(set([n['index'] for n in job['sync_to']]),
+ set([(frag_index + 1) % ring.replicas,
+ (frag_index - 1) % ring.replicas]))
+ self.assertEqual(1, len(revert_jobs))
+ job = revert_jobs[0]
+ self.assertEqual(job['frag_index'], other_frag_index)
+ self.assertEqual(job['suffixes'], ['456'])
+ self.assertEqual(len(job['sync_to']), 1)
+ self.assertEqual(job['sync_to'][0]['index'], other_frag_index)
+
+ def test_build_jobs_revert_only_tombstones(self):
+ ring = self.policy.object_ring = FabricatedRing()
+ # find a partition for which we're a handoff
+ for partition in range(2 ** ring.part_power):
+ part_nodes = ring.get_part_nodes(partition)
+ if self.local_dev['id'] not in [n['id'] for n in part_nodes]:
+ break
+ else:
+ self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ part_info = {
+ 'local_dev': self.local_dev,
+ 'policy': self.policy,
+ 'partition': partition,
+ 'part_path': part_path,
+ }
+        # we have no fragment index to hint at where the jobs belong
+ stub_hashes = {
+ '123': {None: 'hash'},
+ 'abc': {None: 'hash'},
+ }
+ with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes)):
+ jobs = self.reconstructor.build_reconstruction_jobs(part_info)
+ self.assertEqual(len(jobs), 1)
+ job = jobs[0]
+ expected = {
+ 'job_type': object_reconstructor.REVERT,
+ 'frag_index': None,
+ 'suffixes': stub_hashes.keys(),
+ 'partition': partition,
+ 'path': part_path,
+ 'hashes': stub_hashes,
+ 'policy': self.policy,
+ 'local_dev': self.local_dev,
+ 'device': self.local_dev['device'],
+ }
+ self.assertEqual(ring.replica_count, len(job['sync_to']))
+ for k, v in expected.items():
+ msg = 'expected %s != %s for %s' % (
+ v, job[k], k)
+ self.assertEqual(v, job[k], msg)
+
+ def test_get_suffix_delta(self):
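+        # get_suffix_delta should return the suffixes whose local hashes
+        # (at our frag index, plus the None/durable key) differ from the
+        # remote hashes at the remote frag index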
+ # different
+ local_suff = {'123': {None: 'abc', 0: 'def'}}
+ remote_suff = {'456': {None: 'ghi', 0: 'jkl'}}
+ local_index = 0
+ remote_index = 0
+ suffs = self.reconstructor.get_suffix_delta(local_suff,
+ local_index,
+ remote_suff,
+ remote_index)
+ self.assertEqual(suffs, ['123'])
+
+ # now the same
+ remote_suff = {'123': {None: 'abc', 0: 'def'}}
+ suffs = self.reconstructor.get_suffix_delta(local_suff,
+ local_index,
+ remote_suff,
+ remote_index)
+ self.assertEqual(suffs, [])
+
+ # now with a mis-matched None key (missing durable)
+ remote_suff = {'123': {None: 'ghi', 0: 'def'}}
+ suffs = self.reconstructor.get_suffix_delta(local_suff,
+ local_index,
+ remote_suff,
+ remote_index)
+ self.assertEqual(suffs, ['123'])
+
+ # now with bogus local index
+ local_suff = {'123': {None: 'abc', 99: 'def'}}
+ remote_suff = {'456': {None: 'ghi', 0: 'jkl'}}
+ suffs = self.reconstructor.get_suffix_delta(local_suff,
+ local_index,
+ remote_suff,
+ remote_index)
+ self.assertEqual(suffs, ['123'])
+
+ def test_process_job_primary_in_sync(self):
+ replicas = self.policy.object_ring.replicas
+ frag_index = random.randint(0, replicas - 1)
+ sync_to = [n for n in self.policy.object_ring.devs
+ if n != self.local_dev][:2]
+        # set up left and right hashes
+ stub_hashes = {
+ '123': {frag_index: 'hash', None: 'hash'},
+ 'abc': {frag_index: 'hash', None: 'hash'},
+ }
+ left_index = sync_to[0]['index'] = (frag_index - 1) % replicas
+ left_hashes = {
+ '123': {left_index: 'hash', None: 'hash'},
+ 'abc': {left_index: 'hash', None: 'hash'},
+ }
+ right_index = sync_to[1]['index'] = (frag_index + 1) % replicas
+ right_hashes = {
+ '123': {right_index: 'hash', None: 'hash'},
+ 'abc': {right_index: 'hash', None: 'hash'},
+ }
+ partition = 0
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ job = {
+ 'job_type': object_reconstructor.SYNC,
+ 'frag_index': frag_index,
+ 'suffixes': stub_hashes.keys(),
+ 'sync_to': sync_to,
+ 'partition': partition,
+ 'path': part_path,
+ 'hashes': stub_hashes,
+ 'policy': self.policy,
+ 'local_dev': self.local_dev,
+ }
+
+ responses = [(200, pickle.dumps(hashes)) for hashes in (
+ left_hashes, right_hashes)]
+ codes, body_iter = zip(*responses)
+
+ ssync_calls = []
+
+ with nested(
+ mock_ssync_sender(ssync_calls),
+ mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes))):
+ with mocked_http_conn(*codes, body_iter=body_iter) as request_log:
+ self.reconstructor.process_job(job)
+
+ expected_suffix_calls = set([
+ ('10.0.0.1', '/sdb/0'),
+ ('10.0.0.2', '/sdc/0'),
+ ])
+ self.assertEqual(expected_suffix_calls,
+ set((r['ip'], r['path'])
+ for r in request_log.requests))
+
+ self.assertEqual(len(ssync_calls), 0)
+
+ def test_process_job_primary_not_in_sync(self):
+ replicas = self.policy.object_ring.replicas
+ frag_index = random.randint(0, replicas - 1)
+ sync_to = [n for n in self.policy.object_ring.devs
+ if n != self.local_dev][:2]
+        # set up left and right hashes
+ stub_hashes = {
+ '123': {frag_index: 'hash', None: 'hash'},
+ 'abc': {frag_index: 'hash', None: 'hash'},
+ }
+ sync_to[0]['index'] = (frag_index - 1) % replicas
+ left_hashes = {}
+ sync_to[1]['index'] = (frag_index + 1) % replicas
+ right_hashes = {}
+
+ partition = 0
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ job = {
+ 'job_type': object_reconstructor.SYNC,
+ 'frag_index': frag_index,
+ 'suffixes': stub_hashes.keys(),
+ 'sync_to': sync_to,
+ 'partition': partition,
+ 'path': part_path,
+ 'hashes': stub_hashes,
+ 'policy': self.policy,
+ 'local_dev': self.local_dev,
+ }
+
+ responses = [(200, pickle.dumps(hashes)) for hashes in (
+ left_hashes, left_hashes, right_hashes, right_hashes)]
+ codes, body_iter = zip(*responses)
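+        # (two responses per remote: one REPLICATE to fetch its hashes,
+        # and one after the sync to recalculate the synced suffixes)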
+
+ ssync_calls = []
+ with nested(
+ mock_ssync_sender(ssync_calls),
+ mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes))):
+ with mocked_http_conn(*codes, body_iter=body_iter) as request_log:
+ self.reconstructor.process_job(job)
+
+ expected_suffix_calls = set([
+ ('10.0.0.1', '/sdb/0'),
+ ('10.0.0.1', '/sdb/0/123-abc'),
+ ('10.0.0.2', '/sdc/0'),
+ ('10.0.0.2', '/sdc/0/123-abc'),
+ ])
+ self.assertEqual(expected_suffix_calls,
+ set((r['ip'], r['path'])
+ for r in request_log.requests))
+
+ expected_ssync_calls = sorted([
+ ('10.0.0.1', 0, set(['123', 'abc'])),
+ ('10.0.0.2', 0, set(['123', 'abc'])),
+ ])
+ self.assertEqual(expected_ssync_calls, sorted((
+ c['node']['ip'],
+ c['job']['partition'],
+ set(c['suffixes']),
+ ) for c in ssync_calls))
+
+ def test_process_job_sync_missing_durable(self):
+ replicas = self.policy.object_ring.replicas
+ frag_index = random.randint(0, replicas - 1)
+ sync_to = [n for n in self.policy.object_ring.devs
+ if n != self.local_dev][:2]
+        # set up left and right hashes
+ stub_hashes = {
+ '123': {frag_index: 'hash', None: 'hash'},
+ 'abc': {frag_index: 'hash', None: 'hash'},
+ }
+ # left hand side is in sync
+ left_index = sync_to[0]['index'] = (frag_index - 1) % replicas
+ left_hashes = {
+ '123': {left_index: 'hash', None: 'hash'},
+ 'abc': {left_index: 'hash', None: 'hash'},
+ }
+ # right hand side has fragment, but no durable (None key is whack)
+ right_index = sync_to[1]['index'] = (frag_index + 1) % replicas
+ right_hashes = {
+ '123': {right_index: 'hash', None: 'hash'},
+ 'abc': {right_index: 'hash', None: 'different-because-durable'},
+ }
+
+ partition = 0
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ job = {
+ 'job_type': object_reconstructor.SYNC,
+ 'frag_index': frag_index,
+ 'suffixes': stub_hashes.keys(),
+ 'sync_to': sync_to,
+ 'partition': partition,
+ 'path': part_path,
+ 'hashes': stub_hashes,
+ 'policy': self.policy,
+ 'local_dev': self.local_dev,
+ }
+
+ responses = [(200, pickle.dumps(hashes)) for hashes in (
+ left_hashes, right_hashes, right_hashes)]
+ codes, body_iter = zip(*responses)
+
+ ssync_calls = []
+ with nested(
+ mock_ssync_sender(ssync_calls),
+ mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes))):
+ with mocked_http_conn(*codes, body_iter=body_iter) as request_log:
+ self.reconstructor.process_job(job)
+
+ expected_suffix_calls = set([
+ ('10.0.0.1', '/sdb/0'),
+ ('10.0.0.2', '/sdc/0'),
+ ('10.0.0.2', '/sdc/0/abc'),
+ ])
+ self.assertEqual(expected_suffix_calls,
+ set((r['ip'], r['path'])
+ for r in request_log.requests))
+
+ expected_ssync_calls = sorted([
+ ('10.0.0.2', 0, ['abc']),
+ ])
+ self.assertEqual(expected_ssync_calls, sorted((
+ c['node']['ip'],
+ c['job']['partition'],
+ c['suffixes'],
+ ) for c in ssync_calls))
+
+ def test_process_job_primary_some_in_sync(self):
+ replicas = self.policy.object_ring.replicas
+ frag_index = random.randint(0, replicas - 1)
+ sync_to = [n for n in self.policy.object_ring.devs
+ if n != self.local_dev][:2]
+        # set up left and right hashes
+ stub_hashes = {
+ '123': {frag_index: 'hash', None: 'hash'},
+ 'abc': {frag_index: 'hash', None: 'hash'},
+ }
+ left_index = sync_to[0]['index'] = (frag_index - 1) % replicas
+ left_hashes = {
+ '123': {left_index: 'hashX', None: 'hash'},
+ 'abc': {left_index: 'hash', None: 'hash'},
+ }
+ right_index = sync_to[1]['index'] = (frag_index + 1) % replicas
+ right_hashes = {
+ '123': {right_index: 'hash', None: 'hash'},
+ }
+ partition = 0
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ job = {
+ 'job_type': object_reconstructor.SYNC,
+ 'frag_index': frag_index,
+ 'suffixes': stub_hashes.keys(),
+ 'sync_to': sync_to,
+ 'partition': partition,
+ 'path': part_path,
+ 'hashes': stub_hashes,
+ 'policy': self.policy,
+ 'local_dev': self.local_dev,
+ }
+
+ responses = [(200, pickle.dumps(hashes)) for hashes in (
+ left_hashes, left_hashes, right_hashes, right_hashes)]
+ codes, body_iter = zip(*responses)
+
+ ssync_calls = []
+
+ with nested(
+ mock_ssync_sender(ssync_calls),
+ mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes))):
+ with mocked_http_conn(*codes, body_iter=body_iter) as request_log:
+ self.reconstructor.process_job(job)
+
+ expected_suffix_calls = set([
+ ('10.0.0.1', '/sdb/0'),
+ ('10.0.0.1', '/sdb/0/123'),
+ ('10.0.0.2', '/sdc/0'),
+ ('10.0.0.2', '/sdc/0/abc'),
+ ])
+ self.assertEqual(expected_suffix_calls,
+ set((r['ip'], r['path'])
+ for r in request_log.requests))
+
+ self.assertEqual(len(ssync_calls), 2)
+ self.assertEqual(set(c['node']['index'] for c in ssync_calls),
+ set([left_index, right_index]))
+ for call in ssync_calls:
+ if call['node']['index'] == left_index:
+ self.assertEqual(call['suffixes'], ['123'])
+ elif call['node']['index'] == right_index:
+ self.assertEqual(call['suffixes'], ['abc'])
+ else:
+ self.fail('unexpected call %r' % call)
+
+ def test_process_job_primary_down(self):
+ replicas = self.policy.object_ring.replicas
+ partition = 0
+ frag_index = random.randint(0, replicas - 1)
+ stub_hashes = {
+ '123': {frag_index: 'hash', None: 'hash'},
+ 'abc': {frag_index: 'hash', None: 'hash'},
+ }
+
+ part_nodes = self.policy.object_ring.get_part_nodes(partition)
+ sync_to = part_nodes[:2]
+
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ job = {
+ 'job_type': object_reconstructor.SYNC,
+ 'frag_index': frag_index,
+ 'suffixes': stub_hashes.keys(),
+ 'sync_to': sync_to,
+ 'partition': partition,
+ 'path': part_path,
+ 'hashes': stub_hashes,
+ 'policy': self.policy,
+ 'device': self.local_dev['device'],
+ 'local_dev': self.local_dev,
+ }
+
+ non_local = {'called': 0}
+
+ def ssync_response_callback(*args):
+ # in this test, ssync fails on the first (primary sync_to) node
+ if non_local['called'] >= 1:
+ return True, {}
+ non_local['called'] += 1
+ return False, {}
+
+ expected_suffix_calls = set()
+ for node in part_nodes[:3]:
+ expected_suffix_calls.update([
+ (node['replication_ip'], '/%s/0' % node['device']),
+ (node['replication_ip'], '/%s/0/123-abc' % node['device']),
+ ])
+
+ ssync_calls = []
+ with nested(
+ mock_ssync_sender(ssync_calls,
+ response_callback=ssync_response_callback),
+ mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes))):
+ with mocked_http_conn(*[200] * len(expected_suffix_calls),
+ body=pickle.dumps({})) as request_log:
+ self.reconstructor.process_job(job)
+
+ found_suffix_calls = set((r['ip'], r['path'])
+ for r in request_log.requests)
+ self.assertEqual(expected_suffix_calls, found_suffix_calls)
+
+ expected_ssync_calls = sorted([
+ ('10.0.0.0', 0, set(['123', 'abc'])),
+ ('10.0.0.1', 0, set(['123', 'abc'])),
+ ('10.0.0.2', 0, set(['123', 'abc'])),
+ ])
+ found_ssync_calls = sorted((
+ c['node']['ip'],
+ c['job']['partition'],
+ set(c['suffixes']),
+ ) for c in ssync_calls)
+ self.assertEqual(expected_ssync_calls, found_ssync_calls)
+
+ def test_process_job_suffix_call_errors(self):
+ replicas = self.policy.object_ring.replicas
+ partition = 0
+ frag_index = random.randint(0, replicas - 1)
+ stub_hashes = {
+ '123': {frag_index: 'hash', None: 'hash'},
+ 'abc': {frag_index: 'hash', None: 'hash'},
+ }
+
+ part_nodes = self.policy.object_ring.get_part_nodes(partition)
+ sync_to = part_nodes[:2]
+
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ job = {
+ 'job_type': object_reconstructor.SYNC,
+ 'frag_index': frag_index,
+ 'suffixes': stub_hashes.keys(),
+ 'sync_to': sync_to,
+ 'partition': partition,
+ 'path': part_path,
+ 'hashes': stub_hashes,
+ 'policy': self.policy,
+ 'device': self.local_dev['device'],
+ 'local_dev': self.local_dev,
+ }
+
+ expected_suffix_calls = set((
+ node['replication_ip'], '/%s/0' % node['device']
+ ) for node in part_nodes)
+
+ possible_errors = [404, 507, Timeout(), Exception('kaboom!')]
+ codes = [random.choice(possible_errors)
+ for r in expected_suffix_calls]
+
+ ssync_calls = []
+ with nested(
+ mock_ssync_sender(ssync_calls),
+ mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes))):
+ with mocked_http_conn(*codes) as request_log:
+ self.reconstructor.process_job(job)
+
+ found_suffix_calls = set((r['ip'], r['path'])
+ for r in request_log.requests)
+ self.assertEqual(expected_suffix_calls, found_suffix_calls)
+
+ self.assertFalse(ssync_calls)
+
+ def test_process_job_handoff(self):
+ replicas = self.policy.object_ring.replicas
+ frag_index = random.randint(0, replicas - 1)
+ sync_to = [random.choice([n for n in self.policy.object_ring.devs
+ if n != self.local_dev])]
+ sync_to[0]['index'] = frag_index
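+        # a revert job ships fragments to the single primary that owns
+        # this frag index, so sync_to holds exactly one node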
+
+ stub_hashes = {
+ '123': {frag_index: 'hash', None: 'hash'},
+ 'abc': {frag_index: 'hash', None: 'hash'},
+ }
+ partition = 0
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ job = {
+ 'job_type': object_reconstructor.REVERT,
+ 'frag_index': frag_index,
+ 'suffixes': stub_hashes.keys(),
+ 'sync_to': sync_to,
+ 'partition': partition,
+ 'path': part_path,
+ 'hashes': stub_hashes,
+ 'policy': self.policy,
+ 'local_dev': self.local_dev,
+ }
+
+ ssync_calls = []
+ with nested(
+ mock_ssync_sender(ssync_calls),
+ mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes))):
+ with mocked_http_conn(200, body=pickle.dumps({})) as request_log:
+ self.reconstructor.process_job(job)
+
+ expected_suffix_calls = set([
+ (sync_to[0]['ip'], '/%s/0/123-abc' % sync_to[0]['device']),
+ ])
+ found_suffix_calls = set((r['ip'], r['path'])
+ for r in request_log.requests)
+ self.assertEqual(expected_suffix_calls, found_suffix_calls)
+
+ self.assertEqual(len(ssync_calls), 1)
+ call = ssync_calls[0]
+ self.assertEqual(call['node'], sync_to[0])
+ self.assertEqual(set(call['suffixes']), set(['123', 'abc']))
+
+ def test_process_job_revert_to_handoff(self):
+ replicas = self.policy.object_ring.replicas
+ frag_index = random.randint(0, replicas - 1)
+ sync_to = [random.choice([n for n in self.policy.object_ring.devs
+ if n != self.local_dev])]
+ sync_to[0]['index'] = frag_index
+ partition = 0
+ handoff = next(self.policy.object_ring.get_more_nodes(partition))
+
+ stub_hashes = {
+ '123': {frag_index: 'hash', None: 'hash'},
+ 'abc': {frag_index: 'hash', None: 'hash'},
+ }
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ job = {
+ 'job_type': object_reconstructor.REVERT,
+ 'frag_index': frag_index,
+ 'suffixes': stub_hashes.keys(),
+ 'sync_to': sync_to,
+ 'partition': partition,
+ 'path': part_path,
+ 'hashes': stub_hashes,
+ 'policy': self.policy,
+ 'local_dev': self.local_dev,
+ }
+
+ non_local = {'called': 0}
+
+ def ssync_response_callback(*args):
+ # in this test, ssync fails on the first (primary sync_to) node
+ if non_local['called'] >= 1:
+ return True, {}
+ non_local['called'] += 1
+ return False, {}
+
+ expected_suffix_calls = set([
+ (node['replication_ip'], '/%s/0/123-abc' % node['device'])
+ for node in (sync_to[0], handoff)
+ ])
+
+ ssync_calls = []
+ with nested(
+ mock_ssync_sender(ssync_calls,
+ response_callback=ssync_response_callback),
+ mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes))):
+ with mocked_http_conn(*[200] * len(expected_suffix_calls),
+ body=pickle.dumps({})) as request_log:
+ self.reconstructor.process_job(job)
+
+ found_suffix_calls = set((r['ip'], r['path'])
+ for r in request_log.requests)
+ self.assertEqual(expected_suffix_calls, found_suffix_calls)
+
+ self.assertEqual(len(ssync_calls), len(expected_suffix_calls))
+ call = ssync_calls[0]
+ self.assertEqual(call['node'], sync_to[0])
+ self.assertEqual(set(call['suffixes']), set(['123', 'abc']))
+
+ def test_process_job_revert_is_handoff(self):
+ replicas = self.policy.object_ring.replicas
+ frag_index = random.randint(0, replicas - 1)
+ sync_to = [random.choice([n for n in self.policy.object_ring.devs
+ if n != self.local_dev])]
+ sync_to[0]['index'] = frag_index
+ partition = 0
+ handoff_nodes = list(self.policy.object_ring.get_more_nodes(partition))
+
+ stub_hashes = {
+ '123': {frag_index: 'hash', None: 'hash'},
+ 'abc': {frag_index: 'hash', None: 'hash'},
+ }
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ job = {
+ 'job_type': object_reconstructor.REVERT,
+ 'frag_index': frag_index,
+ 'suffixes': stub_hashes.keys(),
+ 'sync_to': sync_to,
+ 'partition': partition,
+ 'path': part_path,
+ 'hashes': stub_hashes,
+ 'policy': self.policy,
+ 'local_dev': handoff_nodes[-1],
+ }
+
+ def ssync_response_callback(*args):
+            # in this test ssync always fails, until we encounter
+            # ourselves in the list of possible handoffs to sync to
+ return False, {}
+
+ expected_suffix_calls = set([
+ (sync_to[0]['replication_ip'],
+ '/%s/0/123-abc' % sync_to[0]['device'])
+ ] + [
+ (node['replication_ip'], '/%s/0/123-abc' % node['device'])
+ for node in handoff_nodes[:-1]
+ ])
+
+ ssync_calls = []
+ with nested(
+ mock_ssync_sender(ssync_calls,
+ response_callback=ssync_response_callback),
+ mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
+ return_value=(None, stub_hashes))):
+ with mocked_http_conn(*[200] * len(expected_suffix_calls),
+ body=pickle.dumps({})) as request_log:
+ self.reconstructor.process_job(job)
+
+ found_suffix_calls = set((r['ip'], r['path'])
+ for r in request_log.requests)
+ self.assertEqual(expected_suffix_calls, found_suffix_calls)
+
+        # this is the ssync call to the primary (which fails) plus the
+        # ssync calls to all of the handoffs (except the last one, which
+        # is the local_dev)
+ self.assertEqual(len(ssync_calls), len(handoff_nodes))
+ call = ssync_calls[0]
+ self.assertEqual(call['node'], sync_to[0])
+ self.assertEqual(set(call['suffixes']), set(['123', 'abc']))
+
+ def test_process_job_revert_cleanup(self):
+ replicas = self.policy.object_ring.replicas
+ frag_index = random.randint(0, replicas - 1)
+ sync_to = [random.choice([n for n in self.policy.object_ring.devs
+ if n != self.local_dev])]
+ sync_to[0]['index'] = frag_index
+ partition = 0
+
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ os.makedirs(part_path)
+ df_mgr = self.reconstructor._df_router[self.policy]
+ df = df_mgr.get_diskfile(self.local_dev['device'], partition, 'a',
+ 'c', 'data-obj', policy=self.policy)
+ ts = self.ts()
+ with df.create() as writer:
+ test_data = 'test data'
+ writer.write(test_data)
+ metadata = {
+ 'X-Timestamp': ts.internal,
+ 'Content-Length': len(test_data),
+ 'Etag': md5(test_data).hexdigest(),
+ 'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
+ }
+ writer.put(metadata)
+ writer.commit(ts)
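+        # the create/commit above should leave both a .data and a
+        # .durable file in the object's hash dir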
+
+ ohash = os.path.basename(df._datadir)
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+
+ job = {
+ 'job_type': object_reconstructor.REVERT,
+ 'frag_index': frag_index,
+ 'suffixes': [suffix],
+ 'sync_to': sync_to,
+ 'partition': partition,
+ 'path': part_path,
+ 'hashes': {},
+ 'policy': self.policy,
+ 'local_dev': self.local_dev,
+ }
+
+ def ssync_response_callback(*args):
+ return True, {ohash: ts}
+
+ ssync_calls = []
+ with mock_ssync_sender(ssync_calls,
+ response_callback=ssync_response_callback):
+ with mocked_http_conn(200, body=pickle.dumps({})) as request_log:
+ self.reconstructor.process_job(job)
+
+ self.assertEqual([
+ (sync_to[0]['replication_ip'], '/%s/0/%s' % (
+ sync_to[0]['device'], suffix)),
+ ], [
+ (r['ip'], r['path']) for r in request_log.requests
+ ])
+ # hashpath is still there, but only the durable remains
+ files = os.listdir(df._datadir)
+ self.assertEqual(1, len(files))
+ self.assertTrue(files[0].endswith('.durable'))
+
+ # and more to the point, the next suffix recalc will clean it up
+ df_mgr = self.reconstructor._df_router[self.policy]
+ df_mgr.get_hashes(self.local_dev['device'], str(partition), [],
+ self.policy)
+ self.assertFalse(os.access(df._datadir, os.F_OK))
+
+ def test_process_job_revert_cleanup_tombstone(self):
+ replicas = self.policy.object_ring.replicas
+ frag_index = random.randint(0, replicas - 1)
+ sync_to = [random.choice([n for n in self.policy.object_ring.devs
+ if n != self.local_dev])]
+ sync_to[0]['index'] = frag_index
+ partition = 0
+
+ part_path = os.path.join(self.devices, self.local_dev['device'],
+ diskfile.get_data_dir(self.policy),
+ str(partition))
+ os.makedirs(part_path)
+ df_mgr = self.reconstructor._df_router[self.policy]
+ df = df_mgr.get_diskfile(self.local_dev['device'], partition, 'a',
+ 'c', 'data-obj', policy=self.policy)
+ ts = self.ts()
+ df.delete(ts)
+
+ ohash = os.path.basename(df._datadir)
+ suffix = os.path.basename(os.path.dirname(df._datadir))
+
+ job = {
+ 'job_type': object_reconstructor.REVERT,
+ 'frag_index': frag_index,
+ 'suffixes': [suffix],
+ 'sync_to': sync_to,
+ 'partition': partition,
+ 'path': part_path,
+ 'hashes': {},
+ 'policy': self.policy,
+ 'local_dev': self.local_dev,
+ }
+
+ def ssync_response_callback(*args):
+ return True, {ohash: ts}
+
+ ssync_calls = []
+ with mock_ssync_sender(ssync_calls,
+ response_callback=ssync_response_callback):
+ with mocked_http_conn(200, body=pickle.dumps({})) as request_log:
+ self.reconstructor.process_job(job)
+
+ self.assertEqual([
+ (sync_to[0]['replication_ip'], '/%s/0/%s' % (
+ sync_to[0]['device'], suffix)),
+ ], [
+ (r['ip'], r['path']) for r in request_log.requests
+ ])
+ # hashpath is still there, but it's empty
+ self.assertEqual([], os.listdir(df._datadir))
+
+ def test_reconstruct_fa_no_errors(self):
+ job = {
+ 'partition': 0,
+ 'policy': self.policy,
+ }
+ part_nodes = self.policy.object_ring.get_part_nodes(0)
+ node = part_nodes[1]
+ metadata = {
+ 'name': '/a/c/o',
+ 'Content-Length': 0,
+ 'ETag': 'etag',
+ }
+
+ test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
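+        # (the slice makes the body span several segments without lining
+        # up on a segment boundary; the same trick is reused below)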
+ etag = md5(test_data).hexdigest()
+ ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data)
+
+ broken_body = ec_archive_bodies.pop(1)
+
+ responses = list((200, body) for body in ec_archive_bodies)
+ headers = {'X-Object-Sysmeta-Ec-Etag': etag}
+ codes, body_iter = zip(*responses)
+ with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
+ df = self.reconstructor.reconstruct_fa(
+ job, node, metadata)
+ fixed_body = ''.join(df.reader())
+ self.assertEqual(len(fixed_body), len(broken_body))
+ self.assertEqual(md5(fixed_body).hexdigest(),
+ md5(broken_body).hexdigest())
+
+ def test_reconstruct_fa_errors_works(self):
+ job = {
+ 'partition': 0,
+ 'policy': self.policy,
+ }
+ part_nodes = self.policy.object_ring.get_part_nodes(0)
+ node = part_nodes[4]
+ metadata = {
+ 'name': '/a/c/o',
+ 'Content-Length': 0,
+ 'ETag': 'etag',
+ }
+
+ test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
+ etag = md5(test_data).hexdigest()
+ ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data)
+
+ broken_body = ec_archive_bodies.pop(4)
+
+ base_responses = list((200, body) for body in ec_archive_bodies)
+        # since we're already missing a fragment, a +2 scheme can only
+        # support one additional failure at a time
+ for error in (Timeout(), 404, Exception('kaboom!')):
+ responses = list(base_responses)
+ error_index = random.randint(0, len(responses) - 1)
+ responses[error_index] = (error, '')
+ headers = {'X-Object-Sysmeta-Ec-Etag': etag}
+ codes, body_iter = zip(*responses)
+ with mocked_http_conn(*codes, body_iter=body_iter,
+ headers=headers):
+ df = self.reconstructor.reconstruct_fa(
+ job, node, dict(metadata))
+ fixed_body = ''.join(df.reader())
+ self.assertEqual(len(fixed_body), len(broken_body))
+ self.assertEqual(md5(fixed_body).hexdigest(),
+ md5(broken_body).hexdigest())
+
+ def test_reconstruct_fa_errors_fails(self):
+ job = {
+ 'partition': 0,
+ 'policy': self.policy,
+ }
+ part_nodes = self.policy.object_ring.get_part_nodes(0)
+ node = part_nodes[1]
+ policy = self.policy
+ metadata = {
+ 'name': '/a/c/o',
+ 'Content-Length': 0,
+ 'ETag': 'etag',
+ }
+
+ possible_errors = [404, Timeout(), Exception('kaboom!')]
+ codes = [random.choice(possible_errors) for i in
+ range(policy.object_ring.replicas - 1)]
+ with mocked_http_conn(*codes):
+ self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa,
+ job, node, metadata)
+
+ def test_reconstruct_fa_with_mixed_old_etag(self):
+ job = {
+ 'partition': 0,
+ 'policy': self.policy,
+ }
+ part_nodes = self.policy.object_ring.get_part_nodes(0)
+ node = part_nodes[1]
+ metadata = {
+ 'name': '/a/c/o',
+ 'Content-Length': 0,
+ 'ETag': 'etag',
+ }
+
+ test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
+ etag = md5(test_data).hexdigest()
+ ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data)
+
+ broken_body = ec_archive_bodies.pop(1)
+
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ # bad response
+ bad_response = (200, '', {
+ 'X-Object-Sysmeta-Ec-Etag': 'some garbage',
+ 'X-Backend-Timestamp': next(ts).internal,
+ })
+
+ # good responses
+ headers = {
+ 'X-Object-Sysmeta-Ec-Etag': etag,
+ 'X-Backend-Timestamp': next(ts).internal
+ }
+ responses = [(200, body, headers)
+ for body in ec_archive_bodies]
+ # mixed together
+ error_index = random.randint(0, len(responses) - 2)
+ responses[error_index] = bad_response
+ codes, body_iter, headers = zip(*responses)
+ with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
+ df = self.reconstructor.reconstruct_fa(
+ job, node, metadata)
+ fixed_body = ''.join(df.reader())
+ self.assertEqual(len(fixed_body), len(broken_body))
+ self.assertEqual(md5(fixed_body).hexdigest(),
+ md5(broken_body).hexdigest())
+
+ def test_reconstruct_fa_with_mixed_new_etag(self):
+ job = {
+ 'partition': 0,
+ 'policy': self.policy,
+ }
+ part_nodes = self.policy.object_ring.get_part_nodes(0)
+ node = part_nodes[1]
+ metadata = {
+ 'name': '/a/c/o',
+ 'Content-Length': 0,
+ 'ETag': 'etag',
+ }
+
+ test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
+ etag = md5(test_data).hexdigest()
+ ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data)
+
+ broken_body = ec_archive_bodies.pop(1)
+
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ # good responses
+ headers = {
+ 'X-Object-Sysmeta-Ec-Etag': etag,
+ 'X-Backend-Timestamp': next(ts).internal
+ }
+ responses = [(200, body, headers)
+ for body in ec_archive_bodies]
+ codes, body_iter, headers = zip(*responses)
+
+ # sanity check before negative test
+ with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
+ df = self.reconstructor.reconstruct_fa(
+ job, node, dict(metadata))
+ fixed_body = ''.join(df.reader())
+ self.assertEqual(len(fixed_body), len(broken_body))
+ self.assertEqual(md5(fixed_body).hexdigest(),
+ md5(broken_body).hexdigest())
+
+ # one newer etag can spoil the bunch
+ new_response = (200, '', {
+ 'X-Object-Sysmeta-Ec-Etag': 'some garbage',
+ 'X-Backend-Timestamp': next(ts).internal,
+ })
+ new_index = random.randint(0, len(responses) - self.policy.ec_nparity)
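+        # (presumably kept within the first ec_ndata responses so the
+        # reconstructor is sure to see the newer timestamp)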
+ responses[new_index] = new_response
+ codes, body_iter, headers = zip(*responses)
+ with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
+ self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa,
+ job, node, dict(metadata))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py
index ab89e4925..f169e52dd 100644
--- a/test/unit/obj/test_replicator.py
+++ b/test/unit/obj/test_replicator.py
@@ -27,7 +27,7 @@ from errno import ENOENT, ENOTEMPTY, ENOTDIR
from eventlet.green import subprocess
from eventlet import Timeout, tpool
-from test.unit import FakeLogger, debug_logger, patch_policies
+from test.unit import debug_logger, patch_policies
from swift.common import utils
from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \
storage_directory
@@ -173,9 +173,9 @@ class TestObjectReplicator(unittest.TestCase):
os.mkdir(self.devices)
os.mkdir(os.path.join(self.devices, 'sda'))
self.objects = os.path.join(self.devices, 'sda',
- diskfile.get_data_dir(0))
+ diskfile.get_data_dir(POLICIES[0]))
self.objects_1 = os.path.join(self.devices, 'sda',
- diskfile.get_data_dir(1))
+ diskfile.get_data_dir(POLICIES[1]))
os.mkdir(self.objects)
os.mkdir(self.objects_1)
self.parts = {}
@@ -190,7 +190,7 @@ class TestObjectReplicator(unittest.TestCase):
swift_dir=self.testdir, devices=self.devices, mount_check='false',
timeout='300', stats_interval='1', sync_method='rsync')
self.replicator = object_replicator.ObjectReplicator(self.conf)
- self.replicator.logger = FakeLogger()
+ self.logger = self.replicator.logger = debug_logger('test-replicator')
self.df_mgr = diskfile.DiskFileManager(self.conf,
self.replicator.logger)
@@ -205,7 +205,7 @@ class TestObjectReplicator(unittest.TestCase):
object_replicator.http_connect = mock_http_connect(200)
cur_part = '0'
df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o',
- policy_idx=0)
+ policy=POLICIES[0])
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
@@ -216,7 +216,7 @@ class TestObjectReplicator(unittest.TestCase):
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, cur_part, data_dir)
process_arg_checker = []
- ring = replicator.get_object_ring(0)
+ ring = replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(int(cur_part))
if node['ip'] not in _ips()]
@@ -239,7 +239,7 @@ class TestObjectReplicator(unittest.TestCase):
object_replicator.http_connect = mock_http_connect(200)
cur_part = '0'
df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o',
- policy_idx=1)
+ policy=POLICIES[1])
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
@@ -250,7 +250,7 @@ class TestObjectReplicator(unittest.TestCase):
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects_1, cur_part, data_dir)
process_arg_checker = []
- ring = replicator.get_object_ring(1)
+ ring = replicator.load_object_ring(POLICIES[1])
nodes = [node for node in
ring.get_part_nodes(int(cur_part))
if node['ip'] not in _ips()]
@@ -266,7 +266,7 @@ class TestObjectReplicator(unittest.TestCase):
def test_check_ring(self):
for pol in POLICIES:
- obj_ring = self.replicator.get_object_ring(pol.idx)
+ obj_ring = self.replicator.load_object_ring(pol)
self.assertTrue(self.replicator.check_ring(obj_ring))
orig_check = self.replicator.next_check
self.replicator.next_check = orig_check - 30
@@ -280,29 +280,27 @@ class TestObjectReplicator(unittest.TestCase):
def test_collect_jobs_mkdirs_error(self):
+ non_local = {}
+
def blowup_mkdirs(path):
+ non_local['path'] = path
raise OSError('Ow!')
with mock.patch.object(object_replicator, 'mkdirs', blowup_mkdirs):
rmtree(self.objects, ignore_errors=1)
object_replicator.mkdirs = blowup_mkdirs
self.replicator.collect_jobs()
- self.assertTrue('exception' in self.replicator.logger.log_dict)
- self.assertEquals(
- len(self.replicator.logger.log_dict['exception']), 1)
- exc_args, exc_kwargs, exc_str = \
- self.replicator.logger.log_dict['exception'][0]
- self.assertEquals(len(exc_args), 1)
- self.assertTrue(exc_args[0].startswith('ERROR creating '))
- self.assertEquals(exc_kwargs, {})
- self.assertEquals(exc_str, 'Ow!')
+ self.assertEqual(self.logger.get_lines_for_level('error'), [
+ 'ERROR creating %s: ' % non_local['path']])
+ log_args, log_kwargs = self.logger.log_dict['error'][0]
+ self.assertEqual(str(log_kwargs['exc_info'][1]), 'Ow!')
def test_collect_jobs(self):
jobs = self.replicator.collect_jobs()
jobs_to_delete = [j for j in jobs if j['delete']]
jobs_by_pol_part = {}
for job in jobs:
- jobs_by_pol_part[str(job['policy_idx']) + job['partition']] = job
+ jobs_by_pol_part[str(int(job['policy'])) + job['partition']] = job
self.assertEquals(len(jobs_to_delete), 2)
self.assertEqual('1', jobs_to_delete[0]['partition'])
self.assertEquals(
@@ -383,19 +381,19 @@ class TestObjectReplicator(unittest.TestCase):
self.assertFalse(os.path.exists(pol_0_part_1_path))
self.assertFalse(os.path.exists(pol_1_part_1_path))
-
- logged_warnings = sorted(self.replicator.logger.log_dict['warning'])
- self.assertEquals(
- (('Removing partition directory which was a file: %s',
- pol_1_part_1_path), {}), logged_warnings[0])
- self.assertEquals(
- (('Removing partition directory which was a file: %s',
- pol_0_part_1_path), {}), logged_warnings[1])
+ self.assertEqual(
+ sorted(self.logger.get_lines_for_level('warning')), [
+ ('Removing partition directory which was a file: %s'
+ % pol_1_part_1_path),
+ ('Removing partition directory which was a file: %s'
+ % pol_0_part_1_path),
+ ])
def test_delete_partition(self):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
- df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
@@ -407,7 +405,7 @@ class TestObjectReplicator(unittest.TestCase):
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
- ring = self.replicator.get_object_ring(0)
+ ring = self.replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
@@ -424,7 +422,8 @@ class TestObjectReplicator(unittest.TestCase):
self.replicator.conf.pop('sync_method')
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
- df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
@@ -436,7 +435,7 @@ class TestObjectReplicator(unittest.TestCase):
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
- ring = self.replicator.get_object_ring(0)
+ ring = self.replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
@@ -473,10 +472,11 @@ class TestObjectReplicator(unittest.TestCase):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
- df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
- f = open(os.path.join(df._datadir,
- normalize_timestamp(time.time()) + '.data'),
+ ts = normalize_timestamp(time.time())
+ f = open(os.path.join(df._datadir, ts + '.data'),
'wb')
f.write('1234567890')
f.close()
@@ -487,7 +487,7 @@ class TestObjectReplicator(unittest.TestCase):
self.assertTrue(os.access(part_path, os.F_OK))
def _fake_ssync(node, job, suffixes, **kwargs):
- return True, set([ohash])
+ return True, {ohash: ts}
self.replicator.sync_method = _fake_ssync
self.replicator.replicate()
@@ -499,7 +499,7 @@ class TestObjectReplicator(unittest.TestCase):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
- policy_idx=1)
+ policy=POLICIES[1])
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
@@ -511,7 +511,7 @@ class TestObjectReplicator(unittest.TestCase):
whole_path_from = os.path.join(self.objects_1, '1', data_dir)
part_path = os.path.join(self.objects_1, '1')
self.assertTrue(os.access(part_path, os.F_OK))
- ring = self.replicator.get_object_ring(1)
+ ring = self.replicator.load_object_ring(POLICIES[1])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
@@ -527,7 +527,8 @@ class TestObjectReplicator(unittest.TestCase):
def test_delete_partition_with_failures(self):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
- df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
@@ -539,7 +540,7 @@ class TestObjectReplicator(unittest.TestCase):
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
- ring = self.replicator.get_object_ring(0)
+ ring = self.replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
@@ -562,7 +563,8 @@ class TestObjectReplicator(unittest.TestCase):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
self.replicator.handoff_delete = 2
- df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
@@ -574,7 +576,7 @@ class TestObjectReplicator(unittest.TestCase):
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
- ring = self.replicator.get_object_ring(0)
+ ring = self.replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
@@ -596,7 +598,8 @@ class TestObjectReplicator(unittest.TestCase):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
self.replicator.handoff_delete = 2
- df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
@@ -608,7 +611,7 @@ class TestObjectReplicator(unittest.TestCase):
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
- ring = self.replicator.get_object_ring(0)
+ ring = self.replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
@@ -630,7 +633,8 @@ class TestObjectReplicator(unittest.TestCase):
def test_delete_partition_with_handoff_delete_fail_in_other_region(self):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
- df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
@@ -642,7 +646,7 @@ class TestObjectReplicator(unittest.TestCase):
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
- ring = self.replicator.get_object_ring(0)
+ ring = self.replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
@@ -662,7 +666,8 @@ class TestObjectReplicator(unittest.TestCase):
self.assertTrue(os.access(part_path, os.F_OK))
def test_delete_partition_override_params(self):
- df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
@@ -675,9 +680,10 @@ class TestObjectReplicator(unittest.TestCase):
self.assertFalse(os.access(part_path, os.F_OK))
def test_delete_policy_override_params(self):
- df0 = self.df_mgr.get_diskfile('sda', '99', 'a', 'c', 'o')
+ df0 = self.df_mgr.get_diskfile('sda', '99', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
df1 = self.df_mgr.get_diskfile('sda', '99', 'a', 'c', 'o',
- policy_idx=1)
+ policy=POLICIES[1])
mkdirs(df0._datadir)
mkdirs(df1._datadir)
@@ -698,10 +704,11 @@ class TestObjectReplicator(unittest.TestCase):
def test_delete_partition_ssync(self):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
- df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
- f = open(os.path.join(df._datadir,
- normalize_timestamp(time.time()) + '.data'),
+ ts = normalize_timestamp(time.time())
+ f = open(os.path.join(df._datadir, ts + '.data'),
'wb')
f.write('0')
f.close()
@@ -716,14 +723,14 @@ class TestObjectReplicator(unittest.TestCase):
def _fake_ssync(node, job, suffixes, **kwargs):
success = True
- ret_val = [whole_path_from]
+ ret_val = {ohash: ts}
if self.call_nums == 2:
# ssync should return (True, {}) only when the second
# candidate node does not yet have the replica
success = False
- ret_val = []
+ ret_val = {}
self.call_nums += 1
- return success, set(ret_val)
+ return success, ret_val
self.replicator.sync_method = _fake_ssync
self.replicator.replicate()
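
# A minimal sketch of the revised sync_method contract exercised above,
# assuming callers now expect (success, {object_hash: timestamp}) instead
# of the old (success, set_of_synced_paths) pair.
import time
from swift.common.utils import hash_path, normalize_timestamp

def fake_ssync(node, job, suffixes, **kwargs):
    # report a single object as in sync as of "now"
    ohash = hash_path('a', 'c', 'o')
    return True, {ohash: normalize_timestamp(time.time())}
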
@@ -746,11 +753,11 @@ class TestObjectReplicator(unittest.TestCase):
def test_delete_partition_ssync_with_sync_failure(self):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
- df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
+ ts = normalize_timestamp(time.time())
mkdirs(df._datadir)
- f = open(os.path.join(df._datadir,
- normalize_timestamp(time.time()) + '.data'),
- 'wb')
+ f = open(os.path.join(df._datadir, ts + '.data'), 'wb')
f.write('0')
f.close()
ohash = hash_path('a', 'c', 'o')
@@ -763,14 +770,14 @@ class TestObjectReplicator(unittest.TestCase):
def _fake_ssync(node, job, suffixes, **kwags):
success = False
- ret_val = []
+ ret_val = {}
if self.call_nums == 2:
# ssync should return (True, {}) only when the second
# candidate node does not yet have the replica
success = True
- ret_val = [whole_path_from]
+ ret_val = {ohash: ts}
self.call_nums += 1
- return success, set(ret_val)
+ return success, ret_val
self.replicator.sync_method = _fake_ssync
self.replicator.replicate()
@@ -794,11 +801,11 @@ class TestObjectReplicator(unittest.TestCase):
self.replicator.logger = debug_logger('test-replicator')
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
- df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
- f = open(os.path.join(df._datadir,
- normalize_timestamp(time.time()) + '.data'),
- 'wb')
+ ts = normalize_timestamp(time.time())
+ f = open(os.path.join(df._datadir, ts + '.data'), 'wb')
f.write('0')
f.close()
ohash = hash_path('a', 'c', 'o')
@@ -809,16 +816,16 @@ class TestObjectReplicator(unittest.TestCase):
self.call_nums = 0
self.conf['sync_method'] = 'ssync'
- in_sync_objs = []
+ in_sync_objs = {}
def _fake_ssync(node, job, suffixes, remote_check_objs=None):
self.call_nums += 1
if remote_check_objs is None:
# sync job
- ret_val = [whole_path_from]
+ ret_val = {ohash: ts}
else:
ret_val = in_sync_objs
- return True, set(ret_val)
+ return True, ret_val
self.replicator.sync_method = _fake_ssync
self.replicator.replicate()
@@ -833,12 +840,13 @@ class TestObjectReplicator(unittest.TestCase):
def test_delete_partition_ssync_with_cleanup_failure(self):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
- self.replicator.logger = mock_logger = mock.MagicMock()
- df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
+ self.replicator.logger = mock_logger = \
+ debug_logger('test-replicator')
+ df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
- f = open(os.path.join(df._datadir,
- normalize_timestamp(time.time()) + '.data'),
- 'wb')
+ ts = normalize_timestamp(time.time())
+ f = open(os.path.join(df._datadir, ts + '.data'), 'wb')
f.write('0')
f.close()
ohash = hash_path('a', 'c', 'o')
@@ -852,14 +860,14 @@ class TestObjectReplicator(unittest.TestCase):
def _fake_ssync(node, job, suffixes, **kwargs):
success = True
- ret_val = [whole_path_from]
+ ret_val = {ohash: ts}
if self.call_nums == 2:
# ssync should return (True, {}) only when the second
# candidate node does not yet have the replica
success = False
- ret_val = []
+ ret_val = {}
self.call_nums += 1
- return success, set(ret_val)
+ return success, ret_val
rmdir_func = os.rmdir
@@ -886,7 +894,7 @@ class TestObjectReplicator(unittest.TestCase):
with mock.patch('os.rmdir',
raise_exception_rmdir(OSError, ENOENT)):
self.replicator.replicate()
- self.assertEquals(mock_logger.exception.call_count, 0)
+ self.assertFalse(mock_logger.get_lines_for_level('error'))
self.assertFalse(os.access(whole_path_from, os.F_OK))
self.assertTrue(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
@@ -895,7 +903,7 @@ class TestObjectReplicator(unittest.TestCase):
with mock.patch('os.rmdir',
raise_exception_rmdir(OSError, ENOTEMPTY)):
self.replicator.replicate()
- self.assertEquals(mock_logger.exception.call_count, 0)
+ self.assertFalse(mock_logger.get_lines_for_level('error'))
self.assertFalse(os.access(whole_path_from, os.F_OK))
self.assertTrue(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
@@ -904,7 +912,7 @@ class TestObjectReplicator(unittest.TestCase):
with mock.patch('os.rmdir',
raise_exception_rmdir(OSError, ENOTDIR)):
self.replicator.replicate()
- self.assertEquals(mock_logger.exception.call_count, 1)
+ self.assertEqual(len(mock_logger.get_lines_for_level('error')), 1)
self.assertFalse(os.access(whole_path_from, os.F_OK))
self.assertTrue(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
@@ -929,7 +937,8 @@ class TestObjectReplicator(unittest.TestCase):
# Write some files into '1' and run replicate; they should be moved
# to the other partitions and then the node should get deleted.
cur_part = '1'
- df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
@@ -939,7 +948,7 @@ class TestObjectReplicator(unittest.TestCase):
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, cur_part, data_dir)
- ring = replicator.get_object_ring(0)
+ ring = replicator.load_object_ring(POLICIES[0])
process_arg_checker = []
nodes = [node for node in
ring.get_part_nodes(int(cur_part))
@@ -993,7 +1002,8 @@ class TestObjectReplicator(unittest.TestCase):
# Write some files into '1' and run replicate; they should be moved
# to the other partitions and then the node should get deleted.
cur_part = '1'
- df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o')
+ df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o',
+ policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
@@ -1004,10 +1014,11 @@ class TestObjectReplicator(unittest.TestCase):
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, cur_part, data_dir)
process_arg_checker = []
- ring = replicator.get_object_ring(0)
+ ring = replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(int(cur_part))
if node['ip'] not in _ips()]
+
for node in nodes:
rsync_mod = '%s::object/sda/objects/%s' % (node['ip'],
cur_part)
@@ -1071,8 +1082,8 @@ class TestObjectReplicator(unittest.TestCase):
expect = 'Error syncing partition'
for job in jobs:
set_default(self)
- ring = self.replicator.get_object_ring(job['policy_idx'])
- self.headers['X-Backend-Storage-Policy-Index'] = job['policy_idx']
+ ring = job['policy'].object_ring
+ self.headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
self.replicator.update(job)
self.assertTrue(error in mock_logger.error.call_args[0][0])
self.assertTrue(expect in mock_logger.exception.call_args[0][0])
@@ -1118,7 +1129,7 @@ class TestObjectReplicator(unittest.TestCase):
for job in jobs:
set_default(self)
# limit local job to policy 0 for simplicity
- if job['partition'] == '0' and job['policy_idx'] == 0:
+ if job['partition'] == '0' and int(job['policy']) == 0:
local_job = job.copy()
continue
self.replicator.update(job)
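
# A minimal sketch of consuming the policy-carrying jobs shown above,
# assuming StoragePolicy casts to its index via int() and carries its own
# ring; the replicator name is illustrative.
for job in replicator.collect_jobs():
    index = int(job['policy'])          # replaces the old job['policy_idx']
    ring = job['policy'].object_ring    # replaces get_object_ring(index)
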
diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py
index 1823a9014..52a34347a 100755
--- a/test/unit/obj/test_server.py
+++ b/test/unit/obj/test_server.py
@@ -18,6 +18,7 @@
import cPickle as pickle
import datetime
+import json
import errno
import operator
import os
@@ -39,17 +40,19 @@ from eventlet.green import httplib
from nose import SkipTest
from swift import __version__ as swift_version
+from swift.common.http import is_success
from test.unit import FakeLogger, debug_logger, mocked_http_conn
from test.unit import connect_tcp, readuntil2crlfs, patch_policies
from swift.obj import server as object_server
from swift.obj import diskfile
-from swift.common import utils, storage_policy, bufferedhttp
+from swift.common import utils, bufferedhttp
from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \
NullLogger, storage_directory, public, replication
from swift.common import constraints
-from swift.common.swob import Request, HeaderKeyDict
+from swift.common.swob import Request, HeaderKeyDict, WsgiStringIO
from swift.common.splice import splice
-from swift.common.storage_policy import POLICIES
+from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
+ POLICIES, EC_POLICY)
from swift.common.exceptions import DiskFileDeviceUnavailable
@@ -57,7 +60,14 @@ def mock_time(*args, **kwargs):
return 5000.0
-@patch_policies
+test_policies = [
+ StoragePolicy(0, name='zero', is_default=True),
+ ECStoragePolicy(1, name='one', ec_type='jerasure_rs_vand',
+ ec_ndata=10, ec_nparity=4),
+]
+
+
+@patch_policies(test_policies)
class TestObjectController(unittest.TestCase):
"""Test swift.obj.server.ObjectController"""
@@ -68,15 +78,18 @@ class TestObjectController(unittest.TestCase):
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,
'tmp_test_object_server_ObjectController')
- conf = {'devices': self.testdir, 'mount_check': 'false'}
+ mkdirs(os.path.join(self.testdir, 'sda1'))
+ self.conf = {'devices': self.testdir, 'mount_check': 'false'}
self.object_controller = object_server.ObjectController(
- conf, logger=debug_logger())
+ self.conf, logger=debug_logger())
self.object_controller.bytes_per_sync = 1
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
- self.df_mgr = diskfile.DiskFileManager(conf,
+ self.df_mgr = diskfile.DiskFileManager(self.conf,
self.object_controller.logger)
+ self.logger = debug_logger('test-object-controller')
+
def tearDown(self):
"""Tear down for testing swift.object.server.ObjectController"""
rmtree(self.tmpdir)
@@ -84,7 +97,7 @@ class TestObjectController(unittest.TestCase):
def _stage_tmp_dir(self, policy):
mkdirs(os.path.join(self.testdir, 'sda1',
- diskfile.get_tmp_dir(int(policy))))
+ diskfile.get_tmp_dir(policy)))
def check_all_api_methods(self, obj_name='o', alt_res=None):
path = '/sda1/p/a/c/%s' % obj_name
@@ -417,7 +430,8 @@ class TestObjectController(unittest.TestCase):
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
- objfile = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o')
+ objfile = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
objfile.open()
file_name = os.path.basename(objfile._data_file)
with open(objfile._data_file) as fp:
@@ -568,7 +582,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0),
+ storage_directory(diskfile.get_data_dir(POLICIES[0]),
'p', hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
self.assert_(os.path.isfile(objfile))
@@ -603,7 +617,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
self.assert_(os.path.isfile(objfile))
@@ -638,7 +652,7 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
self.assertTrue(os.path.isfile(objfile))
@@ -715,7 +729,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
self.assert_(os.path.isfile(objfile))
@@ -729,6 +743,241 @@ class TestObjectController(unittest.TestCase):
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
+ def test_PUT_etag_in_footer(self):
+ timestamp = normalize_timestamp(time())
+ req = Request.blank(
+ '/sda1/p/a/c/o',
+ headers={'X-Timestamp': timestamp,
+ 'Content-Type': 'text/plain',
+ 'Transfer-Encoding': 'chunked',
+ 'Etag': 'other-etag',
+ 'X-Backend-Obj-Metadata-Footer': 'yes',
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
+ environ={'REQUEST_METHOD': 'PUT'})
+
+ obj_etag = md5("obj data").hexdigest()
+ footer_meta = json.dumps({"Etag": obj_etag})
+ footer_meta_cksum = md5(footer_meta).hexdigest()
+
+ req.body = "\r\n".join((
+ "--boundary",
+ "",
+ "obj data",
+ "--boundary",
+ "Content-MD5: " + footer_meta_cksum,
+ "",
+ footer_meta,
+ "--boundary--",
+ ))
+ req.headers.pop("Content-Length", None)
+
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.etag, obj_etag)
+ self.assertEqual(resp.status_int, 201)
+
+ objfile = os.path.join(
+ self.testdir, 'sda1',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
+ hash_path('a', 'c', 'o')),
+ utils.Timestamp(timestamp).internal + '.data')
+ with open(objfile) as fh:
+ self.assertEqual(fh.read(), "obj data")
+
+ def test_PUT_etag_in_footer_mismatch(self):
+ timestamp = normalize_timestamp(time())
+ req = Request.blank(
+ '/sda1/p/a/c/o',
+ headers={'X-Timestamp': timestamp,
+ 'Content-Type': 'text/plain',
+ 'Transfer-Encoding': 'chunked',
+ 'X-Backend-Obj-Metadata-Footer': 'yes',
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
+ environ={'REQUEST_METHOD': 'PUT'})
+
+ footer_meta = json.dumps({"Etag": md5("green").hexdigest()})
+ footer_meta_cksum = md5(footer_meta).hexdigest()
+
+ req.body = "\r\n".join((
+ "--boundary",
+ "",
+ "blue",
+ "--boundary",
+ "Content-MD5: " + footer_meta_cksum,
+ "",
+ footer_meta,
+ "--boundary--",
+ ))
+ req.headers.pop("Content-Length", None)
+
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 422)
+
+ def test_PUT_meta_in_footer(self):
+ timestamp = normalize_timestamp(time())
+ req = Request.blank(
+ '/sda1/p/a/c/o',
+ headers={'X-Timestamp': timestamp,
+ 'Content-Type': 'text/plain',
+ 'Transfer-Encoding': 'chunked',
+ 'X-Object-Meta-X': 'Z',
+ 'X-Object-Sysmeta-X': 'Z',
+ 'X-Backend-Obj-Metadata-Footer': 'yes',
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
+ environ={'REQUEST_METHOD': 'PUT'})
+
+ footer_meta = json.dumps({
+ 'X-Object-Meta-X': 'Y',
+ 'X-Object-Sysmeta-X': 'Y',
+ })
+ footer_meta_cksum = md5(footer_meta).hexdigest()
+
+ req.body = "\r\n".join((
+ "--boundary",
+ "",
+ "stuff stuff stuff",
+ "--boundary",
+ "Content-MD5: " + footer_meta_cksum,
+ "",
+ footer_meta,
+ "--boundary--",
+ ))
+ req.headers.pop("Content-Length", None)
+
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 201)
+
+ timestamp = normalize_timestamp(time())
+ req = Request.blank(
+ '/sda1/p/a/c/o',
+ headers={'X-Timestamp': timestamp},
+ environ={'REQUEST_METHOD': 'HEAD'})
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.headers.get('X-Object-Meta-X'), 'Y')
+ self.assertEqual(resp.headers.get('X-Object-Sysmeta-X'), 'Y')
+
+ def test_PUT_missing_footer_checksum(self):
+ timestamp = normalize_timestamp(time())
+ req = Request.blank(
+ '/sda1/p/a/c/o',
+ headers={'X-Timestamp': timestamp,
+ 'Content-Type': 'text/plain',
+ 'Transfer-Encoding': 'chunked',
+ 'X-Backend-Obj-Metadata-Footer': 'yes',
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
+ environ={'REQUEST_METHOD': 'PUT'})
+
+ footer_meta = json.dumps({"Etag": md5("obj data").hexdigest()})
+
+ req.body = "\r\n".join((
+ "--boundary",
+ "",
+ "obj data",
+ "--boundary",
+ # no Content-MD5
+ "",
+ footer_meta,
+ "--boundary--",
+ ))
+ req.headers.pop("Content-Length", None)
+
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 400)
+
+ def test_PUT_bad_footer_checksum(self):
+ timestamp = normalize_timestamp(time())
+ req = Request.blank(
+ '/sda1/p/a/c/o',
+ headers={'X-Timestamp': timestamp,
+ 'Content-Type': 'text/plain',
+ 'Transfer-Encoding': 'chunked',
+ 'X-Backend-Obj-Metadata-Footer': 'yes',
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
+ environ={'REQUEST_METHOD': 'PUT'})
+
+ footer_meta = json.dumps({"Etag": md5("obj data").hexdigest()})
+ bad_footer_meta_cksum = md5(footer_meta + "bad").hexdigest()
+
+ req.body = "\r\n".join((
+ "--boundary",
+ "",
+ "obj data",
+ "--boundary",
+ "Content-MD5: " + bad_footer_meta_cksum,
+ "",
+ footer_meta,
+ "--boundary--",
+ ))
+ req.headers.pop("Content-Length", None)
+
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 422)
+
+ def test_PUT_bad_footer_json(self):
+ timestamp = normalize_timestamp(time())
+ req = Request.blank(
+ '/sda1/p/a/c/o',
+ headers={'X-Timestamp': timestamp,
+ 'Content-Type': 'text/plain',
+ 'Transfer-Encoding': 'chunked',
+ 'X-Backend-Obj-Metadata-Footer': 'yes',
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
+ environ={'REQUEST_METHOD': 'PUT'})
+
+ footer_meta = "{{{[[{{[{[[{[{[[{{{[{{{{[[{{[{["
+ footer_meta_cksum = md5(footer_meta).hexdigest()
+
+ req.body = "\r\n".join((
+ "--boundary",
+ "",
+ "obj data",
+ "--boundary",
+ "Content-MD5: " + footer_meta_cksum,
+ "",
+ footer_meta,
+ "--boundary--",
+ ))
+ req.headers.pop("Content-Length", None)
+
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 400)
+
+ def test_PUT_extra_mime_docs_ignored(self):
+ timestamp = normalize_timestamp(time())
+ req = Request.blank(
+ '/sda1/p/a/c/o',
+ headers={'X-Timestamp': timestamp,
+ 'Content-Type': 'text/plain',
+ 'Transfer-Encoding': 'chunked',
+ 'X-Backend-Obj-Metadata-Footer': 'yes',
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
+ environ={'REQUEST_METHOD': 'PUT'})
+
+ footer_meta = json.dumps({'X-Object-Meta-Mint': 'pepper'})
+ footer_meta_cksum = md5(footer_meta).hexdigest()
+
+ req.body = "\r\n".join((
+ "--boundary",
+ "",
+ "obj data",
+ "--boundary",
+ "Content-MD5: " + footer_meta_cksum,
+ "",
+ footer_meta,
+ "--boundary",
+ "This-Document-Is-Useless: yes",
+ "",
+ "blah blah I take up space",
+ "--boundary--"
+ ))
+ req.headers.pop("Content-Length", None)
+
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 201)
+
+ # swob made this into a StringIO for us
+ wsgi_input = req.environ['wsgi.input']
+ self.assertEqual(wsgi_input.tell(), len(wsgi_input.getvalue()))
+
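
# A minimal sketch of a helper that builds the footer-bearing PUT bodies
# repeated in the tests above; the name make_footer_body is hypothetical.
import json
from hashlib import md5

def make_footer_body(data, footers, boundary='boundary'):
    footer_meta = json.dumps(footers)
    return "\r\n".join((
        "--" + boundary,
        "",
        data,
        "--" + boundary,
        # the checksum covers the serialized footer document
        "Content-MD5: " + md5(footer_meta).hexdigest(),
        "",
        footer_meta,
        "--" + boundary + "--",
    ))
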
def test_PUT_user_metadata_no_xattr(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
@@ -768,7 +1017,7 @@ class TestObjectController(unittest.TestCase):
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Content-Length': '6'})
- req.environ['wsgi.input'] = StringIO('VERIFY')
+ req.environ['wsgi.input'] = WsgiStringIO('VERIFY')
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 408)
@@ -788,7 +1037,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.data')
self.assert_(os.path.isfile(objfile))
@@ -831,7 +1080,7 @@ class TestObjectController(unittest.TestCase):
# original .data file metadata should be unchanged
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
timestamp1 + '.data')
self.assert_(os.path.isfile(objfile))
@@ -849,7 +1098,7 @@ class TestObjectController(unittest.TestCase):
# .meta file metadata should have only user meta items
metafile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
timestamp2 + '.meta')
self.assert_(os.path.isfile(metafile))
@@ -1017,6 +1266,40 @@ class TestObjectController(unittest.TestCase):
finally:
object_server.http_connect = old_http_connect
+ def test_PUT_durable_files(self):
+ for policy in POLICIES:
+ timestamp = utils.Timestamp(int(time())).internal
+ data_file_tail = '.data'
+ headers = {'X-Timestamp': timestamp,
+ 'Content-Length': '6',
+ 'Content-Type': 'application/octet-stream',
+ 'X-Backend-Storage-Policy-Index': int(policy)}
+ if policy.policy_type == EC_POLICY:
+ headers['X-Object-Sysmeta-Ec-Frag-Index'] = '2'
+ data_file_tail = '#2.data'
+ req = Request.blank(
+ '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers=headers)
+ req.body = 'VERIFY'
+ resp = req.get_response(self.object_controller)
+
+ self.assertEquals(resp.status_int, 201)
+ obj_dir = os.path.join(
+ self.testdir, 'sda1',
+ storage_directory(diskfile.get_data_dir(int(policy)),
+ 'p', hash_path('a', 'c', 'o')))
+ data_file = os.path.join(obj_dir, timestamp) + data_file_tail
+ self.assertTrue(os.path.isfile(data_file),
+ 'Expected file %r not found in %r for policy %r'
+ % (data_file, os.listdir(obj_dir), int(policy)))
+ durable_file = os.path.join(obj_dir, timestamp) + '.durable'
+ if policy.policy_type == EC_POLICY:
+ self.assertTrue(os.path.isfile(durable_file))
+ self.assertFalse(os.path.getsize(durable_file))
+ else:
+ self.assertFalse(os.path.isfile(durable_file))
+ rmtree(obj_dir)
+
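
# A minimal sketch of the on-disk names the test above asserts, assuming a
# utils.Timestamp ts and an EC fragment index of 2; names are illustrative.
if policy.policy_type == EC_POLICY:
    data_file = ts.internal + '#2.data'       # fragment index in the name
    durable_file = ts.internal + '.durable'   # zero-byte commit marker
else:
    data_file = ts.internal + '.data'         # replication: no durable file
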
def test_HEAD(self):
# Test swift.obj.server.ObjectController.HEAD
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
@@ -1058,7 +1341,7 @@ class TestObjectController(unittest.TestCase):
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
os.unlink(objfile)
@@ -1102,7 +1385,8 @@ class TestObjectController(unittest.TestCase):
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
- disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o')
+ disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
disk_file.open()
file_name = os.path.basename(disk_file._data_file)
@@ -1133,7 +1417,7 @@ class TestObjectController(unittest.TestCase):
resp = server_handler.OPTIONS(req)
self.assertEquals(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE \
- REPLICATION'.split():
+ SSYNC'.split():
self.assertTrue(
verb in resp.headers['Allow'].split(', '))
self.assertEquals(len(resp.headers['Allow'].split(', ')), 8)
@@ -1201,7 +1485,7 @@ class TestObjectController(unittest.TestCase):
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
os.unlink(objfile)
@@ -1290,6 +1574,58 @@ class TestObjectController(unittest.TestCase):
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 412)
+ def test_GET_if_match_etag_is_at(self):
+ headers = {
+ 'X-Timestamp': utils.Timestamp(time()).internal,
+ 'Content-Type': 'application/octet-stream',
+ 'X-Object-Meta-Xtag': 'madeup',
+ }
+ req = Request.blank('/sda1/p/a/c/o', method='PUT',
+ headers=headers)
+ req.body = 'test'
+ resp = req.get_response(self.object_controller)
+ self.assertEquals(resp.status_int, 201)
+ real_etag = resp.etag
+
+ # match x-backend-etag-is-at
+ req = Request.blank('/sda1/p/a/c/o', headers={
+ 'If-Match': 'madeup',
+ 'X-Backend-Etag-Is-At': 'X-Object-Meta-Xtag'})
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 200)
+
+ # no match x-backend-etag-is-at
+ req = Request.blank('/sda1/p/a/c/o', headers={
+ 'If-Match': real_etag,
+ 'X-Backend-Etag-Is-At': 'X-Object-Meta-Xtag'})
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 412)
+
+ # etag-is-at metadata doesn't exist, default to real etag
+ req = Request.blank('/sda1/p/a/c/o', headers={
+ 'If-Match': real_etag,
+ 'X-Backend-Etag-Is-At': 'X-Object-Meta-Missing'})
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 200)
+
+ # sanity no-match with no etag-is-at
+ req = Request.blank('/sda1/p/a/c/o', headers={
+ 'If-Match': 'madeup'})
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 412)
+
+ # sanity match with no etag-is-at
+ req = Request.blank('/sda1/p/a/c/o', headers={
+ 'If-Match': real_etag})
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 200)
+
+ # sanity with no if-match
+ req = Request.blank('/sda1/p/a/c/o', headers={
+ 'X-Backend-Etag-Is-At': 'X-Object-Meta-Xtag'})
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 200)
+
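
# A minimal sketch of the alternate-etag handshake exercised above: a writer
# stores a surrogate etag under ordinary object metadata, and a reader names
# that header via X-Backend-Etag-Is-At so If-Match is checked against it.
put = Request.blank('/sda1/p/a/c/o', method='PUT', body='test', headers={
    'X-Timestamp': utils.Timestamp(time()).internal,
    'Content-Type': 'application/octet-stream',
    'X-Object-Meta-Xtag': 'surrogate'})
get = Request.blank('/sda1/p/a/c/o', headers={
    'If-Match': 'surrogate',
    'X-Backend-Etag-Is-At': 'X-Object-Meta-Xtag'})
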
def test_HEAD_if_match(self):
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
@@ -1692,7 +2028,8 @@ class TestObjectController(unittest.TestCase):
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
- disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o')
+ disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
disk_file.open()
file_name = os.path.basename(disk_file._data_file)
etag = md5()
@@ -1724,7 +2061,8 @@ class TestObjectController(unittest.TestCase):
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
- disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o')
+ disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
disk_file.open()
file_name = os.path.basename(disk_file._data_file)
with open(disk_file._data_file) as fp:
@@ -1752,7 +2090,8 @@ class TestObjectController(unittest.TestCase):
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
- disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o')
+ disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
+ policy=POLICIES.legacy)
disk_file.open()
file_name = os.path.basename(disk_file._data_file)
etag = md5()
@@ -1810,7 +2149,6 @@ class TestObjectController(unittest.TestCase):
environ={'REQUEST_METHOD': 'DELETE'})
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 400)
- # self.assertRaises(KeyError, self.object_controller.DELETE, req)
# The following should have created a tombstone file
timestamp = normalize_timestamp(1000)
@@ -1821,7 +2159,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 404)
ts_1000_file = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertTrue(os.path.isfile(ts_1000_file))
@@ -1837,7 +2175,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 404)
ts_999_file = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertFalse(os.path.isfile(ts_999_file))
@@ -1857,7 +2195,7 @@ class TestObjectController(unittest.TestCase):
# There should now be 1000 ts and a 1001 data file.
data_1002_file = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
orig_timestamp + '.data')
self.assertTrue(os.path.isfile(data_1002_file))
@@ -1873,7 +2211,7 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp)
ts_1001_file = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertFalse(os.path.isfile(ts_1001_file))
@@ -1888,7 +2226,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 204)
ts_1003_file = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertTrue(os.path.isfile(ts_1003_file))
@@ -1930,7 +2268,7 @@ class TestObjectController(unittest.TestCase):
orig_timestamp.internal)
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertFalse(os.path.isfile(objfile))
@@ -1949,7 +2287,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 204)
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assert_(os.path.isfile(objfile))
@@ -1968,7 +2306,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 404)
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assert_(os.path.isfile(objfile))
@@ -1987,7 +2325,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 404)
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertFalse(os.path.isfile(objfile))
@@ -2184,7 +2522,7 @@ class TestObjectController(unittest.TestCase):
def test_call_bad_request(self):
# Test swift.obj.server.ObjectController.__call__
- inbuf = StringIO()
+ inbuf = WsgiStringIO()
errbuf = StringIO()
outbuf = StringIO()
@@ -2211,7 +2549,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(outbuf.getvalue()[:4], '400 ')
def test_call_not_found(self):
- inbuf = StringIO()
+ inbuf = WsgiStringIO()
errbuf = StringIO()
outbuf = StringIO()
@@ -2238,7 +2576,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(outbuf.getvalue()[:4], '404 ')
def test_call_bad_method(self):
- inbuf = StringIO()
+ inbuf = WsgiStringIO()
errbuf = StringIO()
outbuf = StringIO()
@@ -2274,7 +2612,7 @@ class TestObjectController(unittest.TestCase):
with mock.patch("swift.obj.diskfile.hash_path", my_hash_path):
with mock.patch("swift.obj.server.check_object_creation",
my_check):
- inbuf = StringIO()
+ inbuf = WsgiStringIO()
errbuf = StringIO()
outbuf = StringIO()
@@ -2303,7 +2641,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '201 ')
- inbuf = StringIO()
+ inbuf = WsgiStringIO()
errbuf = StringIO()
outbuf = StringIO()
@@ -2454,6 +2792,9 @@ class TestObjectController(unittest.TestCase):
return ' '
return ''
+ def set_hundred_continue_response_headers(*a, **kw):
+ pass
+
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': SlowBody()},
@@ -2483,6 +2824,9 @@ class TestObjectController(unittest.TestCase):
return ' '
return ''
+ def set_hundred_continue_response_headers(*a, **kw):
+ pass
+
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': ShortBody()},
@@ -2554,8 +2898,8 @@ class TestObjectController(unittest.TestCase):
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': 'set',
- 'X-Backend-Storage-Policy-Index': policy.idx}, 'sda1',
- policy.idx)
+ 'X-Backend-Storage-Policy-Index': int(policy)}, 'sda1',
+ policy)
finally:
object_server.http_connect = orig_http_connect
self.assertEquals(
@@ -2563,12 +2907,15 @@ class TestObjectController(unittest.TestCase):
['127.0.0.1', '1234', 'sdc1', 1, 'PUT', '/a/c/o', {
'x-timestamp': '1', 'x-out': 'set',
'user-agent': 'object-server %s' % os.getpid(),
- 'X-Backend-Storage-Policy-Index': policy.idx}])
+ 'X-Backend-Storage-Policy-Index': int(policy)}])
- @patch_policies([storage_policy.StoragePolicy(0, 'zero', True),
- storage_policy.StoragePolicy(1, 'one'),
- storage_policy.StoragePolicy(37, 'fantastico')])
+ @patch_policies([StoragePolicy(0, 'zero', True),
+ StoragePolicy(1, 'one'),
+ StoragePolicy(37, 'fantastico')])
def test_updating_multiple_delete_at_container_servers(self):
+ # update router post patch
+ self.object_controller._diskfile_router = diskfile.DiskFileRouter(
+ self.conf, self.object_controller.logger)
policy = random.choice(list(POLICIES))
self.object_controller.expiring_objects_account = 'exp'
self.object_controller.expiring_objects_container_divisor = 60
@@ -2607,7 +2954,7 @@ class TestObjectController(unittest.TestCase):
headers={'X-Timestamp': '12345',
'Content-Type': 'application/burrito',
'Content-Length': '0',
- 'X-Backend-Storage-Policy-Index': policy.idx,
+ 'X-Backend-Storage-Policy-Index': int(policy),
'X-Container-Partition': '20',
'X-Container-Host': '1.2.3.4:5',
'X-Container-Device': 'sdb1',
@@ -2643,7 +2990,7 @@ class TestObjectController(unittest.TestCase):
'X-Backend-Storage-Policy-Index': '37',
'referer': 'PUT http://localhost/sda1/p/a/c/o',
'user-agent': 'object-server %d' % os.getpid(),
- 'X-Backend-Storage-Policy-Index': policy.idx,
+ 'X-Backend-Storage-Policy-Index': int(policy),
'x-trans-id': '-'})})
self.assertEquals(
http_connect_args[1],
@@ -2684,10 +3031,13 @@ class TestObjectController(unittest.TestCase):
'X-Backend-Storage-Policy-Index': 0,
'x-trans-id': '-'})})
- @patch_policies([storage_policy.StoragePolicy(0, 'zero', True),
- storage_policy.StoragePolicy(1, 'one'),
- storage_policy.StoragePolicy(26, 'twice-thirteen')])
+ @patch_policies([StoragePolicy(0, 'zero', True),
+ StoragePolicy(1, 'one'),
+ StoragePolicy(26, 'twice-thirteen')])
def test_updating_multiple_container_servers(self):
+ # update router post patch
+ self.object_controller._diskfile_router = diskfile.DiskFileRouter(
+ self.conf, self.object_controller.logger)
http_connect_args = []
def fake_http_connect(ipaddr, port, device, partition, method, path,
@@ -2788,7 +3138,7 @@ class TestObjectController(unittest.TestCase):
int(delete_at_timestamp) /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
- req = Request.blank('/sda1/p/a/c/o', method='PUT', body='', headers={
+ headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'X-Container-Host': '10.0.0.1:6001',
@@ -2799,8 +3149,11 @@ class TestObjectController(unittest.TestCase):
'X-Delete-At-Partition': 'p',
'X-Delete-At-Host': '10.0.0.2:6002',
'X-Delete-At-Device': 'sda1',
- 'X-Backend-Storage-Policy-Index': int(policy),
- })
+ 'X-Backend-Storage-Policy-Index': int(policy)}
+ if policy.policy_type == EC_POLICY:
+ headers['X-Object-Sysmeta-Ec-Frag-Index'] = '2'
+ req = Request.blank(
+ '/sda1/p/a/c/o', method='PUT', body='', headers=headers)
with mocked_http_conn(
500, 500, give_connect=capture_updates) as fake_conn:
resp = req.get_response(self.object_controller)
@@ -2836,7 +3189,7 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(headers[key], str(value))
# check async pendings
async_dir = os.path.join(self.testdir, 'sda1',
- diskfile.get_async_dir(policy.idx))
+ diskfile.get_async_dir(policy))
found_files = []
for root, dirs, files in os.walk(async_dir):
for f in files:
@@ -2846,7 +3199,7 @@ class TestObjectController(unittest.TestCase):
if data['account'] == 'a':
self.assertEquals(
int(data['headers']
- ['X-Backend-Storage-Policy-Index']), policy.idx)
+ ['X-Backend-Storage-Policy-Index']), int(policy))
elif data['account'] == '.expiring_objects':
self.assertEquals(
int(data['headers']
@@ -2870,12 +3223,12 @@ class TestObjectController(unittest.TestCase):
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': 'set',
- 'X-Backend-Storage-Policy-Index': policy.idx}, 'sda1',
- policy.idx)
+ 'X-Backend-Storage-Policy-Index': int(policy)}, 'sda1',
+ policy)
finally:
object_server.http_connect = orig_http_connect
utils.HASH_PATH_PREFIX = _prefix
- async_dir = diskfile.get_async_dir(policy.idx)
+ async_dir = diskfile.get_async_dir(policy)
self.assertEquals(
pickle.load(open(os.path.join(
self.testdir, 'sda1', async_dir, 'a83',
@@ -2883,7 +3236,7 @@ class TestObjectController(unittest.TestCase):
utils.Timestamp(1).internal))),
{'headers': {'x-timestamp': '1', 'x-out': 'set',
'user-agent': 'object-server %s' % os.getpid(),
- 'X-Backend-Storage-Policy-Index': policy.idx},
+ 'X-Backend-Storage-Policy-Index': int(policy)},
'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT'})
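
# A minimal sketch of reading back the async pending pickled above; the
# pending_file path is illustrative, the payload keys match the assertion.
import cPickle as pickle

with open(pending_file) as fh:
    update = pickle.load(fh)
# update == {'op': 'PUT', 'account': 'a', 'container': 'c', 'obj': 'o',
#            'headers': {..., 'X-Backend-Storage-Policy-Index': int(policy)}}
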
def test_async_update_saves_on_non_2xx(self):
@@ -2914,9 +3267,9 @@ class TestObjectController(unittest.TestCase):
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': str(status),
- 'X-Backend-Storage-Policy-Index': policy.idx}, 'sda1',
- policy.idx)
- async_dir = diskfile.get_async_dir(policy.idx)
+ 'X-Backend-Storage-Policy-Index': int(policy)}, 'sda1',
+ policy)
+ async_dir = diskfile.get_async_dir(policy)
self.assertEquals(
pickle.load(open(os.path.join(
self.testdir, 'sda1', async_dir, 'a83',
@@ -2926,7 +3279,7 @@ class TestObjectController(unittest.TestCase):
'user-agent':
'object-server %s' % os.getpid(),
'X-Backend-Storage-Policy-Index':
- policy.idx},
+ int(policy)},
'account': 'a', 'container': 'c', 'obj': 'o',
'op': 'PUT'})
finally:
@@ -2990,8 +3343,8 @@ class TestObjectController(unittest.TestCase):
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': str(status)}, 'sda1',
- policy.idx)
- async_dir = diskfile.get_async_dir(int(policy))
+ policy)
+ async_dir = diskfile.get_async_dir(policy)
self.assertTrue(
os.path.exists(os.path.join(
self.testdir, 'sda1', async_dir, 'a83',
@@ -3002,6 +3355,7 @@ class TestObjectController(unittest.TestCase):
utils.HASH_PATH_PREFIX = _prefix
def test_container_update_no_async_update(self):
+ policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
@@ -3012,12 +3366,13 @@ class TestObjectController(unittest.TestCase):
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
- 'X-Trans-Id': '1234'})
+ 'X-Trans-Id': '1234',
+ 'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.container_update(
'PUT', 'a', 'c', 'o', req, {
'x-size': '0', 'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-content-type': 'text/plain', 'x-timestamp': '1'},
- 'sda1', 0)
+ 'sda1', policy)
self.assertEquals(given_args, [])
def test_container_update_success(self):
@@ -3099,6 +3454,7 @@ class TestObjectController(unittest.TestCase):
'x-foo': 'bar'}))
def test_container_update_async(self):
+ policy = random.choice(list(POLICIES))
req = Request.blank(
'/sda1/0/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
@@ -3107,26 +3463,28 @@ class TestObjectController(unittest.TestCase):
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice',
- 'Content-Type': 'text/plain'}, body='')
+ 'Content-Type': 'text/plain',
+ 'X-Object-Sysmeta-Ec-Frag-Index': 0,
+ 'X-Backend-Storage-Policy-Index': int(policy)}, body='')
given_args = []
def fake_pickle_async_update(*args):
given_args[:] = args
- self.object_controller._diskfile_mgr.pickle_async_update = \
- fake_pickle_async_update
+ diskfile_mgr = self.object_controller._diskfile_router[policy]
+ diskfile_mgr.pickle_async_update = fake_pickle_async_update
with mocked_http_conn(500) as fake_conn:
resp = req.get_response(self.object_controller)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 201)
self.assertEqual(len(given_args), 7)
(objdevice, account, container, obj, data, timestamp,
- policy_index) = given_args
+ resp_policy) = given_args
self.assertEqual(objdevice, 'sda1')
self.assertEqual(account, 'a')
self.assertEqual(container, 'c')
self.assertEqual(obj, 'o')
self.assertEqual(timestamp, utils.Timestamp(1).internal)
- self.assertEqual(policy_index, 0)
+ self.assertEqual(resp_policy, policy)
self.assertEqual(data, {
'headers': HeaderKeyDict({
'X-Size': '0',
@@ -3135,7 +3493,7 @@ class TestObjectController(unittest.TestCase):
'X-Timestamp': utils.Timestamp(1).internal,
'X-Trans-Id': '123',
'Referer': 'PUT http://localhost/sda1/0/a/c/o',
- 'X-Backend-Storage-Policy-Index': '0',
+ 'X-Backend-Storage-Policy-Index': int(policy),
'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e'}),
'obj': 'o',
'account': 'a',
@@ -3143,6 +3501,7 @@ class TestObjectController(unittest.TestCase):
'op': 'PUT'})
def test_container_update_bad_args(self):
+ policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
@@ -3155,7 +3514,8 @@ class TestObjectController(unittest.TestCase):
'X-Trans-Id': '123',
'X-Container-Host': 'chost,badhost',
'X-Container-Partition': 'cpartition',
- 'X-Container-Device': 'cdevice'})
+ 'X-Container-Device': 'cdevice',
+ 'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch.object(self.object_controller, 'async_update',
fake_async_update):
self.object_controller.container_update(
@@ -3163,7 +3523,7 @@ class TestObjectController(unittest.TestCase):
'x-size': '0',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-content-type': 'text/plain', 'x-timestamp': '1'},
- 'sda1', 0)
+ 'sda1', policy)
self.assertEqual(given_args, [])
errors = self.object_controller.logger.get_lines_for_level('error')
self.assertEqual(len(errors), 1)
@@ -3176,6 +3536,7 @@ class TestObjectController(unittest.TestCase):
def test_delete_at_update_on_put(self):
# Test how delete_at_update works when issued a delete for old
# expiration info after a new put with no new expiration info.
+ policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
@@ -3185,11 +3546,12 @@ class TestObjectController(unittest.TestCase):
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
- 'X-Trans-Id': '123'})
+ 'X-Trans-Id': '123',
+ 'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch.object(self.object_controller, 'async_update',
fake_async_update):
self.object_controller.delete_at_update(
- 'DELETE', 2, 'a', 'c', 'o', req, 'sda1', 0)
+ 'DELETE', 2, 'a', 'c', 'o', req, 'sda1', policy)
self.assertEquals(
given_args, [
'DELETE', '.expiring_objects', '0000000000',
@@ -3199,12 +3561,13 @@ class TestObjectController(unittest.TestCase):
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '123',
'referer': 'PUT http://localhost/v1/a/c/o'}),
- 'sda1', 0])
+ 'sda1', policy])
def test_delete_at_negative(self):
# Test how delete_at_update works when issued a delete for old
# expiration info after a new put with no new expiration info.
# Test negative is reset to 0
+ policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
@@ -3215,23 +3578,26 @@ class TestObjectController(unittest.TestCase):
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
- 'X-Trans-Id': '1234'})
+ 'X-Trans-Id': '1234', 'X-Backend-Storage-Policy-Index':
+ int(policy)})
self.object_controller.delete_at_update(
- 'DELETE', -2, 'a', 'c', 'o', req, 'sda1', 0)
+ 'DELETE', -2, 'a', 'c', 'o', req, 'sda1', policy)
self.assertEquals(given_args, [
'DELETE', '.expiring_objects', '0000000000', '0000000000-a/c/o',
None, None, None,
HeaderKeyDict({
+ # the .expiring_objects account is always policy-0
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'PUT http://localhost/v1/a/c/o'}),
- 'sda1', 0])
+ 'sda1', policy])
def test_delete_at_cap(self):
# Test how delete_at_update works when issued a delete for old
# expiration info after a new put with no new expiration info.
# Test past cap is reset to cap
+ policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
@@ -3242,9 +3608,10 @@ class TestObjectController(unittest.TestCase):
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
- 'X-Trans-Id': '1234'})
+ 'X-Trans-Id': '1234',
+ 'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update(
- 'DELETE', 12345678901, 'a', 'c', 'o', req, 'sda1', 0)
+ 'DELETE', 12345678901, 'a', 'c', 'o', req, 'sda1', policy)
expiring_obj_container = given_args.pop(2)
expected_exp_cont = utils.get_expirer_container(
utils.normalize_delete_at_timestamp(12345678901),
@@ -3259,12 +3626,13 @@ class TestObjectController(unittest.TestCase):
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'PUT http://localhost/v1/a/c/o'}),
- 'sda1', 0])
+ 'sda1', policy])
def test_delete_at_update_put_with_info(self):
# Keep next test,
# test_delete_at_update_put_with_info_but_missing_container, in sync
# with this one but just missing the X-Delete-At-Container header.
+ policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
@@ -3279,14 +3647,16 @@ class TestObjectController(unittest.TestCase):
'X-Delete-At-Container': '0',
'X-Delete-At-Host': '127.0.0.1:1234',
'X-Delete-At-Partition': '3',
- 'X-Delete-At-Device': 'sdc1'})
+ 'X-Delete-At-Device': 'sdc1',
+ 'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update('PUT', 2, 'a', 'c', 'o',
- req, 'sda1', 0)
+ req, 'sda1', policy)
self.assertEquals(
given_args, [
'PUT', '.expiring_objects', '0000000000', '0000000002-a/c/o',
'127.0.0.1:1234',
'3', 'sdc1', HeaderKeyDict({
+ # the .expiring_objects account is always policy-0
'X-Backend-Storage-Policy-Index': 0,
'x-size': '0',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
@@ -3294,18 +3664,19 @@ class TestObjectController(unittest.TestCase):
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'PUT http://localhost/v1/a/c/o'}),
- 'sda1', 0])
+ 'sda1', policy])
def test_delete_at_update_put_with_info_but_missing_container(self):
# Same as previous test, test_delete_at_update_put_with_info, but just
# missing the X-Delete-At-Container header.
+ policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
- self.object_controller.logger = FakeLogger()
+ self.object_controller.logger = self.logger
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
@@ -3313,16 +3684,18 @@ class TestObjectController(unittest.TestCase):
'X-Trans-Id': '1234',
'X-Delete-At-Host': '127.0.0.1:1234',
'X-Delete-At-Partition': '3',
- 'X-Delete-At-Device': 'sdc1'})
+ 'X-Delete-At-Device': 'sdc1',
+ 'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update('PUT', 2, 'a', 'c', 'o',
- req, 'sda1', 0)
+ req, 'sda1', policy)
self.assertEquals(
- self.object_controller.logger.log_dict['warning'],
- [(('X-Delete-At-Container header must be specified for expiring '
- 'objects background PUT to work properly. Making best guess as '
- 'to the container name for now.',), {})])
+ self.logger.get_lines_for_level('warning'),
+ ['X-Delete-At-Container header must be specified for expiring '
+ 'objects background PUT to work properly. Making best guess as '
+ 'to the container name for now.'])
def test_delete_at_update_delete(self):
+ policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
@@ -3333,9 +3706,10 @@ class TestObjectController(unittest.TestCase):
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 1,
- 'X-Trans-Id': '1234'})
+ 'X-Trans-Id': '1234',
+ 'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update('DELETE', 2, 'a', 'c', 'o',
- req, 'sda1', 0)
+ req, 'sda1', policy)
self.assertEquals(
given_args, [
'DELETE', '.expiring_objects', '0000000000',
@@ -3345,11 +3719,12 @@ class TestObjectController(unittest.TestCase):
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'DELETE http://localhost/v1/a/c/o'}),
- 'sda1', 0])
+ 'sda1', policy])
def test_delete_backend_replication(self):
# If X-Backend-Replication: True delete_at_update should completely
# short-circuit.
+ policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
@@ -3361,12 +3736,14 @@ class TestObjectController(unittest.TestCase):
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
- 'X-Backend-Replication': 'True'})
+ 'X-Backend-Replication': 'True',
+ 'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update(
- 'DELETE', -2, 'a', 'c', 'o', req, 'sda1', 0)
+ 'DELETE', -2, 'a', 'c', 'o', req, 'sda1', policy)
self.assertEquals(given_args, [])
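A note on the pattern running through these delete_at_update tests: each one draws a random policy and expects that same policy object to be handed through to async_update, while updates to the .expiring_objects queue stay pinned to policy 0. A minimal sketch of the selection (pick_policy is a hypothetical helper, not part of the patch):

    import random

    def pick_policy(policies):
        # int(policy) yields the policy's index, which is what goes on
        # the wire in X-Backend-Storage-Policy-Index
        policy = random.choice(list(policies))
        return policy, int(policy)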
def test_POST_calls_delete_at(self):
+ policy = random.choice(list(POLICIES))
given_args = []
def fake_delete_at_update(*args):
@@ -3378,7 +3755,9 @@ class TestObjectController(unittest.TestCase):
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4',
- 'Content-Type': 'application/octet-stream'})
+ 'Content-Type': 'application/octet-stream',
+ 'X-Backend-Storage-Policy-Index': int(policy),
+ 'X-Object-Sysmeta-Ec-Frag-Index': 2})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
@@ -3389,7 +3768,8 @@ class TestObjectController(unittest.TestCase):
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(time()),
- 'Content-Type': 'application/x-test'})
+ 'Content-Type': 'application/x-test',
+ 'X-Backend-Storage-Policy-Index': int(policy)})
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 202)
self.assertEquals(given_args, [])
@@ -3402,13 +3782,14 @@ class TestObjectController(unittest.TestCase):
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp1,
'Content-Type': 'application/x-test',
- 'X-Delete-At': delete_at_timestamp1})
+ 'X-Delete-At': delete_at_timestamp1,
+ 'X-Backend-Storage-Policy-Index': int(policy)})
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 202)
self.assertEquals(
given_args, [
'PUT', int(delete_at_timestamp1), 'a', 'c', 'o',
- given_args[5], 'sda1', 0])
+ given_args[5], 'sda1', policy])
while given_args:
given_args.pop()
@@ -3421,17 +3802,19 @@ class TestObjectController(unittest.TestCase):
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp2,
'Content-Type': 'application/x-test',
- 'X-Delete-At': delete_at_timestamp2})
+ 'X-Delete-At': delete_at_timestamp2,
+ 'X-Backend-Storage-Policy-Index': int(policy)})
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 202)
self.assertEquals(
given_args, [
'PUT', int(delete_at_timestamp2), 'a', 'c', 'o',
- given_args[5], 'sda1', 0,
+ given_args[5], 'sda1', policy,
'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o',
- given_args[5], 'sda1', 0])
+ given_args[5], 'sda1', policy])
def test_PUT_calls_delete_at(self):
+ policy = random.choice(list(POLICIES))
given_args = []
def fake_delete_at_update(*args):
@@ -3443,7 +3826,9 @@ class TestObjectController(unittest.TestCase):
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4',
- 'Content-Type': 'application/octet-stream'})
+ 'Content-Type': 'application/octet-stream',
+ 'X-Backend-Storage-Policy-Index': int(policy),
+ 'X-Object-Sysmeta-Ec-Frag-Index': 4})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
@@ -3457,14 +3842,16 @@ class TestObjectController(unittest.TestCase):
headers={'X-Timestamp': timestamp1,
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
- 'X-Delete-At': delete_at_timestamp1})
+ 'X-Delete-At': delete_at_timestamp1,
+ 'X-Backend-Storage-Policy-Index': int(policy),
+ 'X-Object-Sysmeta-Ec-Frag-Index': 3})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
self.assertEquals(
given_args, [
'PUT', int(delete_at_timestamp1), 'a', 'c', 'o',
- given_args[5], 'sda1', 0])
+ given_args[5], 'sda1', policy])
while given_args:
given_args.pop()
@@ -3478,16 +3865,18 @@ class TestObjectController(unittest.TestCase):
headers={'X-Timestamp': timestamp2,
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
- 'X-Delete-At': delete_at_timestamp2})
+ 'X-Delete-At': delete_at_timestamp2,
+ 'X-Backend-Storage-Policy-Index': int(policy),
+ 'X-Object-Sysmeta-Ec-Frag-Index': 3})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
self.assertEquals(
given_args, [
'PUT', int(delete_at_timestamp2), 'a', 'c', 'o',
- given_args[5], 'sda1', 0,
+ given_args[5], 'sda1', policy,
'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o',
- given_args[5], 'sda1', 0])
+ given_args[5], 'sda1', policy])
def test_GET_but_expired(self):
test_time = time() + 10000
@@ -3742,7 +4131,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.body, 'TEST')
objfile = os.path.join(
self.testdir, 'sda1',
- storage_directory(diskfile.get_data_dir(0), 'p',
+ storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(test_timestamp).internal + '.data')
self.assert_(os.path.isfile(objfile))
@@ -3909,7 +4298,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(given_args, [
'PUT', int(delete_at_timestamp1), 'a', 'c', 'o',
- given_args[5], 'sda1', 0])
+ given_args[5], 'sda1', POLICIES[0]])
while given_args:
given_args.pop()
@@ -3925,7 +4314,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 204)
self.assertEquals(given_args, [
'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o',
- given_args[5], 'sda1', 0])
+ given_args[5], 'sda1', POLICIES[0]])
def test_PUT_delete_at_in_past(self):
req = Request.blank(
@@ -3967,10 +4356,10 @@ class TestObjectController(unittest.TestCase):
def my_tpool_execute(func, *args, **kwargs):
return func(*args, **kwargs)
- was_get_hashes = diskfile.get_hashes
+ was_get_hashes = diskfile.DiskFileManager._get_hashes
was_tpool_exe = tpool.execute
try:
- diskfile.get_hashes = fake_get_hashes
+ diskfile.DiskFileManager._get_hashes = fake_get_hashes
tpool.execute = my_tpool_execute
req = Request.blank('/sda1/p/suff',
environ={'REQUEST_METHOD': 'REPLICATE'},
@@ -3981,7 +4370,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(p_data, {1: 2})
finally:
tpool.execute = was_tpool_exe
- diskfile.get_hashes = was_get_hashes
+ diskfile.DiskFileManager._get_hashes = was_get_hashes
def test_REPLICATE_timeout(self):
@@ -3991,10 +4380,10 @@ class TestObjectController(unittest.TestCase):
def my_tpool_execute(func, *args, **kwargs):
return func(*args, **kwargs)
- was_get_hashes = diskfile.get_hashes
+ was_get_hashes = diskfile.DiskFileManager._get_hashes
was_tpool_exe = tpool.execute
try:
- diskfile.get_hashes = fake_get_hashes
+ diskfile.DiskFileManager._get_hashes = fake_get_hashes
tpool.execute = my_tpool_execute
req = Request.blank('/sda1/p/suff',
environ={'REQUEST_METHOD': 'REPLICATE'},
@@ -4002,7 +4391,7 @@ class TestObjectController(unittest.TestCase):
self.assertRaises(Timeout, self.object_controller.REPLICATE, req)
finally:
tpool.execute = was_tpool_exe
- diskfile.get_hashes = was_get_hashes
+ diskfile.DiskFileManager._get_hashes = was_get_hashes
def test_REPLICATE_insufficient_storage(self):
conf = {'devices': self.testdir, 'mount_check': 'true'}
@@ -4020,9 +4409,9 @@ class TestObjectController(unittest.TestCase):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 507)
- def test_REPLICATION_can_be_called(self):
+ def test_SSYNC_can_be_called(self):
req = Request.blank('/sda1/p/other/suff',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
headers={})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
@@ -4113,7 +4502,7 @@ class TestObjectController(unittest.TestCase):
def test_list_allowed_methods(self):
# Test list of allowed_methods
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
- repl_methods = ['REPLICATE', 'REPLICATION']
+ repl_methods = ['REPLICATE', 'SSYNC']
for method_name in obj_methods:
method = getattr(self.object_controller, method_name)
self.assertFalse(hasattr(method, 'replication'))
@@ -4124,7 +4513,7 @@ class TestObjectController(unittest.TestCase):
def test_correct_allowed_method(self):
# Test correct work for allowed method using
# swift.obj.server.ObjectController.__call__
- inbuf = StringIO()
+ inbuf = WsgiStringIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.app_factory(
@@ -4162,12 +4551,12 @@ class TestObjectController(unittest.TestCase):
def test_not_allowed_method(self):
# Test correct work for NOT allowed method using
# swift.obj.server.ObjectController.__call__
- inbuf = StringIO()
+ inbuf = WsgiStringIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
- 'replication_server': 'false'}, logger=FakeLogger())
+ 'replication_server': 'false'}, logger=self.logger)
def start_response(*args):
# Sends args to outbuf
@@ -4207,11 +4596,10 @@ class TestObjectController(unittest.TestCase):
env, start_response)
self.assertEqual(response, answer)
self.assertEqual(
- self.object_controller.logger.log_dict['info'],
- [(('None - - [01/Jan/1970:02:46:41 +0000] "PUT'
- ' /sda1/p/a/c/o" 405 - "-" "-" "-" 1.0000 "-"'
- ' 1234 -',),
- {})])
+ self.logger.get_lines_for_level('info'),
+ ['None - - [01/Jan/1970:02:46:41 +0000] "PUT'
+ ' /sda1/p/a/c/o" 405 - "-" "-" "-" 1.0000 "-"'
+ ' 1234 -'])
def test_call_incorrect_replication_method(self):
inbuf = StringIO()
@@ -4246,7 +4634,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(outbuf.getvalue()[:4], '405 ')
def test_not_utf8_and_not_logging_requests(self):
- inbuf = StringIO()
+ inbuf = WsgiStringIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.ObjectController(
@@ -4281,17 +4669,17 @@ class TestObjectController(unittest.TestCase):
new=mock_method):
response = self.object_controller.__call__(env, start_response)
self.assertEqual(response, answer)
- self.assertEqual(self.object_controller.logger.log_dict['info'],
- [])
+ self.assertEqual(self.logger.get_lines_for_level('info'), [])
def test__call__returns_500(self):
- inbuf = StringIO()
+ inbuf = WsgiStringIO()
errbuf = StringIO()
outbuf = StringIO()
+ self.logger = debug_logger('test')
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false', 'log_requests': 'false'},
- logger=FakeLogger())
+ logger=self.logger)
def start_response(*args):
# Sends args to outbuf
@@ -4323,24 +4711,21 @@ class TestObjectController(unittest.TestCase):
response = self.object_controller.__call__(env, start_response)
self.assertTrue(response[0].startswith(
'Traceback (most recent call last):'))
- self.assertEqual(
- self.object_controller.logger.log_dict['exception'],
- [(('ERROR __call__ error with %(method)s %(path)s ',
- {'method': 'PUT', 'path': '/sda1/p/a/c/o'}),
- {},
- '')])
- self.assertEqual(self.object_controller.logger.log_dict['INFO'],
- [])
+ self.assertEqual(self.logger.get_lines_for_level('error'), [
+ 'ERROR __call__ error with %(method)s %(path)s : ' % {
+ 'method': 'PUT', 'path': '/sda1/p/a/c/o'},
+ ])
+ self.assertEqual(self.logger.get_lines_for_level('info'), [])
def test_PUT_slow(self):
- inbuf = StringIO()
+ inbuf = WsgiStringIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false', 'log_requests': 'false',
'slow': '10'},
- logger=FakeLogger())
+ logger=self.logger)
def start_response(*args):
# Sends args to outbuf
@@ -4373,14 +4758,14 @@ class TestObjectController(unittest.TestCase):
mock.MagicMock()) as ms:
self.object_controller.__call__(env, start_response)
ms.assert_called_with(9)
- self.assertEqual(
- self.object_controller.logger.log_dict['info'], [])
+ self.assertEqual(self.logger.get_lines_for_level('info'),
+ [])
def test_log_line_format(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
- self.object_controller.logger = FakeLogger()
+ self.object_controller.logger = self.logger
with mock.patch(
'time.gmtime', mock.MagicMock(side_effect=[gmtime(10001.0)])):
with mock.patch(
@@ -4390,13 +4775,16 @@ class TestObjectController(unittest.TestCase):
'os.getpid', mock.MagicMock(return_value=1234)):
req.get_response(self.object_controller)
self.assertEqual(
- self.object_controller.logger.log_dict['info'],
- [(('1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD /sda1/p/a/c/o" '
- '404 - "-" "-" "-" 2.0000 "-" 1234 -',), {})])
+ self.logger.get_lines_for_level('info'),
+ ['1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD /sda1/p/a/c/o" '
+ '404 - "-" "-" "-" 2.0000 "-" 1234 -'])
- @patch_policies([storage_policy.StoragePolicy(0, 'zero', True),
- storage_policy.StoragePolicy(1, 'one', False)])
+ @patch_policies([StoragePolicy(0, 'zero', True),
+ StoragePolicy(1, 'one', False)])
def test_dynamic_datadir(self):
+ # update router post policy patch
+ self.object_controller._diskfile_router = diskfile.DiskFileRouter(
+ self.conf, self.object_controller.logger)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
@@ -4430,7 +4818,50 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertTrue(os.path.isdir(object_dir))
+ def test_storage_policy_index_is_validated(self):
+ # sanity check that index for existing policy is ok
+ ts = (utils.Timestamp(t).internal for t in
+ itertools.count(int(time())))
+ methods = ('PUT', 'POST', 'GET', 'HEAD', 'REPLICATE', 'DELETE')
+ valid_indices = sorted([int(policy) for policy in POLICIES])
+ for index in valid_indices:
+ object_dir = self.testdir + "/sda1/objects"
+ if index > 0:
+ object_dir = "%s-%s" % (object_dir, index)
+ self.assertFalse(os.path.isdir(object_dir))
+ for method in methods:
+ headers = {
+ 'X-Timestamp': ts.next(),
+ 'Content-Type': 'application/x-test',
+ 'X-Backend-Storage-Policy-Index': index}
+ if POLICIES[index].policy_type == EC_POLICY:
+ headers['X-Object-Sysmeta-Ec-Frag-Index'] = '2'
+ req = Request.blank(
+ '/sda1/p/a/c/o',
+ environ={'REQUEST_METHOD': method},
+ headers=headers)
+ req.body = 'VERIFY'
+ resp = req.get_response(self.object_controller)
+ self.assertTrue(is_success(resp.status_int),
+ '%s method failed: %r' % (method, resp.status))
+
+ # index for non-existent policy should return 503
+ index = valid_indices[-1] + 1
+ for method in methods:
+ req = Request.blank('/sda1/p/a/c/o',
+ environ={'REQUEST_METHOD': method},
+ headers={
+ 'X-Timestamp': ts.next(),
+ 'Content-Type': 'application/x-test',
+ 'X-Backend-Storage-Policy-Index': index})
+ req.body = 'VERIFY'
+ object_dir = self.testdir + "/sda1/objects-%s" % index
+ resp = req.get_response(self.object_controller)
+ self.assertEquals(resp.status_int, 503)
+ self.assertFalse(os.path.isdir(object_dir))
+
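The directory names this test asserts follow the policy datadir rule; a minimal sketch, assuming it matches what diskfile.get_data_dir returns for these policies (index 0 keeps the legacy name, higher indexes gain a suffix):

    def data_dir_name(policy_index):
        # 'objects' for policy 0, 'objects-N' for every other policy
        index = int(policy_index)
        return 'objects' if index == 0 else 'objects-%d' % index

    assert data_dir_name(0) == 'objects'
    assert data_dir_name(2) == 'objects-2'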
+@patch_policies(test_policies)
class TestObjectServer(unittest.TestCase):
def setUp(self):
@@ -4442,13 +4873,13 @@ class TestObjectServer(unittest.TestCase):
for device in ('sda1', 'sdb1'):
os.makedirs(os.path.join(self.devices, device))
- conf = {
+ self.conf = {
'devices': self.devices,
'swift_dir': self.tempdir,
'mount_check': 'false',
}
self.logger = debug_logger('test-object-server')
- app = object_server.ObjectController(conf, logger=self.logger)
+ app = object_server.ObjectController(self.conf, logger=self.logger)
sock = listen(('127.0.0.1', 0))
self.server = spawn(wsgi.server, sock, app, utils.NullLogger())
self.port = sock.getsockname()[1]
@@ -4481,6 +4912,23 @@ class TestObjectServer(unittest.TestCase):
resp.read()
resp.close()
+ def test_expect_on_put_footer(self):
+ test_body = 'test'
+ headers = {
+ 'Expect': '100-continue',
+ 'Content-Length': len(test_body),
+ 'X-Timestamp': utils.Timestamp(time()).internal,
+ 'X-Backend-Obj-Metadata-Footer': 'yes',
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
+ }
+ conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
+ 'PUT', '/a/c/o', headers=headers)
+ resp = conn.getexpect()
+ self.assertEqual(resp.status, 100)
+ headers = HeaderKeyDict(resp.getheaders())
+ self.assertEqual(headers['X-Obj-Metadata-Footer'], 'yes')
+ resp.close()
+
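The handshake this test drives, as a raw header trace covering just the lines that matter to the assertions above ('>' is client, '<' is server):

    > PUT /sda1/0/a/c/o HTTP/1.1
    > Expect: 100-continue
    > X-Backend-Obj-Metadata-Footer: yes
    > X-Backend-Obj-Multipart-Mime-Boundary: boundary123
    < HTTP/1.1 100 Continue
    < X-Obj-Metadata-Footer: yes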
def test_expect_on_put_conflict(self):
test_body = 'test'
put_timestamp = utils.Timestamp(time())
@@ -4509,7 +4957,379 @@ class TestObjectServer(unittest.TestCase):
resp.read()
resp.close()
+ def test_multiphase_put_no_mime_boundary(self):
+ test_data = 'obj data'
+ put_timestamp = utils.Timestamp(time()).internal
+ headers = {
+ 'Content-Type': 'text/plain',
+ 'X-Timestamp': put_timestamp,
+ 'Transfer-Encoding': 'chunked',
+ 'Expect': '100-continue',
+ 'X-Backend-Obj-Content-Length': len(test_data),
+ 'X-Backend-Obj-Multiphase-Commit': 'yes',
+ }
+ conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
+ 'PUT', '/a/c/o', headers=headers)
+ resp = conn.getexpect()
+ self.assertEqual(resp.status, 400)
+ resp.read()
+ resp.close()
+
+ def test_expect_on_multiphase_put(self):
+ test_data = 'obj data'
+ test_doc = "\r\n".join((
+ "--boundary123",
+ "X-Document: object body",
+ "",
+ test_data,
+ "--boundary123",
+ ))
+
+ put_timestamp = utils.Timestamp(time()).internal
+ headers = {
+ 'Content-Type': 'text/plain',
+ 'X-Timestamp': put_timestamp,
+ 'Transfer-Encoding': 'chunked',
+ 'Expect': '100-continue',
+ 'X-Backend-Obj-Content-Length': len(test_data),
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
+ 'X-Backend-Obj-Multiphase-Commit': 'yes',
+ }
+ conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
+ 'PUT', '/a/c/o', headers=headers)
+ resp = conn.getexpect()
+ self.assertEqual(resp.status, 100)
+ headers = HeaderKeyDict(resp.getheaders())
+ self.assertEqual(headers['X-Obj-Multiphase-Commit'], 'yes')
+
+ to_send = "%x\r\n%s\r\n0\r\n\r\n" % (len(test_doc), test_doc)
+ conn.send(to_send)
+
+ # verify 100-continue response to mark end of phase1
+ resp = conn.getexpect()
+ self.assertEqual(resp.status, 100)
+ resp.close()
+
+ def test_multiphase_put_metadata_footer(self):
+ # Test 2-phase commit conversation - end of 1st phase marked
+ # by 100-continue response from the object server, with a
+ # successful 2nd phase marked by the presence of a .durable
+ # file along with a .data file in the object data directory
+ test_data = 'obj data'
+ footer_meta = {
+ "X-Object-Sysmeta-Ec-Frag-Index": "2",
+ "Etag": md5(test_data).hexdigest(),
+ }
+ footer_json = json.dumps(footer_meta)
+ footer_meta_cksum = md5(footer_json).hexdigest()
+ test_doc = "\r\n".join((
+ "--boundary123",
+ "X-Document: object body",
+ "",
+ test_data,
+ "--boundary123",
+ "X-Document: object metadata",
+ "Content-MD5: " + footer_meta_cksum,
+ "",
+ footer_json,
+ "--boundary123",
+ ))
+
+ # phase1 - PUT request with object metadata in footer and
+ # multiphase commit conversation
+ put_timestamp = utils.Timestamp(time()).internal
+ headers = {
+ 'Content-Type': 'text/plain',
+ 'X-Timestamp': put_timestamp,
+ 'Transfer-Encoding': 'chunked',
+ 'Expect': '100-continue',
+ 'X-Backend-Storage-Policy-Index': '1',
+ 'X-Backend-Obj-Content-Length': len(test_data),
+ 'X-Backend-Obj-Metadata-Footer': 'yes',
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
+ 'X-Backend-Obj-Multiphase-Commit': 'yes',
+ }
+ conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
+ 'PUT', '/a/c/o', headers=headers)
+ resp = conn.getexpect()
+ self.assertEqual(resp.status, 100)
+ headers = HeaderKeyDict(resp.getheaders())
+ self.assertEqual(headers['X-Obj-Multiphase-Commit'], 'yes')
+ self.assertEqual(headers['X-Obj-Metadata-Footer'], 'yes')
+
+ to_send = "%x\r\n%s\r\n0\r\n\r\n" % (len(test_doc), test_doc)
+ conn.send(to_send)
+ # verify 100-continue response to mark end of phase1
+ resp = conn.getexpect()
+ self.assertEqual(resp.status, 100)
+
+ # send commit confirmation to start phase2
+ commit_confirmation_doc = "\r\n".join((
+ "X-Document: put commit",
+ "",
+ "commit_confirmation",
+ "--boundary123--",
+ ))
+ to_send = "%x\r\n%s\r\n0\r\n\r\n" % \
+ (len(commit_confirmation_doc), commit_confirmation_doc)
+ conn.send(to_send)
+
+ # verify success (2xx) to mark end of phase2
+ resp = conn.getresponse()
+ self.assertEqual(resp.status, 201)
+ resp.read()
+ resp.close()
+
+ # verify successful object data and durable state file write
+ obj_basename = os.path.join(
+ self.devices, 'sda1',
+ storage_directory(diskfile.get_data_dir(POLICIES[1]), '0',
+ hash_path('a', 'c', 'o')),
+ put_timestamp)
+ obj_datafile = obj_basename + '#2.data'
+ self.assertTrue(os.path.isfile(obj_datafile))
+ obj_durablefile = obj_basename + '.durable'
+ self.assertTrue(os.path.isfile(obj_durablefile))
+
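Two framing details recur in the multiphase tests above and below: the body is sent chunked (hex length, CRLF, data, CRLF, then a zero-length terminating chunk), and the metadata footer document carries a Content-MD5 of its JSON payload. A sketch mirroring the literals used in the test (the helper names are illustrative, not part of the patch):

    import json
    from hashlib import md5

    def chunk_encode(payload):
        # one data chunk followed by the zero-length terminating chunk
        return "%x\r\n%s\r\n0\r\n\r\n" % (len(payload), payload)

    def footer_document(footer_meta, boundary='boundary123'):
        footer_json = json.dumps(footer_meta)
        return "\r\n".join((
            "--" + boundary,
            "X-Document: object metadata",
            "Content-MD5: " + md5(footer_json).hexdigest(),
            "",
            footer_json,
            "--" + boundary,
        ))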
+ def test_multiphase_put_no_metadata_footer(self):
+ # Test 2-phase commit conversation, with no metadata footer
+ # at the end of object data - end of 1st phase marked
+ # by 100-continue response from the object server, with a
+ # successful 2nd phase marked by the presence of a .durable
+ # file along with a .data file in the object data directory
+ # (No metadata footer case)
+ test_data = 'obj data'
+ test_doc = "\r\n".join((
+ "--boundary123",
+ "X-Document: object body",
+ "",
+ test_data,
+ "--boundary123",
+ ))
+
+ # phase1 - PUT request with multiphase commit conversation
+ # no object metadata in footer
+ put_timestamp = utils.Timestamp(time()).internal
+ headers = {
+ 'Content-Type': 'text/plain',
+ 'X-Timestamp': put_timestamp,
+ 'Transfer-Encoding': 'chunked',
+ 'Expect': '100-continue',
+ # normally the frag index gets sent in the MIME footer (which this
+ # test doesn't have, see `test_multiphase_put_metadata_footer`),
+ # but the proxy *could* send the frag index in the headers and
+ # this test verifies that it would work.
+ 'X-Object-Sysmeta-Ec-Frag-Index': '2',
+ 'X-Backend-Storage-Policy-Index': '1',
+ 'X-Backend-Obj-Content-Length': len(test_data),
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
+ 'X-Backend-Obj-Multiphase-Commit': 'yes',
+ }
+ conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
+ 'PUT', '/a/c/o', headers=headers)
+ resp = conn.getexpect()
+ self.assertEqual(resp.status, 100)
+ headers = HeaderKeyDict(resp.getheaders())
+ self.assertEqual(headers['X-Obj-Multiphase-Commit'], 'yes')
+
+ to_send = "%x\r\n%s\r\n0\r\n\r\n" % (len(test_doc), test_doc)
+ conn.send(to_send)
+ # verify 100-continue response to mark end of phase1
+ resp = conn.getexpect()
+ self.assertEqual(resp.status, 100)
+
+ # send commit confirmation to start phase2
+ commit_confirmation_doc = "\r\n".join((
+ "X-Document: put commit",
+ "",
+ "commit_confirmation",
+ "--boundary123--",
+ ))
+ to_send = "%x\r\n%s\r\n0\r\n\r\n" % \
+ (len(commit_confirmation_doc), commit_confirmation_doc)
+ conn.send(to_send)
+
+ # verify success (2xx) to mark end of phase2
+ resp = conn.getresponse()
+ self.assertEqual(resp.status, 201)
+ resp.read()
+ resp.close()
+
+ # verify successful object data and durable state file write
+ obj_basename = os.path.join(
+ self.devices, 'sda1',
+ storage_directory(diskfile.get_data_dir(POLICIES[1]), '0',
+ hash_path('a', 'c', 'o')),
+ put_timestamp)
+ obj_datafile = obj_basename + '#2.data'
+ self.assertTrue(os.path.isfile(obj_datafile))
+ obj_durablefile = obj_basename + '.durable'
+ self.assertTrue(os.path.isfile(obj_durablefile))
+
+ def test_multiphase_put_draining(self):
+ # We want to ensure that we read the whole response body even if
+ # it's multipart MIME and there are document parts that we don't
+ # expect or understand. This'll help save our bacon if we ever jam
+ # more stuff in there.
+ in_a_timeout = [False]
+
+ # inherit from BaseException so we get a stack trace when the test
+ # fails instead of just a 500
+ class NotInATimeout(BaseException):
+ pass
+
+ class FakeTimeout(BaseException):
+ def __enter__(self):
+ in_a_timeout[0] = True
+
+ def __exit__(self, typ, value, tb):
+ in_a_timeout[0] = False
+
+ class PickyWsgiStringIO(WsgiStringIO):
+ def read(self, *a, **kw):
+ if not in_a_timeout[0]:
+ raise NotInATimeout()
+ return WsgiStringIO.read(self, *a, **kw)
+
+ def readline(self, *a, **kw):
+ if not in_a_timeout[0]:
+ raise NotInATimeout()
+ return WsgiStringIO.readline(self, *a, **kw)
+
+ test_data = 'obj data'
+ footer_meta = {
+ "X-Object-Sysmeta-Ec-Frag-Index": "7",
+ "Etag": md5(test_data).hexdigest(),
+ }
+ footer_json = json.dumps(footer_meta)
+ footer_meta_cksum = md5(footer_json).hexdigest()
+ test_doc = "\r\n".join((
+ "--boundary123",
+ "X-Document: object body",
+ "",
+ test_data,
+ "--boundary123",
+ "X-Document: object metadata",
+ "Content-MD5: " + footer_meta_cksum,
+ "",
+ footer_json,
+ "--boundary123",
+ "X-Document: we got cleverer",
+ "",
+ "stuff stuff meaningless stuuuuuuuuuuff",
+ "--boundary123",
+ "X-Document: we got even cleverer; can you believe it?",
+ "Waneshaft: ambifacient lunar",
+ "Casing: malleable logarithmic",
+ "",
+ "potato potato potato potato potato potato potato",
+ "--boundary123--"
+ ))
+
+ # phase1 - PUT request with object metadata in footer and
+ # multiphase commit conversation
+ put_timestamp = utils.Timestamp(time()).internal
+ headers = {
+ 'Content-Type': 'text/plain',
+ 'X-Timestamp': put_timestamp,
+ 'Transfer-Encoding': 'chunked',
+ 'Expect': '100-continue',
+ 'X-Backend-Storage-Policy-Index': '1',
+ 'X-Backend-Obj-Content-Length': len(test_data),
+ 'X-Backend-Obj-Metadata-Footer': 'yes',
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
+ }
+ wsgi_input = PickyWsgiStringIO(test_doc)
+ req = Request.blank(
+ "/sda1/0/a/c/o",
+ environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': wsgi_input},
+ headers=headers)
+
+ app = object_server.ObjectController(self.conf, logger=self.logger)
+ with mock.patch('swift.obj.server.ChunkReadTimeout', FakeTimeout):
+ resp = req.get_response(app)
+ self.assertEqual(resp.status_int, 201) # sanity check
+
+ in_a_timeout[0] = True # so we can check without an exception
+ self.assertEqual(wsgi_input.read(), '') # we read all the bytes
+
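What the draining test pins down, sketched in isolation: the server must read every remaining MIME document, recognized or not, before it answers. Assuming a file-like iterator of parts (all names here are hypothetical):

    def drain(mime_documents_iter, chunk_size=4096):
        # read and throw away whatever is left so the connection can be
        # reused cleanly afterwards
        for document in mime_documents_iter:
            while document.read(chunk_size):
                pass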
+ def test_multiphase_put_bad_commit_message(self):
+ # Test 2-phase commit conversation - end of 1st phase marked
+ # by 100-continue response from the object server, with the 2nd
+ # phase commit confirmation arriving corrupted
+ test_data = 'obj data'
+ footer_meta = {
+ "X-Object-Sysmeta-Ec-Frag-Index": "7",
+ "Etag": md5(test_data).hexdigest(),
+ }
+ footer_json = json.dumps(footer_meta)
+ footer_meta_cksum = md5(footer_json).hexdigest()
+ test_doc = "\r\n".join((
+ "--boundary123",
+ "X-Document: object body",
+ "",
+ test_data,
+ "--boundary123",
+ "X-Document: object metadata",
+ "Content-MD5: " + footer_meta_cksum,
+ "",
+ footer_json,
+ "--boundary123",
+ ))
+
+ # phase1 - PUT request with object metadata in footer and
+ # multiphase commit conversation
+ put_timestamp = utils.Timestamp(time()).internal
+ headers = {
+ 'Content-Type': 'text/plain',
+ 'X-Timestamp': put_timestamp,
+ 'Transfer-Encoding': 'chunked',
+ 'Expect': '100-continue',
+ 'X-Backend-Storage-Policy-Index': '1',
+ 'X-Backend-Obj-Content-Length': len(test_data),
+ 'X-Backend-Obj-Metadata-Footer': 'yes',
+ 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
+ 'X-Backend-Obj-Multiphase-Commit': 'yes',
+ }
+ conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
+ 'PUT', '/a/c/o', headers=headers)
+ resp = conn.getexpect()
+ self.assertEqual(resp.status, 100)
+ headers = HeaderKeyDict(resp.getheaders())
+ self.assertEqual(headers['X-Obj-Multiphase-Commit'], 'yes')
+ self.assertEqual(headers['X-Obj-Metadata-Footer'], 'yes')
+
+ to_send = "%x\r\n%s\r\n0\r\n\r\n" % (len(test_doc), test_doc)
+ conn.send(to_send)
+ # verify 100-continue response to mark end of phase1
+ resp = conn.getexpect()
+ self.assertEqual(resp.status, 100)
+
+ # send commit confirmation to start phase2
+ commit_confirmation_doc = "\r\n".join((
+ "junkjunk",
+ "--boundary123--",
+ ))
+ to_send = "%x\r\n%s\r\n0\r\n\r\n" % \
+ (len(commit_confirmation_doc), commit_confirmation_doc)
+ conn.send(to_send)
+ resp = conn.getresponse()
+ self.assertEqual(resp.status, 500)
+ resp.read()
+ resp.close()
+ # verify that durable file was NOT created
+ obj_basename = os.path.join(
+ self.devices, 'sda1',
+ storage_directory(diskfile.get_data_dir(1), '0',
+ hash_path('a', 'c', 'o')),
+ put_timestamp)
+ obj_datafile = obj_basename + '#7.data'
+ self.assertTrue(os.path.isfile(obj_datafile))
+ obj_durablefile = obj_basename + '.durable'
+ self.assertFalse(os.path.isfile(obj_durablefile))
+
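The on-disk invariant asserted here and in the happy-path tests above, as a sketch (assumption: EC .data filenames embed the fragment index after a '#'):

    put_timestamp = '1430000000.00000'  # hypothetical internal timestamp
    frag_index = 7
    datafile = '%s#%d.data' % (put_timestamp, frag_index)  # phase 1
    durablefile = '%s.durable' % put_timestamp  # only after a clean commit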
+@patch_policies
class TestZeroCopy(unittest.TestCase):
"""Test the object server's zero-copy functionality"""
diff --git a/test/unit/obj/test_ssync_receiver.py b/test/unit/obj/test_ssync_receiver.py
index 9af76185b..4a030c821 100644
--- a/test/unit/obj/test_ssync_receiver.py
+++ b/test/unit/obj/test_ssync_receiver.py
@@ -27,6 +27,7 @@ from swift.common import constraints
from swift.common import exceptions
from swift.common import swob
from swift.common import utils
+from swift.common.storage_policy import POLICIES
from swift.obj import diskfile
from swift.obj import server
from swift.obj import ssync_receiver
@@ -34,6 +35,7 @@ from swift.obj import ssync_receiver
from test import unit
+@unit.patch_policies()
class TestReceiver(unittest.TestCase):
def setUp(self):
@@ -46,12 +48,12 @@ class TestReceiver(unittest.TestCase):
self.testdir = os.path.join(
tempfile.mkdtemp(), 'tmp_test_ssync_receiver')
utils.mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
- conf = {
+ self.conf = {
'devices': self.testdir,
'mount_check': 'false',
'replication_one_per_device': 'false',
'log_requests': 'false'}
- self.controller = server.ObjectController(conf)
+ self.controller = server.ObjectController(self.conf)
self.controller.bytes_per_sync = 1
self.account1 = 'a'
@@ -91,14 +93,14 @@ class TestReceiver(unittest.TestCase):
lines.append(line)
return lines
- def test_REPLICATION_semaphore_locked(self):
+ def test_SSYNC_semaphore_locked(self):
with mock.patch.object(
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
self.controller.logger = mock.MagicMock()
mocked_replication_semaphore.acquire.return_value = False
req = swob.Request.blank(
- '/device/partition', environ={'REQUEST_METHOD': 'REPLICATION'})
+ '/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
@@ -109,13 +111,13 @@ class TestReceiver(unittest.TestCase):
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
- def test_REPLICATION_calls_replication_lock(self):
+ def test_SSYNC_calls_replication_lock(self):
with mock.patch.object(
- self.controller._diskfile_mgr, 'replication_lock') as \
- mocked_replication_lock:
+ self.controller._diskfile_router[POLICIES.legacy],
+ 'replication_lock') as mocked_replication_lock:
req = swob.Request.blank(
'/sda1/1',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
@@ -130,7 +132,7 @@ class TestReceiver(unittest.TestCase):
def test_Receiver_with_default_storage_policy(self):
req = swob.Request.blank(
'/sda1/1',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
@@ -140,13 +142,15 @@ class TestReceiver(unittest.TestCase):
body_lines,
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
- self.assertEqual(rcvr.policy_idx, 0)
+ self.assertEqual(rcvr.policy, POLICIES[0])
- @unit.patch_policies()
def test_Receiver_with_storage_policy_index_header(self):
+ # update router post policy patch
+ self.controller._diskfile_router = diskfile.DiskFileRouter(
+ self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
- environ={'REQUEST_METHOD': 'REPLICATION',
+ environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
@@ -157,19 +161,58 @@ class TestReceiver(unittest.TestCase):
body_lines,
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
- self.assertEqual(rcvr.policy_idx, 1)
+ self.assertEqual(rcvr.policy, POLICIES[1])
+ self.assertEqual(rcvr.frag_index, None)
- def test_REPLICATION_replication_lock_fail(self):
+ def test_Receiver_with_bad_storage_policy_index_header(self):
+ valid_indices = sorted([int(policy) for policy in POLICIES])
+ bad_index = valid_indices[-1] + 1
+ req = swob.Request.blank(
+ '/sda1/1',
+ environ={'REQUEST_METHOD': 'SSYNC',
+ 'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '0',
+ 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': bad_index},
+ body=':MISSING_CHECK: START\r\n'
+ ':MISSING_CHECK: END\r\n'
+ ':UPDATES: START\r\n:UPDATES: END\r\n')
+ self.controller.logger = mock.MagicMock()
+ receiver = ssync_receiver.Receiver(self.controller, req)
+ body_lines = [chunk.strip() for chunk in receiver() if chunk.strip()]
+ self.assertEqual(body_lines, [":ERROR: 503 'No policy with index 2'"])
+
+ @unit.patch_policies()
+ def test_Receiver_with_frag_index_header(self):
+ # update router post policy patch
+ self.controller._diskfile_router = diskfile.DiskFileRouter(
+ self.conf, self.controller.logger)
+ req = swob.Request.blank(
+ '/sda1/1',
+ environ={'REQUEST_METHOD': 'SSYNC',
+ 'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '7',
+ 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
+ body=':MISSING_CHECK: START\r\n'
+ ':MISSING_CHECK: END\r\n'
+ ':UPDATES: START\r\n:UPDATES: END\r\n')
+ rcvr = ssync_receiver.Receiver(self.controller, req)
+ body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
+ self.assertEqual(
+ body_lines,
+ [':MISSING_CHECK: START', ':MISSING_CHECK: END',
+ ':UPDATES: START', ':UPDATES: END'])
+ self.assertEqual(rcvr.policy, POLICIES[1])
+ self.assertEqual(rcvr.frag_index, 7)
+
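Why several receiver tests rebuild _diskfile_router after patching policies: the router captures the policy table at construction time, so a controller built in setUp still routes against the pre-patch policies. A toy sketch of the lookup shape (the real DiskFileRouter picks a manager class per policy type; this dict is only illustrative):

    class ToyDiskFileRouter(object):
        def __init__(self, policies, manager_factory):
            # snapshot taken here - patching POLICIES later has no effect
            self._managers = dict(
                (policy, manager_factory(policy)) for policy in policies)

        def __getitem__(self, policy):
            return self._managers[policy]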
+ def test_SSYNC_replication_lock_fail(self):
def _mock(path):
with exceptions.ReplicationLockTimeout(0.01, '/somewhere/' + path):
eventlet.sleep(0.05)
with mock.patch.object(
- self.controller._diskfile_mgr, 'replication_lock', _mock):
- self.controller._diskfile_mgr
+ self.controller._diskfile_router[POLICIES.legacy],
+ 'replication_lock', _mock):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
@@ -178,19 +221,19 @@ class TestReceiver(unittest.TestCase):
self.body_lines(resp.body),
[":ERROR: 0 '0.01 seconds: /somewhere/sda1'"])
self.controller.logger.debug.assert_called_once_with(
- 'None/sda1/1 REPLICATION LOCK TIMEOUT: 0.01 seconds: '
+ 'None/sda1/1 SSYNC LOCK TIMEOUT: 0.01 seconds: '
'/somewhere/sda1')
- def test_REPLICATION_initial_path(self):
+ def test_SSYNC_initial_path(self):
with mock.patch.object(
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
req = swob.Request.blank(
- '/device', environ={'REQUEST_METHOD': 'REPLICATION'})
+ '/device', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
- [":ERROR: 0 'Invalid path: /device'"])
+ [":ERROR: 400 'Invalid path: /device'"])
self.assertEqual(resp.status_int, 200)
self.assertFalse(mocked_replication_semaphore.acquire.called)
self.assertFalse(mocked_replication_semaphore.release.called)
@@ -199,11 +242,11 @@ class TestReceiver(unittest.TestCase):
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
req = swob.Request.blank(
- '/device/', environ={'REQUEST_METHOD': 'REPLICATION'})
+ '/device/', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
- [":ERROR: 0 'Invalid path: /device/'"])
+ [":ERROR: 400 'Invalid path: /device/'"])
self.assertEqual(resp.status_int, 200)
self.assertFalse(mocked_replication_semaphore.acquire.called)
self.assertFalse(mocked_replication_semaphore.release.called)
@@ -212,7 +255,7 @@ class TestReceiver(unittest.TestCase):
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
req = swob.Request.blank(
- '/device/partition', environ={'REQUEST_METHOD': 'REPLICATION'})
+ '/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
@@ -226,28 +269,29 @@ class TestReceiver(unittest.TestCase):
mocked_replication_semaphore:
req = swob.Request.blank(
'/device/partition/junk',
- environ={'REQUEST_METHOD': 'REPLICATION'})
+ environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
- [":ERROR: 0 'Invalid path: /device/partition/junk'"])
+ [":ERROR: 400 'Invalid path: /device/partition/junk'"])
self.assertEqual(resp.status_int, 200)
self.assertFalse(mocked_replication_semaphore.acquire.called)
self.assertFalse(mocked_replication_semaphore.release.called)
- def test_REPLICATION_mount_check(self):
+ def test_SSYNC_mount_check(self):
with contextlib.nested(
mock.patch.object(
self.controller, 'replication_semaphore'),
mock.patch.object(
- self.controller._diskfile_mgr, 'mount_check', False),
+ self.controller._diskfile_router[POLICIES.legacy],
+ 'mount_check', False),
mock.patch.object(
constraints, 'check_mount', return_value=False)) as (
mocked_replication_semaphore,
mocked_mount_check,
mocked_check_mount):
req = swob.Request.blank(
- '/device/partition', environ={'REQUEST_METHOD': 'REPLICATION'})
+ '/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
@@ -259,14 +303,15 @@ class TestReceiver(unittest.TestCase):
mock.patch.object(
self.controller, 'replication_semaphore'),
mock.patch.object(
- self.controller._diskfile_mgr, 'mount_check', True),
+ self.controller._diskfile_router[POLICIES.legacy],
+ 'mount_check', True),
mock.patch.object(
constraints, 'check_mount', return_value=False)) as (
mocked_replication_semaphore,
mocked_mount_check,
mocked_check_mount):
req = swob.Request.blank(
- '/device/partition', environ={'REQUEST_METHOD': 'REPLICATION'})
+ '/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
@@ -275,21 +320,23 @@ class TestReceiver(unittest.TestCase):
"device</p></html>'"])
self.assertEqual(resp.status_int, 200)
mocked_check_mount.assert_called_once_with(
- self.controller._diskfile_mgr.devices, 'device')
+ self.controller._diskfile_router[POLICIES.legacy].devices,
+ 'device')
mocked_check_mount.reset_mock()
mocked_check_mount.return_value = True
req = swob.Request.blank(
- '/device/partition', environ={'REQUEST_METHOD': 'REPLICATION'})
+ '/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':ERROR: 0 "Looking for :MISSING_CHECK: START got \'\'"'])
self.assertEqual(resp.status_int, 200)
mocked_check_mount.assert_called_once_with(
- self.controller._diskfile_mgr.devices, 'device')
+ self.controller._diskfile_router[POLICIES.legacy].devices,
+ 'device')
- def test_REPLICATION_Exception(self):
+ def test_SSYNC_Exception(self):
class _Wrapper(StringIO.StringIO):
@@ -306,7 +353,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\nBad content is here')
req.remote_addr = '1.2.3.4'
@@ -324,7 +371,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger.exception.assert_called_once_with(
'1.2.3.4/device/partition EXCEPTION in replication.Receiver')
- def test_REPLICATION_Exception_Exception(self):
+ def test_SSYNC_Exception_Exception(self):
class _Wrapper(StringIO.StringIO):
@@ -341,7 +388,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\nBad content is here')
req.remote_addr = mock.MagicMock()
@@ -384,7 +431,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
'hash ts\r\n'
':MISSING_CHECK: END\r\n'
@@ -426,7 +473,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
'hash ts\r\n'
':MISSING_CHECK: END\r\n'
@@ -448,7 +495,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
@@ -466,7 +513,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + '\r\n'
@@ -484,9 +531,36 @@ class TestReceiver(unittest.TestCase):
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
+ def test_MISSING_CHECK_extra_line_parts(self):
+ # check that rx tolerates extra parts in missing check lines to
+ # allow for protocol upgrades
+ extra_1 = 'extra'
+ extra_2 = 'multiple extra parts'
+ self.controller.logger = mock.MagicMock()
+ req = swob.Request.blank(
+ '/sda1/1',
+ environ={'REQUEST_METHOD': 'SSYNC'},
+ body=':MISSING_CHECK: START\r\n' +
+ self.hash1 + ' ' + self.ts1 + ' ' + extra_1 + '\r\n' +
+ self.hash2 + ' ' + self.ts2 + ' ' + extra_2 + '\r\n'
+ ':MISSING_CHECK: END\r\n'
+ ':UPDATES: START\r\n:UPDATES: END\r\n')
+ resp = req.get_response(self.controller)
+ self.assertEqual(
+ self.body_lines(resp.body),
+ [':MISSING_CHECK: START',
+ self.hash1,
+ self.hash2,
+ ':MISSING_CHECK: END',
+ ':UPDATES: START', ':UPDATES: END'])
+ self.assertEqual(resp.status_int, 200)
+ self.assertFalse(self.controller.logger.error.called)
+ self.assertFalse(self.controller.logger.exception.called)
+
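The tolerance under test, sketched as a parser: only the first two fields of a missing-check line are significant, and trailing fields are ignored so newer senders can extend the protocol (parse_missing_check_line is a hypothetical helper):

    def parse_missing_check_line(line):
        parts = line.split()
        object_hash, timestamp = parts[0], parts[1]
        # parts[2:] may carry fields added by newer senders; ignore them
        return object_hash, timestamp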
def test_MISSING_CHECK_have_one_exact(self):
object_dir = utils.storage_directory(
- os.path.join(self.testdir, 'sda1', diskfile.get_data_dir(0)),
+ os.path.join(self.testdir, 'sda1',
+ diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, self.ts1 + '.data'), 'w+')
@@ -498,7 +572,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + '\r\n'
@@ -515,10 +589,13 @@ class TestReceiver(unittest.TestCase):
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
- @unit.patch_policies
def test_MISSING_CHECK_storage_policy(self):
+ # update router post policy patch
+ self.controller._diskfile_router = diskfile.DiskFileRouter(
+ self.conf, self.controller.logger)
object_dir = utils.storage_directory(
- os.path.join(self.testdir, 'sda1', diskfile.get_data_dir(1)),
+ os.path.join(self.testdir, 'sda1',
+ diskfile.get_data_dir(POLICIES[1])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, self.ts1 + '.data'), 'w+')
@@ -530,7 +607,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
- environ={'REQUEST_METHOD': 'REPLICATION',
+ environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
@@ -550,7 +627,8 @@ class TestReceiver(unittest.TestCase):
def test_MISSING_CHECK_have_one_newer(self):
object_dir = utils.storage_directory(
- os.path.join(self.testdir, 'sda1', diskfile.get_data_dir(0)),
+ os.path.join(self.testdir, 'sda1',
+ diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
newer_ts1 = utils.normalize_timestamp(float(self.ts1) + 1)
@@ -564,7 +642,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + '\r\n'
@@ -583,7 +661,8 @@ class TestReceiver(unittest.TestCase):
def test_MISSING_CHECK_have_one_older(self):
object_dir = utils.storage_directory(
- os.path.join(self.testdir, 'sda1', diskfile.get_data_dir(0)),
+ os.path.join(self.testdir, 'sda1',
+ diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
older_ts1 = utils.normalize_timestamp(float(self.ts1) - 1)
@@ -597,7 +676,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + '\r\n'
@@ -639,7 +718,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
@@ -686,7 +765,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
@@ -729,7 +808,7 @@ class TestReceiver(unittest.TestCase):
mock_shutdown_safe, mock_delete):
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
@@ -751,7 +830,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'bad_subrequest_line\r\n')
@@ -770,7 +849,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
@@ -790,7 +869,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n')
@@ -807,7 +886,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
@@ -824,7 +903,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
@@ -843,7 +922,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n'
@@ -861,7 +940,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
@@ -879,7 +958,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n\r\n')
@@ -896,7 +975,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n'
@@ -926,7 +1005,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n\r\n'
@@ -949,7 +1028,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n\r\n'
@@ -975,7 +1054,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n\r\n'
@@ -1003,7 +1082,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n\r\n'
@@ -1036,7 +1115,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n'
@@ -1072,8 +1151,10 @@ class TestReceiver(unittest.TestCase):
'content-encoding specialty-header')})
self.assertEqual(req.read_body, '1')
- @unit.patch_policies()
def test_UPDATES_with_storage_policy(self):
+ # update router post policy patch
+ self.controller._diskfile_router = diskfile.DiskFileRouter(
+ self.conf, self.controller.logger)
_PUT_request = [None]
@server.public
@@ -1086,7 +1167,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION',
+ environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
@@ -1135,7 +1216,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
@@ -1170,7 +1251,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'BONK /a/c/o\r\n'
@@ -1206,7 +1287,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o1\r\n'
@@ -1317,7 +1398,7 @@ class TestReceiver(unittest.TestCase):
self.assertEqual(_requests, [])
def test_UPDATES_subreq_does_not_read_all(self):
- # This tests that if a REPLICATION subrequest fails and doesn't read
+ # This tests that if an SSYNC subrequest fails and doesn't read
# all the subrequest body that it will read and throw away the rest of
# the body before moving on to the next subrequest.
# If you comment out the part in ssync_receiver where it does:
@@ -1346,7 +1427,7 @@ class TestReceiver(unittest.TestCase):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
- environ={'REQUEST_METHOD': 'REPLICATION'},
+ environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o1\r\n'
diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py
index 87efd64cc..42bd610eb 100644
--- a/test/unit/obj/test_ssync_sender.py
+++ b/test/unit/obj/test_ssync_sender.py
@@ -22,18 +22,24 @@ import time
import unittest
import eventlet
+import itertools
import mock
from swift.common import exceptions, utils
-from swift.obj import ssync_sender, diskfile
+from swift.common.storage_policy import POLICIES
+from swift.common.exceptions import DiskFileNotExist, DiskFileError, \
+ DiskFileDeleted
+from swift.common.swob import Request
+from swift.common.utils import Timestamp, FileLikeIter
+from swift.obj import ssync_sender, diskfile, server, ssync_receiver
+from swift.obj.reconstructor import RebuildingECDiskFileStream
-from test.unit import DebugLogger, patch_policies
+from test.unit import debug_logger, patch_policies
class FakeReplicator(object):
-
- def __init__(self, testdir):
- self.logger = mock.MagicMock()
+ def __init__(self, testdir, policy=None):
+ self.logger = debug_logger('test-ssync-sender')
self.conn_timeout = 1
self.node_timeout = 2
self.http_timeout = 3
@@ -43,7 +49,9 @@ class FakeReplicator(object):
'devices': testdir,
'mount_check': 'false',
}
- self._diskfile_mgr = diskfile.DiskFileManager(conf, DebugLogger())
+ policy = POLICIES.default if policy is None else policy
+ self._diskfile_router = diskfile.DiskFileRouter(conf, self.logger)
+ self._diskfile_mgr = self._diskfile_router[policy]
class NullBufferedHTTPConnection(object):
@@ -90,39 +98,49 @@ class FakeConnection(object):
self.closed = True
-class TestSender(unittest.TestCase):
-
+class BaseTestSender(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.testdir = os.path.join(self.tmpdir, 'tmp_test_ssync_sender')
- self.replicator = FakeReplicator(self.testdir)
- self.sender = ssync_sender.Sender(self.replicator, None, None, None)
+ utils.mkdirs(os.path.join(self.testdir, 'dev'))
+ self.daemon = FakeReplicator(self.testdir)
+ self.sender = ssync_sender.Sender(self.daemon, None, None, None)
def tearDown(self):
- shutil.rmtree(self.tmpdir, ignore_errors=1)
+ shutil.rmtree(self.tmpdir, ignore_errors=True)
def _make_open_diskfile(self, device='dev', partition='9',
account='a', container='c', obj='o', body='test',
- extra_metadata=None, policy_idx=0):
+ extra_metadata=None, policy=None,
+ frag_index=None, timestamp=None, df_mgr=None):
+ policy = policy or POLICIES.legacy
object_parts = account, container, obj
- req_timestamp = utils.normalize_timestamp(time.time())
- df = self.sender.daemon._diskfile_mgr.get_diskfile(
- device, partition, *object_parts, policy_idx=policy_idx)
+ timestamp = Timestamp(time.time()) if timestamp is None else timestamp
+ if df_mgr is None:
+ df_mgr = self.daemon._diskfile_router[policy]
+ df = df_mgr.get_diskfile(
+ device, partition, *object_parts, policy=policy,
+ frag_index=frag_index)
content_length = len(body)
etag = hashlib.md5(body).hexdigest()
with df.create() as writer:
writer.write(body)
metadata = {
- 'X-Timestamp': req_timestamp,
- 'Content-Length': content_length,
+ 'X-Timestamp': timestamp.internal,
+ 'Content-Length': str(content_length),
'ETag': etag,
}
if extra_metadata:
metadata.update(extra_metadata)
writer.put(metadata)
+ writer.commit(timestamp)
df.open()
return df
+
+@patch_policies()
+class TestSender(BaseTestSender):
+
def test_call_catches_MessageTimeout(self):
def connect(self):
@@ -134,16 +152,16 @@ class TestSender(unittest.TestCase):
with mock.patch.object(ssync_sender.Sender, 'connect', connect):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
- job = dict(partition='9')
- self.sender = ssync_sender.Sender(self.replicator, node, job, None)
+ job = dict(partition='9', policy=POLICIES.legacy)
+ self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
success, candidates = self.sender()
self.assertFalse(success)
- self.assertEquals(candidates, set())
- call = self.replicator.logger.error.mock_calls[0]
- self.assertEqual(
- call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
- self.assertEqual(str(call[1][-1]), '1 second: test connect')
+ self.assertEquals(candidates, {})
+ error_lines = self.daemon.logger.get_lines_for_level('error')
+ self.assertEqual(1, len(error_lines))
+ self.assertEqual('1.2.3.4:5678/sda1/9 1 second: test connect',
+ error_lines[0])
def test_call_catches_ReplicationException(self):
@@ -153,45 +171,44 @@ class TestSender(unittest.TestCase):
with mock.patch.object(ssync_sender.Sender, 'connect', connect):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
- job = dict(partition='9')
- self.sender = ssync_sender.Sender(self.replicator, node, job, None)
+ job = dict(partition='9', policy=POLICIES.legacy)
+ self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
success, candidates = self.sender()
self.assertFalse(success)
- self.assertEquals(candidates, set())
- call = self.replicator.logger.error.mock_calls[0]
- self.assertEqual(
- call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
- self.assertEqual(str(call[1][-1]), 'test connect')
+ self.assertEquals(candidates, {})
+ error_lines = self.daemon.logger.get_lines_for_level('error')
+ self.assertEqual(1, len(error_lines))
+ self.assertEqual('1.2.3.4:5678/sda1/9 test connect',
+ error_lines[0])
def test_call_catches_other_exceptions(self):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
- job = dict(partition='9')
- self.sender = ssync_sender.Sender(self.replicator, node, job, None)
+ job = dict(partition='9', policy=POLICIES.legacy)
+ self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
self.sender.connect = 'cause exception'
success, candidates = self.sender()
self.assertFalse(success)
- self.assertEquals(candidates, set())
- call = self.replicator.logger.exception.mock_calls[0]
- self.assertEqual(
- call[1],
- ('%s:%s/%s/%s EXCEPTION in replication.Sender', '1.2.3.4', 5678,
- 'sda1', '9'))
+ self.assertEquals(candidates, {})
+ error_lines = self.daemon.logger.get_lines_for_level('error')
+ for line in error_lines:
+ self.assertTrue(line.startswith(
+ '1.2.3.4:5678/sda1/9 EXCEPTION in replication.Sender:'))
def test_call_catches_exception_handling_exception(self):
- node = dict(replication_ip='1.2.3.4', replication_port=5678,
- device='sda1')
- job = None # Will cause inside exception handler to fail
- self.sender = ssync_sender.Sender(self.replicator, node, job, None)
+ job = node = None # Will cause inside exception handler to fail
+ self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
self.sender.connect = 'cause exception'
success, candidates = self.sender()
self.assertFalse(success)
- self.assertEquals(candidates, set())
- self.replicator.logger.exception.assert_called_once_with(
- 'EXCEPTION in replication.Sender')
+ self.assertEquals(candidates, {})
+ error_lines = self.daemon.logger.get_lines_for_level('error')
+ for line in error_lines:
+ self.assertTrue(line.startswith(
+ 'EXCEPTION in replication.Sender'))
def test_call_calls_others(self):
self.sender.suffixes = ['abc']
@@ -201,7 +218,7 @@ class TestSender(unittest.TestCase):
self.sender.disconnect = mock.MagicMock()
success, candidates = self.sender()
self.assertTrue(success)
- self.assertEquals(candidates, set())
+ self.assertEquals(candidates, {})
self.sender.connect.assert_called_once_with()
self.sender.missing_check.assert_called_once_with()
self.sender.updates.assert_called_once_with()
@@ -216,18 +233,17 @@ class TestSender(unittest.TestCase):
self.sender.failures = 1
success, candidates = self.sender()
self.assertFalse(success)
- self.assertEquals(candidates, set())
+ self.assertEquals(candidates, {})
self.sender.connect.assert_called_once_with()
self.sender.missing_check.assert_called_once_with()
self.sender.updates.assert_called_once_with()
self.sender.disconnect.assert_called_once_with()
- @patch_policies
def test_connect(self):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
- device='sda1')
- job = dict(partition='9', policy_idx=1)
- self.sender = ssync_sender.Sender(self.replicator, node, job, None)
+ device='sda1', index=0)
+ job = dict(partition='9', policy=POLICIES[1])
+ self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
with mock.patch(
'swift.obj.ssync_sender.bufferedhttp.BufferedHTTPConnection'
@@ -240,11 +256,12 @@ class TestSender(unittest.TestCase):
mock_conn_class.assert_called_once_with('1.2.3.4:5678')
expectations = {
'putrequest': [
- mock.call('REPLICATION', '/sda1/9'),
+ mock.call('SSYNC', '/sda1/9'),
],
'putheader': [
mock.call('Transfer-Encoding', 'chunked'),
mock.call('X-Backend-Storage-Policy-Index', 1),
+ mock.call('X-Backend-Ssync-Frag-Index', 0),
],
'endheaders': [mock.call()],
}
@@ -255,10 +272,80 @@ class TestSender(unittest.TestCase):
method_name, mock_method.mock_calls,
expected_calls))
+ def test_call(self):
+ def patch_sender(sender):
+ sender.connect = mock.MagicMock()
+ sender.missing_check = mock.MagicMock()
+ sender.updates = mock.MagicMock()
+ sender.disconnect = mock.MagicMock()
+
+ node = dict(replication_ip='1.2.3.4', replication_port=5678,
+ device='sda1')
+ job = {
+ 'device': 'dev',
+ 'partition': '9',
+ 'policy': POLICIES.legacy,
+ 'frag_index': 0,
+ }
+ available_map = dict([('9d41d8cd98f00b204e9800998ecf0abc',
+ '1380144470.00000'),
+ ('9d41d8cd98f00b204e9800998ecf0def',
+ '1380144472.22222'),
+ ('9d41d8cd98f00b204e9800998ecf1def',
+ '1380144474.44444')])
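+ # available_map maps object hash -> timestamp, mirroring what
+ # missing_check() builds from yield_hashes for the local diskfiles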
+
+ # no suffixes -> no work done
+ sender = ssync_sender.Sender(
+ self.daemon, node, job, [], remote_check_objs=None)
+ patch_sender(sender)
+ sender.available_map = available_map
+ success, candidates = sender()
+ self.assertTrue(success)
+ self.assertEqual({}, candidates)
+
+ # all objs in sync
+ sender = ssync_sender.Sender(
+ self.daemon, node, job, ['ignored'], remote_check_objs=None)
+ patch_sender(sender)
+ sender.available_map = available_map
+ success, candidates = sender()
+ self.assertTrue(success)
+ self.assertEqual(available_map, candidates)
+
+ # one obj not in sync, sync'ing faked, all objs should be in return set
+ wanted = '9d41d8cd98f00b204e9800998ecf0def'
+ sender = ssync_sender.Sender(
+ self.daemon, node, job, ['ignored'],
+ remote_check_objs=None)
+ patch_sender(sender)
+ sender.send_list = [wanted]
+ sender.available_map = available_map
+ success, candidates = sender()
+ self.assertTrue(success)
+ self.assertEqual(available_map, candidates)
+
+ # one obj not in sync, remote check only so that obj is not sync'd
+ # and should not be in the return set
+ wanted = '9d41d8cd98f00b204e9800998ecf0def'
+ remote_check_objs = set(available_map.keys())
+ sender = ssync_sender.Sender(
+ self.daemon, node, job, ['ignored'],
+ remote_check_objs=remote_check_objs)
+ patch_sender(sender)
+ sender.send_list = [wanted]
+ sender.available_map = available_map
+ success, candidates = sender()
+ self.assertTrue(success)
+ expected_map = dict([('9d41d8cd98f00b204e9800998ecf0abc',
+ '1380144470.00000'),
+ ('9d41d8cd98f00b204e9800998ecf1def',
+ '1380144474.44444')])
+ self.assertEqual(expected_map, candidates)
+
def test_call_and_missing_check(self):
- def yield_hashes(device, partition, policy_index, suffixes=None):
+ def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if device == 'dev' and partition == '9' and suffixes == ['abc'] \
- and policy_index == 0:
+ and policy == POLICIES.legacy:
yield (
'/srv/node/dev/objects/9/abc/'
'9d41d8cd98f00b204e9800998ecf0abc',
@@ -269,7 +356,12 @@ class TestSender(unittest.TestCase):
'No match for %r %r %r' % (device, partition, suffixes))
self.sender.connection = FakeConnection()
- self.sender.job = {'device': 'dev', 'partition': '9'}
+ self.sender.job = {
+ 'device': 'dev',
+ 'partition': '9',
+ 'policy': POLICIES.legacy,
+ 'frag_index': 0,
+ }
self.sender.suffixes = ['abc']
self.sender.response = FakeResponse(
chunk_body=(
@@ -282,13 +374,14 @@ class TestSender(unittest.TestCase):
self.sender.disconnect = mock.MagicMock()
success, candidates = self.sender()
self.assertTrue(success)
- self.assertEqual(candidates, set(['9d41d8cd98f00b204e9800998ecf0abc']))
+ self.assertEqual(candidates, dict([('9d41d8cd98f00b204e9800998ecf0abc',
+ '1380144470.00000')]))
self.assertEqual(self.sender.failures, 0)
def test_call_and_missing_check_with_obj_list(self):
- def yield_hashes(device, partition, policy_index, suffixes=None):
+ def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if device == 'dev' and partition == '9' and suffixes == ['abc'] \
- and policy_index == 0:
+ and policy == POLICIES.legacy:
yield (
'/srv/node/dev/objects/9/abc/'
'9d41d8cd98f00b204e9800998ecf0abc',
@@ -297,8 +390,13 @@ class TestSender(unittest.TestCase):
else:
raise Exception(
'No match for %r %r %r' % (device, partition, suffixes))
- job = {'device': 'dev', 'partition': '9'}
- self.sender = ssync_sender.Sender(self.replicator, None, job, ['abc'],
+ job = {
+ 'device': 'dev',
+ 'partition': '9',
+ 'policy': POLICIES.legacy,
+ 'frag_index': 0,
+ }
+ self.sender = ssync_sender.Sender(self.daemon, None, job, ['abc'],
['9d41d8cd98f00b204e9800998ecf0abc'])
self.sender.connection = FakeConnection()
self.sender.response = FakeResponse(
@@ -311,13 +409,14 @@ class TestSender(unittest.TestCase):
self.sender.disconnect = mock.MagicMock()
success, candidates = self.sender()
self.assertTrue(success)
- self.assertEqual(candidates, set(['9d41d8cd98f00b204e9800998ecf0abc']))
+ self.assertEqual(candidates, dict([('9d41d8cd98f00b204e9800998ecf0abc',
+ '1380144470.00000')]))
self.assertEqual(self.sender.failures, 0)
def test_call_and_missing_check_with_obj_list_but_required(self):
- def yield_hashes(device, partition, policy_index, suffixes=None):
+ def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if device == 'dev' and partition == '9' and suffixes == ['abc'] \
- and policy_index == 0:
+ and policy == POLICIES.legacy:
yield (
'/srv/node/dev/objects/9/abc/'
'9d41d8cd98f00b204e9800998ecf0abc',
@@ -326,8 +425,13 @@ class TestSender(unittest.TestCase):
else:
raise Exception(
'No match for %r %r %r' % (device, partition, suffixes))
- job = {'device': 'dev', 'partition': '9'}
- self.sender = ssync_sender.Sender(self.replicator, None, job, ['abc'],
+ job = {
+ 'device': 'dev',
+ 'partition': '9',
+ 'policy': POLICIES.legacy,
+ 'frag_index': 0,
+ }
+ self.sender = ssync_sender.Sender(self.daemon, None, job, ['abc'],
['9d41d8cd98f00b204e9800998ecf0abc'])
self.sender.connection = FakeConnection()
self.sender.response = FakeResponse(
@@ -341,14 +445,14 @@ class TestSender(unittest.TestCase):
self.sender.disconnect = mock.MagicMock()
success, candidates = self.sender()
self.assertTrue(success)
- self.assertEqual(candidates, set())
+ self.assertEqual(candidates, {})
def test_connect_send_timeout(self):
- self.replicator.conn_timeout = 0.01
+ self.daemon.conn_timeout = 0.01
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
- job = dict(partition='9')
- self.sender = ssync_sender.Sender(self.replicator, node, job, None)
+ job = dict(partition='9', policy=POLICIES.legacy)
+ self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
def putrequest(*args, **kwargs):
@@ -359,18 +463,18 @@ class TestSender(unittest.TestCase):
'putrequest', putrequest):
success, candidates = self.sender()
self.assertFalse(success)
- self.assertEquals(candidates, set())
- call = self.replicator.logger.error.mock_calls[0]
- self.assertEqual(
- call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
- self.assertEqual(str(call[1][-1]), '0.01 seconds: connect send')
+ self.assertEquals(candidates, {})
+ error_lines = self.daemon.logger.get_lines_for_level('error')
+ for line in error_lines:
+ self.assertTrue(line.startswith(
+ '1.2.3.4:5678/sda1/9 0.01 seconds: connect send'))
def test_connect_receive_timeout(self):
- self.replicator.node_timeout = 0.02
+ self.daemon.node_timeout = 0.02
node = dict(replication_ip='1.2.3.4', replication_port=5678,
- device='sda1')
- job = dict(partition='9')
- self.sender = ssync_sender.Sender(self.replicator, node, job, None)
+ device='sda1', index=0)
+ job = dict(partition='9', policy=POLICIES.legacy)
+ self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
class FakeBufferedHTTPConnection(NullBufferedHTTPConnection):
@@ -383,18 +487,18 @@ class TestSender(unittest.TestCase):
FakeBufferedHTTPConnection):
success, candidates = self.sender()
self.assertFalse(success)
- self.assertEquals(candidates, set())
- call = self.replicator.logger.error.mock_calls[0]
- self.assertEqual(
- call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
- self.assertEqual(str(call[1][-1]), '0.02 seconds: connect receive')
+ self.assertEquals(candidates, {})
+ error_lines = self.daemon.logger.get_lines_for_level('error')
+ for line in error_lines:
+ self.assertTrue(line.startswith(
+ '1.2.3.4:5678/sda1/9 0.02 seconds: connect receive'))
def test_connect_bad_status(self):
- self.replicator.node_timeout = 0.02
+ self.daemon.node_timeout = 0.02
node = dict(replication_ip='1.2.3.4', replication_port=5678,
- device='sda1')
- job = dict(partition='9')
- self.sender = ssync_sender.Sender(self.replicator, node, job, None)
+ device='sda1', index=0)
+ job = dict(partition='9', policy=POLICIES.legacy)
+ self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
class FakeBufferedHTTPConnection(NullBufferedHTTPConnection):
@@ -408,11 +512,11 @@ class TestSender(unittest.TestCase):
FakeBufferedHTTPConnection):
success, candidates = self.sender()
self.assertFalse(success)
- self.assertEquals(candidates, set())
- call = self.replicator.logger.error.mock_calls[0]
- self.assertEqual(
- call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
- self.assertEqual(str(call[1][-1]), 'Expected status 200; got 503')
+ self.assertEquals(candidates, {})
+ error_lines = self.daemon.logger.get_lines_for_level('error')
+ for line in error_lines:
+ self.assertTrue(line.startswith(
+ '1.2.3.4:5678/sda1/9 Expected status 200; got 503'))
def test_readline_newline_in_buffer(self):
self.sender.response_buffer = 'Has a newline already.\r\nOkay.'
@@ -420,7 +524,7 @@ class TestSender(unittest.TestCase):
self.assertEqual(self.sender.response_buffer, 'Okay.')
def test_readline_buffer_exceeds_network_chunk_size_somehow(self):
- self.replicator.network_chunk_size = 2
+ self.daemon.network_chunk_size = 2
self.sender.response_buffer = '1234567890'
self.assertEqual(self.sender.readline(), '1234567890')
self.assertEqual(self.sender.response_buffer, '')
@@ -473,16 +577,21 @@ class TestSender(unittest.TestCase):
self.assertRaises(exceptions.MessageTimeout, self.sender.missing_check)
def test_missing_check_has_empty_suffixes(self):
- def yield_hashes(device, partition, policy_idx, suffixes=None):
- if (device != 'dev' or partition != '9' or policy_idx != 0 or
+ def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
+ if (device != 'dev' or partition != '9' or
+ policy != POLICIES.legacy or
suffixes != ['abc', 'def']):
yield # Just here to make this a generator
raise Exception(
'No match for %r %r %r %r' % (device, partition,
- policy_idx, suffixes))
+ policy, suffixes))
self.sender.connection = FakeConnection()
- self.sender.job = {'device': 'dev', 'partition': '9'}
+ self.sender.job = {
+ 'device': 'dev',
+ 'partition': '9',
+ 'policy': POLICIES.legacy,
+ }
self.sender.suffixes = ['abc', 'def']
self.sender.response = FakeResponse(
chunk_body=(
@@ -495,11 +604,12 @@ class TestSender(unittest.TestCase):
'17\r\n:MISSING_CHECK: START\r\n\r\n'
'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(self.sender.send_list, [])
- self.assertEqual(self.sender.available_set, set())
+ self.assertEqual(self.sender.available_map, {})
def test_missing_check_has_suffixes(self):
- def yield_hashes(device, partition, policy_idx, suffixes=None):
- if (device == 'dev' and partition == '9' and policy_idx == 0 and
+ def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
+ if (device == 'dev' and partition == '9' and
+ policy == POLICIES.legacy and
suffixes == ['abc', 'def']):
yield (
'/srv/node/dev/objects/9/abc/'
@@ -519,10 +629,14 @@ class TestSender(unittest.TestCase):
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
- policy_idx, suffixes))
+ policy, suffixes))
self.sender.connection = FakeConnection()
- self.sender.job = {'device': 'dev', 'partition': '9'}
+ self.sender.job = {
+ 'device': 'dev',
+ 'partition': '9',
+ 'policy': POLICIES.legacy,
+ }
self.sender.suffixes = ['abc', 'def']
self.sender.response = FakeResponse(
chunk_body=(
@@ -538,14 +652,15 @@ class TestSender(unittest.TestCase):
'33\r\n9d41d8cd98f00b204e9800998ecf1def 1380144474.44444\r\n\r\n'
'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(self.sender.send_list, [])
- candidates = ['9d41d8cd98f00b204e9800998ecf0abc',
- '9d41d8cd98f00b204e9800998ecf0def',
- '9d41d8cd98f00b204e9800998ecf1def']
- self.assertEqual(self.sender.available_set, set(candidates))
+ candidates = [('9d41d8cd98f00b204e9800998ecf0abc', '1380144470.00000'),
+ ('9d41d8cd98f00b204e9800998ecf0def', '1380144472.22222'),
+ ('9d41d8cd98f00b204e9800998ecf1def', '1380144474.44444')]
+ self.assertEqual(self.sender.available_map, dict(candidates))
def test_missing_check_far_end_disconnect(self):
- def yield_hashes(device, partition, policy_idx, suffixes=None):
- if (device == 'dev' and partition == '9' and policy_idx == 0 and
+ def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
+ if (device == 'dev' and partition == '9' and
+ policy == POLICIES.legacy and
suffixes == ['abc']):
yield (
'/srv/node/dev/objects/9/abc/'
@@ -555,10 +670,14 @@ class TestSender(unittest.TestCase):
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
- policy_idx, suffixes))
+ policy, suffixes))
self.sender.connection = FakeConnection()
- self.sender.job = {'device': 'dev', 'partition': '9'}
+ self.sender.job = {
+ 'device': 'dev',
+ 'partition': '9',
+ 'policy': POLICIES.legacy,
+ }
self.sender.suffixes = ['abc']
self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
self.sender.response = FakeResponse(chunk_body='\r\n')
@@ -573,12 +692,14 @@ class TestSender(unittest.TestCase):
'17\r\n:MISSING_CHECK: START\r\n\r\n'
'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
'15\r\n:MISSING_CHECK: END\r\n\r\n')
- self.assertEqual(self.sender.available_set,
- set(['9d41d8cd98f00b204e9800998ecf0abc']))
+ self.assertEqual(self.sender.available_map,
+ dict([('9d41d8cd98f00b204e9800998ecf0abc',
+ '1380144470.00000')]))
def test_missing_check_far_end_disconnect2(self):
- def yield_hashes(device, partition, policy_idx, suffixes=None):
- if (device == 'dev' and partition == '9' and policy_idx == 0 and
+ def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
+ if (device == 'dev' and partition == '9' and
+ policy == POLICIES.legacy and
suffixes == ['abc']):
yield (
'/srv/node/dev/objects/9/abc/'
@@ -588,10 +709,14 @@ class TestSender(unittest.TestCase):
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
- policy_idx, suffixes))
+ policy, suffixes))
self.sender.connection = FakeConnection()
- self.sender.job = {'device': 'dev', 'partition': '9'}
+ self.sender.job = {
+ 'device': 'dev',
+ 'partition': '9',
+ 'policy': POLICIES.legacy,
+ }
self.sender.suffixes = ['abc']
self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
self.sender.response = FakeResponse(
@@ -607,12 +732,14 @@ class TestSender(unittest.TestCase):
'17\r\n:MISSING_CHECK: START\r\n\r\n'
'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
'15\r\n:MISSING_CHECK: END\r\n\r\n')
- self.assertEqual(self.sender.available_set,
- set(['9d41d8cd98f00b204e9800998ecf0abc']))
+ self.assertEqual(self.sender.available_map,
+ dict([('9d41d8cd98f00b204e9800998ecf0abc',
+ '1380144470.00000')]))
def test_missing_check_far_end_unexpected(self):
- def yield_hashes(device, partition, policy_idx, suffixes=None):
- if (device == 'dev' and partition == '9' and policy_idx == 0 and
+ def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
+ if (device == 'dev' and partition == '9' and
+ policy == POLICIES.legacy and
suffixes == ['abc']):
yield (
'/srv/node/dev/objects/9/abc/'
@@ -622,10 +749,14 @@ class TestSender(unittest.TestCase):
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
- policy_idx, suffixes))
+ policy, suffixes))
self.sender.connection = FakeConnection()
- self.sender.job = {'device': 'dev', 'partition': '9'}
+ self.sender.job = {
+ 'device': 'dev',
+ 'partition': '9',
+ 'policy': POLICIES.legacy,
+ }
self.sender.suffixes = ['abc']
self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
self.sender.response = FakeResponse(chunk_body='OH HAI\r\n')
@@ -640,12 +771,14 @@ class TestSender(unittest.TestCase):
'17\r\n:MISSING_CHECK: START\r\n\r\n'
'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
'15\r\n:MISSING_CHECK: END\r\n\r\n')
- self.assertEqual(self.sender.available_set,
- set(['9d41d8cd98f00b204e9800998ecf0abc']))
+ self.assertEqual(self.sender.available_map,
+ dict([('9d41d8cd98f00b204e9800998ecf0abc',
+ '1380144470.00000')]))
def test_missing_check_send_list(self):
- def yield_hashes(device, partition, policy_idx, suffixes=None):
- if (device == 'dev' and partition == '9' and policy_idx == 0 and
+ def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
+ if (device == 'dev' and partition == '9' and
+ policy == POLICIES.legacy and
suffixes == ['abc']):
yield (
'/srv/node/dev/objects/9/abc/'
@@ -655,10 +788,14 @@ class TestSender(unittest.TestCase):
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
- policy_idx, suffixes))
+ policy, suffixes))
self.sender.connection = FakeConnection()
- self.sender.job = {'device': 'dev', 'partition': '9'}
+ self.sender.job = {
+ 'device': 'dev',
+ 'partition': '9',
+ 'policy': POLICIES.legacy,
+ }
self.sender.suffixes = ['abc']
self.sender.response = FakeResponse(
chunk_body=(
@@ -673,8 +810,45 @@ class TestSender(unittest.TestCase):
'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(self.sender.send_list, ['0123abc'])
- self.assertEqual(self.sender.available_set,
- set(['9d41d8cd98f00b204e9800998ecf0abc']))
+ self.assertEqual(self.sender.available_map,
+ dict([('9d41d8cd98f00b204e9800998ecf0abc',
+ '1380144470.00000')]))
+
+ def test_missing_check_extra_line_parts(self):
+ # check that the sender tolerates extra parts in missing_check
+ # line responses, to allow for future protocol upgrades
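+ # (today each line carries just the object hash; the sender should
+ # take the first space-separated token and ignore the rest)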
+ def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
+ if (device == 'dev' and partition == '9' and
+ policy == POLICIES.legacy and
+ suffixes == ['abc']):
+ yield (
+ '/srv/node/dev/objects/9/abc/'
+ '9d41d8cd98f00b204e9800998ecf0abc',
+ '9d41d8cd98f00b204e9800998ecf0abc',
+ '1380144470.00000')
+ else:
+ raise Exception(
+ 'No match for %r %r %r %r' % (device, partition,
+ policy, suffixes))
+
+ self.sender.connection = FakeConnection()
+ self.sender.job = {
+ 'device': 'dev',
+ 'partition': '9',
+ 'policy': POLICIES.legacy,
+ }
+ self.sender.suffixes = ['abc']
+ self.sender.response = FakeResponse(
+ chunk_body=(
+ ':MISSING_CHECK: START\r\n'
+ '0123abc extra response parts\r\n'
+ ':MISSING_CHECK: END\r\n'))
+ self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
+ self.sender.missing_check()
+ self.assertEqual(self.sender.send_list, ['0123abc'])
+ self.assertEqual(self.sender.available_map,
+ dict([('9d41d8cd98f00b204e9800998ecf0abc',
+ '1380144470.00000')]))
def test_updates_timeout(self):
self.sender.connection = FakeConnection()
@@ -742,7 +916,12 @@ class TestSender(unittest.TestCase):
delete_timestamp = utils.normalize_timestamp(time.time())
df.delete(delete_timestamp)
self.sender.connection = FakeConnection()
- self.sender.job = {'device': device, 'partition': part}
+ self.sender.job = {
+ 'device': device,
+ 'partition': part,
+ 'policy': POLICIES.legacy,
+ 'frag_index': 0,
+ }
self.sender.node = {}
self.sender.send_list = [object_hash]
self.sender.send_delete = mock.MagicMock()
@@ -771,7 +950,12 @@ class TestSender(unittest.TestCase):
delete_timestamp = utils.normalize_timestamp(time.time())
df.delete(delete_timestamp)
self.sender.connection = FakeConnection()
- self.sender.job = {'device': device, 'partition': part}
+ self.sender.job = {
+ 'device': device,
+ 'partition': part,
+ 'policy': POLICIES.legacy,
+ 'frag_index': 0,
+ }
self.sender.node = {}
self.sender.send_list = [object_hash]
self.sender.response = FakeResponse(
@@ -797,7 +981,12 @@ class TestSender(unittest.TestCase):
object_hash = utils.hash_path(*object_parts)
expected = df.get_metadata()
self.sender.connection = FakeConnection()
- self.sender.job = {'device': device, 'partition': part}
+ self.sender.job = {
+ 'device': device,
+ 'partition': part,
+ 'policy': POLICIES.legacy,
+ 'frag_index': 0,
+ }
self.sender.node = {}
self.sender.send_list = [object_hash]
self.sender.send_delete = mock.MagicMock()
@@ -821,18 +1010,20 @@ class TestSender(unittest.TestCase):
'11\r\n:UPDATES: START\r\n\r\n'
'f\r\n:UPDATES: END\r\n\r\n')
- @patch_policies
def test_updates_storage_policy_index(self):
device = 'dev'
part = '9'
object_parts = ('a', 'c', 'o')
df = self._make_open_diskfile(device, part, *object_parts,
- policy_idx=1)
+ policy=POLICIES[0])
object_hash = utils.hash_path(*object_parts)
expected = df.get_metadata()
self.sender.connection = FakeConnection()
- self.sender.job = {'device': device, 'partition': part,
- 'policy_idx': 1}
+ self.sender.job = {
+ 'device': device,
+ 'partition': part,
+ 'policy': POLICIES[0],
+ 'frag_index': 0}
self.sender.node = {}
self.sender.send_list = [object_hash]
self.sender.send_delete = mock.MagicMock()
@@ -847,7 +1038,7 @@ class TestSender(unittest.TestCase):
self.assertEqual(path, '/a/c/o')
self.assert_(isinstance(df, diskfile.DiskFile))
self.assertEqual(expected, df.get_metadata())
- self.assertEqual(os.path.join(self.testdir, 'dev/objects-1/9/',
+ self.assertEqual(os.path.join(self.testdir, 'dev/objects/9/',
object_hash[-3:], object_hash),
df._datadir)
@@ -1054,5 +1245,466 @@ class TestSender(unittest.TestCase):
self.assertTrue(self.sender.connection.closed)
+@patch_policies(with_ec_default=True)
+class TestSsync(BaseTestSender):
+ """
+ Test interactions between sender and receiver. The basis for each test is
+ actual diskfile state on either side - the connection between sender and
+ receiver is faked. Assertions are made about the final state of the sender
+ and receiver diskfiles.
+ """
+
+ def make_fake_ssync_connect(self, sender, rx_obj_controller, device,
+ partition, policy):
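+ # wire the sender directly to the receiver: bytes the sender
+ # writes land in the receiver's wsgi.input, and the receiver's
+ # response iterator is handed back to the sender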
+ trace = []
+
+ def add_trace(type, msg):
+ # record a protocol event for later analysis
+ if msg.strip():
+ trace.append((type, msg.strip()))
+
+ def start_response(status, headers, exc_info=None):
+ assert(status == '200 OK')
+
+ class FakeConnection:
+ def __init__(self, trace):
+ self.trace = trace
+ self.queue = []
+ self.src = FileLikeIter(self.queue)
+
+ def send(self, msg):
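+ # strip the chunked transfer-encoding framing (the '<hex-len>\r\n'
+ # header and the trailing '\r\n') so the queue holds bare messages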
+ msg = msg.split('\r\n', 1)[1]
+ msg = msg.rsplit('\r\n', 1)[0]
+ add_trace('tx', msg)
+ self.queue.append(msg)
+
+ def close(self):
+ pass
+
+ def wrap_gen(gen):
+ # record each message from the receiver and re-apply chunked
+ # transfer framing so the sender can parse the response
+ while True:
+ try:
+ msg = gen.next()
+ if msg:
+ add_trace('rx', msg)
+ msg = '%x\r\n%s\r\n' % (len(msg), msg)
+ yield msg
+ except StopIteration:
+ break
+
+ def fake_connect():
+ sender.connection = FakeConnection(trace)
+ headers = {'Transfer-Encoding': 'chunked',
+ 'X-Backend-Storage-Policy-Index': str(int(policy))}
+ env = {'REQUEST_METHOD': 'SSYNC'}
+ path = '/%s/%s' % (device, partition)
+ req = Request.blank(path, environ=env, headers=headers)
+ req.environ['wsgi.input'] = sender.connection.src
+ resp = rx_obj_controller(req.environ, start_response)
+ wrapped_gen = wrap_gen(resp)
+ sender.response = FileLikeIter(wrapped_gen)
+ sender.response.fp = sender.response
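+ # the sender reads via response.fp (as it would on a real
+ # bufferedhttp response), so point fp back at the iterator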
+ return fake_connect
+
+ def setUp(self):
+ self.device = 'dev'
+ self.partition = '9'
+ self.tmpdir = tempfile.mkdtemp()
+ # sender side setup
+ self.tx_testdir = os.path.join(self.tmpdir, 'tmp_test_ssync_sender')
+ utils.mkdirs(os.path.join(self.tx_testdir, self.device))
+ self.daemon = FakeReplicator(self.tx_testdir)
+
+ # rx side setup
+ self.rx_testdir = os.path.join(self.tmpdir, 'tmp_test_ssync_receiver')
+ utils.mkdirs(os.path.join(self.rx_testdir, self.device))
+ conf = {
+ 'devices': self.rx_testdir,
+ 'mount_check': 'false',
+ 'replication_one_per_device': 'false',
+ 'log_requests': 'false'}
+ self.rx_controller = server.ObjectController(conf)
+ self.orig_ensure_flush = ssync_receiver.Receiver._ensure_flush
+ ssync_receiver.Receiver._ensure_flush = lambda *args: ''
+ self.ts_iter = (Timestamp(t)
+ for t in itertools.count(int(time.time())))
+
+ def tearDown(self):
+ if self.orig_ensure_flush:
+ ssync_receiver.Receiver._ensure_flush = self.orig_ensure_flush
+ shutil.rmtree(self.tmpdir, ignore_errors=True)
+
+ def _create_ondisk_files(self, df_mgr, obj_name, policy, timestamp,
+ frag_indexes=None):
+ frag_indexes = [] if frag_indexes is None else frag_indexes
+ metadata = {'Content-Type': 'plain/text'}
+ diskfiles = []
+ for frag_index in frag_indexes:
+ object_data = '/a/c/%s___%s' % (obj_name, frag_index)
+ if frag_index is not None:
+ metadata['X-Object-Sysmeta-Ec-Frag-Index'] = str(frag_index)
+ df = self._make_open_diskfile(
+ device=self.device, partition=self.partition, account='a',
+ container='c', obj=obj_name, body=object_data,
+ extra_metadata=metadata, timestamp=timestamp, policy=policy,
+ frag_index=frag_index, df_mgr=df_mgr)
+ # sanity checks
+ listing = os.listdir(df._datadir)
+ self.assertTrue(listing)
+ for filename in listing:
+ self.assertTrue(filename.startswith(timestamp.internal))
+ diskfiles.append(df)
+ return diskfiles
+
+ def _open_tx_diskfile(self, obj_name, policy, frag_index=None):
+ df_mgr = self.daemon._diskfile_router[policy]
+ df = df_mgr.get_diskfile(
+ self.device, self.partition, account='a', container='c',
+ obj=obj_name, policy=policy, frag_index=frag_index)
+ df.open()
+ return df
+
+ def _open_rx_diskfile(self, obj_name, policy, frag_index=None):
+ df = self.rx_controller.get_diskfile(
+ self.device, self.partition, 'a', 'c', obj_name, policy=policy,
+ frag_index=frag_index)
+ df.open()
+ return df
+
+ def _verify_diskfile_sync(self, tx_df, rx_df, frag_index):
+ # verify that diskfiles' metadata match
+ # sanity check, they are not the same ondisk files!
+ self.assertNotEqual(tx_df._datadir, rx_df._datadir)
+ rx_metadata = dict(rx_df.get_metadata())
+ for k, v in tx_df.get_metadata().iteritems():
+ self.assertEqual(v, rx_metadata.pop(k))
+ # ugh, ssync duplicates ETag with Etag so have to clear it out here
+ if 'Etag' in rx_metadata:
+ rx_metadata.pop('Etag')
+ self.assertFalse(rx_metadata)
+ if frag_index is not None:
+ rx_metadata = rx_df.get_metadata()
+ fi_key = 'X-Object-Sysmeta-Ec-Frag-Index'
+ self.assertTrue(fi_key in rx_metadata)
+ self.assertEqual(frag_index, int(rx_metadata[fi_key]))
+
+ def _analyze_trace(self, trace):
+ """
+ Parse protocol trace captured by fake connection, making some
+ assertions along the way, and return results as a dict of form:
+ results = {'tx_missing': <list of messages>,
+ 'rx_missing': <list of messages>,
+ 'tx_updates': <list of subreqs>,
+ 'rx_updates': <list of messages>}
+
+ Each subreq is a dict with keys: 'method', 'path', 'headers', 'body'
+ """
+ def tx_missing(results, line):
+ self.assertEqual('tx', line[0])
+ results['tx_missing'].append(line[1])
+
+ def rx_missing(results, line):
+ self.assertEqual('rx', line[0])
+ parts = line[1].split('\r\n')
+ for part in parts:
+ results['rx_missing'].append(part)
+
+ def tx_updates(results, line):
+ self.assertEqual('tx', line[0])
+ subrequests = results['tx_updates']
+ if line[1].startswith(('PUT', 'DELETE')):
+ parts = line[1].split('\r\n')
+ method, path = parts[0].split()
+ subreq = {'method': method, 'path': path, 'req': line[1],
+ 'headers': parts[1:]}
+ subrequests.append(subreq)
+ else:
+ self.assertTrue(subrequests)
+ body = (subrequests[-1]).setdefault('body', '')
+ body += line[1]
+ subrequests[-1]['body'] = body
+
+ def rx_updates(results, line):
+ self.assertEqual('rx', line[0])
+ results['rx_updates'].append(line[1])
+
+ def unexpected(results, line):
+ results.setdefault('unexpected', []).append(line)
+
+ # each trace line is a tuple of ([tx|rx], msg)
+ handshakes = iter([(('tx', ':MISSING_CHECK: START'), tx_missing),
+ (('tx', ':MISSING_CHECK: END'), unexpected),
+ (('rx', ':MISSING_CHECK: START'), rx_missing),
+ (('rx', ':MISSING_CHECK: END'), unexpected),
+ (('tx', ':UPDATES: START'), tx_updates),
+ (('tx', ':UPDATES: END'), unexpected),
+ (('rx', ':UPDATES: START'), rx_updates),
+ (('rx', ':UPDATES: END'), unexpected)])
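+ # each marker line, once matched, installs the handler for the
+ # lines that follow it; the loop consumes the markers themselves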
+ expect_handshake = handshakes.next()
+ phases = ('tx_missing', 'rx_missing', 'tx_updates', 'rx_updates')
+ results = dict((k, []) for k in phases)
+ handler = unexpected
+ lines = list(trace)
+ lines.reverse()
+ while lines:
+ line = lines.pop()
+ if line == expect_handshake[0]:
+ handler = expect_handshake[1]
+ try:
+ expect_handshake = handshakes.next()
+ except StopIteration:
+ # should be the last line
+ self.assertFalse(
+ lines, 'Unexpected trailing lines %s' % lines)
+ continue
+ handler(results, line)
+
+ try:
+ # check all handshakes occurred
+ missed = handshakes.next()
+ self.fail('Handshake %s not found' % str(missed[0]))
+ except StopIteration:
+ pass
+ # check no message outside of a phase
+ self.assertFalse(results.get('unexpected'),
+ 'Message outside of a phase: %s' % results.get('unexpected'))
+ return results
+
+ def _verify_ondisk_files(self, tx_objs, policy, rx_node_index):
+ # verify tx and rx files that should be in sync
+ for o_name, diskfiles in tx_objs.iteritems():
+ for tx_df in diskfiles:
+ frag_index = tx_df._frag_index
+ if frag_index == rx_node_index:
+ # this frag_index should have been sync'd,
+ # check rx file is ok
+ rx_df = self._open_rx_diskfile(o_name, policy, frag_index)
+ self._verify_diskfile_sync(tx_df, rx_df, frag_index)
+ expected_body = '/a/c/%s___%s' % (o_name, rx_node_index)
+ actual_body = ''.join([chunk for chunk in rx_df.reader()])
+ self.assertEqual(expected_body, actual_body)
+ else:
+ # this frag_index should not have been sync'd;
+ # check there is no rx file
+ self.assertRaises(DiskFileNotExist,
+ self._open_rx_diskfile,
+ o_name, policy, frag_index=frag_index)
+ # check tx file still intact - ssync does not do any cleanup!
+ self._open_tx_diskfile(o_name, policy, frag_index)
+
+ def _verify_tombstones(self, tx_objs, policy):
+ # verify tx and rx tombstones that should be in sync
+ for o_name, diskfiles in tx_objs.iteritems():
+ for tx_df_ in diskfiles:
+ try:
+ self._open_tx_diskfile(o_name, policy)
+ self.fail('DiskFileDeleted expected')
+ except DiskFileDeleted as exc:
+ tx_delete_time = exc.timestamp
+ try:
+ self._open_rx_diskfile(o_name, policy)
+ self.fail('DiskFileDeleted expected')
+ except DiskFileDeleted as exc:
+ rx_delete_time = exc.timestamp
+ self.assertEqual(tx_delete_time, rx_delete_time)
+
+ def test_handoff_fragment_revert(self):
+ # test that a sync_revert type job does send the correct frag archives
+ # to the receiver, and that those frag archives are then removed from
+ # the local node.
+ policy = POLICIES.default
+ rx_node_index = 0
+ tx_node_index = 1
+ frag_index = rx_node_index
+
+ # create sender side diskfiles...
+ tx_objs = {}
+ rx_objs = {}
+ tx_tombstones = {}
+ tx_df_mgr = self.daemon._diskfile_router[policy]
+ rx_df_mgr = self.rx_controller._diskfile_router[policy]
+ # o1 has primary and handoff fragment archives
+ t1 = self.ts_iter.next()
+ tx_objs['o1'] = self._create_ondisk_files(
+ tx_df_mgr, 'o1', policy, t1, (rx_node_index, tx_node_index))
+ # o2 only has primary
+ t2 = self.ts_iter.next()
+ tx_objs['o2'] = self._create_ondisk_files(
+ tx_df_mgr, 'o2', policy, t2, (tx_node_index,))
+ # o3 only has handoff
+ t3 = self.ts_iter.next()
+ tx_objs['o3'] = self._create_ondisk_files(
+ tx_df_mgr, 'o3', policy, t3, (rx_node_index,))
+ # o4 primary and handoff fragment archives on tx, handoff in sync on rx
+ t4 = self.ts_iter.next()
+ tx_objs['o4'] = self._create_ondisk_files(
+ tx_df_mgr, 'o4', policy, t4, (tx_node_index, rx_node_index,))
+ rx_objs['o4'] = self._create_ondisk_files(
+ rx_df_mgr, 'o4', policy, t4, (rx_node_index,))
+ # o5 is a tombstone, missing on receiver
+ t5 = self.ts_iter.next()
+ tx_tombstones['o5'] = self._create_ondisk_files(
+ tx_df_mgr, 'o5', policy, t5, (tx_node_index,))
+ tx_tombstones['o5'][0].delete(t5)
+
+ suffixes = set()
+ for diskfiles in (tx_objs.values() + tx_tombstones.values()):
+ for df in diskfiles:
+ suffixes.add(os.path.basename(os.path.dirname(df._datadir)))
+
+ # create ssync sender instance...
+ job = {'device': self.device,
+ 'partition': self.partition,
+ 'policy': policy,
+ 'frag_index': frag_index,
+ 'purge': True}
+ node = {'index': rx_node_index}
+ self.sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
+ # fake connection from tx to rx...
+ self.sender.connect = self.make_fake_ssync_connect(
+ self.sender, self.rx_controller, self.device, self.partition,
+ policy)
+
+ # run the sync protocol...
+ self.sender()
+
+ # verify protocol
+ results = self._analyze_trace(self.sender.connection.trace)
+ # sender has handoff frags for o1, o3 and o4, and ts for o5
+ self.assertEqual(4, len(results['tx_missing']))
+ # receiver is missing frags for o1, o3 and ts for o5
+ self.assertEqual(3, len(results['rx_missing']))
+ self.assertEqual(3, len(results['tx_updates']))
+ self.assertFalse(results['rx_updates'])
+ sync_paths = []
+ for subreq in results.get('tx_updates'):
+ if subreq.get('method') == 'PUT':
+ self.assertTrue(
+ 'X-Object-Sysmeta-Ec-Frag-Index: %s' % rx_node_index
+ in subreq.get('headers'))
+ expected_body = '%s___%s' % (subreq['path'], rx_node_index)
+ self.assertEqual(expected_body, subreq['body'])
+ elif subreq.get('method') == 'DELETE':
+ self.assertEqual('/a/c/o5', subreq['path'])
+ sync_paths.append(subreq.get('path'))
+ self.assertEqual(['/a/c/o1', '/a/c/o3', '/a/c/o5'], sorted(sync_paths))
+
+ # verify on disk files...
+ self._verify_ondisk_files(tx_objs, policy, rx_node_index)
+ self._verify_tombstones(tx_tombstones, policy)
+
+ def test_fragment_sync(self):
+ # check that a sync_only type job does call reconstructor to build a
+ # diskfile to send, and continues making progress despite an error
+ # when building one diskfile
+ policy = POLICIES.default
+ rx_node_index = 0
+ tx_node_index = 1
+ # for a sync job we iterate over frag index that belongs on local node
+ frag_index = tx_node_index
+
+ # create sender side diskfiles...
+ tx_objs = {}
+ tx_tombstones = {}
+ rx_objs = {}
+ tx_df_mgr = self.daemon._diskfile_router[policy]
+ rx_df_mgr = self.rx_controller._diskfile_router[policy]
+ # o1 only has primary
+ t1 = self.ts_iter.next()
+ tx_objs['o1'] = self._create_ondisk_files(
+ tx_df_mgr, 'o1', policy, t1, (tx_node_index,))
+ # o2 only has primary
+ t2 = self.ts_iter.next()
+ tx_objs['o2'] = self._create_ondisk_files(
+ tx_df_mgr, 'o2', policy, t2, (tx_node_index,))
+ # o3 only has primary
+ t3 = self.ts_iter.next()
+ tx_objs['o3'] = self._create_ondisk_files(
+ tx_df_mgr, 'o3', policy, t3, (tx_node_index,))
+ # o4 primary fragment archives on tx, handoff in sync on rx
+ t4 = self.ts_iter.next()
+ tx_objs['o4'] = self._create_ondisk_files(
+ tx_df_mgr, 'o4', policy, t4, (tx_node_index,))
+ rx_objs['o4'] = self._create_ondisk_files(
+ rx_df_mgr, 'o4', policy, t4, (rx_node_index,))
+ # o5 is a tombstone, missing on receiver
+ t5 = self.ts_iter.next()
+ tx_tombstones['o5'] = self._create_ondisk_files(
+ tx_df_mgr, 'o5', policy, t5, (tx_node_index,))
+ tx_tombstones['o5'][0].delete(t5)
+
+ suffixes = set()
+ for diskfiles in (tx_objs.values() + tx_tombstones.values()):
+ for df in diskfiles:
+ suffixes.add(os.path.basename(os.path.dirname(df._datadir)))
+
+ reconstruct_fa_calls = []
+
+ def fake_reconstruct_fa(job, node, metadata):
+ reconstruct_fa_calls.append((job, node, policy, metadata))
+ if len(reconstruct_fa_calls) == 2:
+ # simulate second reconstruct failing
+ raise DiskFileError
+ content = '%s___%s' % (metadata['name'], rx_node_index)
+ return RebuildingECDiskFileStream(
+ metadata, rx_node_index, iter([content]))
+
+ # create ssync sender instance...
+ job = {'device': self.device,
+ 'partition': self.partition,
+ 'policy': policy,
+ 'frag_index': frag_index,
+ 'sync_diskfile_builder': fake_reconstruct_fa}
+ node = {'index': rx_node_index}
+ self.sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
+
+ # fake connection from tx to rx...
+ self.sender.connect = self.make_fake_ssync_connect(
+ self.sender, self.rx_controller, self.device, self.partition,
+ policy)
+
+ # run the sync protocol...
+ self.sender()
+
+ # verify protocol
+ results = self._analyze_trace(self.sender.connection.trace)
+ # sender has primaries for o1, o2, o3 and o4, and ts for o5
+ self.assertEqual(5, len(results['tx_missing']))
+ # receiver is missing o1, o2 and o3 and ts for o5
+ self.assertEqual(4, len(results['rx_missing']))
+ # sender can only construct 2 out of 3 missing frags
+ self.assertEqual(3, len(results['tx_updates']))
+ self.assertEqual(3, len(reconstruct_fa_calls))
+ self.assertFalse(results['rx_updates'])
+ actual_sync_paths = []
+ for subreq in results.get('tx_updates'):
+ if subreq.get('method') == 'PUT':
+ self.assertTrue(
+ 'X-Object-Sysmeta-Ec-Frag-Index: %s' % rx_node_index
+ in subreq.get('headers'))
+ expected_body = '%s___%s' % (subreq['path'], rx_node_index)
+ self.assertEqual(expected_body, subreq['body'])
+ elif subreq.get('method') == 'DELETE':
+ self.assertEqual('/a/c/o5', subreq['path'])
+ actual_sync_paths.append(subreq.get('path'))
+
+ # remove the failed df from expected synced df's
+ expect_sync_paths = ['/a/c/o1', '/a/c/o2', '/a/c/o3', '/a/c/o5']
+ failed_path = reconstruct_fa_calls[1][3]['name']
+ expect_sync_paths.remove(failed_path)
+ failed_obj = None
+ for obj, diskfiles in tx_objs.iteritems():
+ if diskfiles[0]._name == failed_path:
+ failed_obj = obj
+ # sanity check
+ self.assertTrue(tx_objs.pop(failed_obj))
+
+ # verify on disk files...
+ self.assertEqual(sorted(expect_sync_paths), sorted(actual_sync_paths))
+ self._verify_ondisk_files(tx_objs, policy, rx_node_index)
+ self._verify_tombstones(tx_tombstones, policy)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/unit/obj/test_updater.py b/test/unit/obj/test_updater.py
index 1915a55d1..2ca396545 100644
--- a/test/unit/obj/test_updater.py
+++ b/test/unit/obj/test_updater.py
@@ -70,7 +70,7 @@ class TestObjectUpdater(unittest.TestCase):
self.sda1 = os.path.join(self.devices_dir, 'sda1')
os.mkdir(self.sda1)
for policy in POLICIES:
- os.mkdir(os.path.join(self.sda1, get_tmp_dir(int(policy))))
+ os.mkdir(os.path.join(self.sda1, get_tmp_dir(policy)))
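+ # get_tmp_dir()/get_async_dir() now take the policy itself
+ # rather than its raw index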
self.logger = debug_logger()
def tearDown(self):
@@ -169,8 +169,8 @@ class TestObjectUpdater(unittest.TestCase):
seen = set()
class MockObjectUpdater(object_updater.ObjectUpdater):
- def process_object_update(self, update_path, device, idx):
- seen.add((update_path, idx))
+ def process_object_update(self, update_path, device, policy):
+ seen.add((update_path, int(policy)))
os.unlink(update_path)
cu = MockObjectUpdater({
@@ -216,7 +216,7 @@ class TestObjectUpdater(unittest.TestCase):
'concurrency': '1',
'node_timeout': '15'})
cu.run_once()
- async_dir = os.path.join(self.sda1, get_async_dir(0))
+ async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
os.mkdir(async_dir)
cu.run_once()
self.assert_(os.path.exists(async_dir))
@@ -253,7 +253,7 @@ class TestObjectUpdater(unittest.TestCase):
'concurrency': '1',
'node_timeout': '15'}, logger=self.logger)
cu.run_once()
- async_dir = os.path.join(self.sda1, get_async_dir(0))
+ async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
os.mkdir(async_dir)
cu.run_once()
self.assert_(os.path.exists(async_dir))
@@ -393,7 +393,7 @@ class TestObjectUpdater(unittest.TestCase):
'mount_check': 'false',
'swift_dir': self.testdir,
}
- async_dir = os.path.join(self.sda1, get_async_dir(policy.idx))
+ async_dir = os.path.join(self.sda1, get_async_dir(policy))
os.mkdir(async_dir)
account, container, obj = 'a', 'c', 'o'
@@ -412,7 +412,7 @@ class TestObjectUpdater(unittest.TestCase):
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
dfmanager.pickle_async_update(self.sda1, account, container, obj,
- data, ts.next(), policy.idx)
+ data, ts.next(), policy)
request_log = []
@@ -428,7 +428,7 @@ class TestObjectUpdater(unittest.TestCase):
ip, part, method, path, headers, qs, ssl = request_args
self.assertEqual(method, op)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
- str(policy.idx))
+ str(int(policy)))
self.assertEqual(daemon.logger.get_increment_counts(),
{'successes': 1, 'unlinks': 1,
'async_pendings': 1})
@@ -444,7 +444,7 @@ class TestObjectUpdater(unittest.TestCase):
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
- async_dir = os.path.join(self.sda1, get_async_dir(policy.idx))
+ async_dir = os.path.join(self.sda1, get_async_dir(policy))
os.mkdir(async_dir)
# write an async
@@ -456,12 +456,12 @@ class TestObjectUpdater(unittest.TestCase):
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-timestamp': ts.next(),
- 'X-Backend-Storage-Policy-Index': policy.idx,
+ 'X-Backend-Storage-Policy-Index': int(policy),
})
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
dfmanager.pickle_async_update(self.sda1, account, container, obj,
- data, ts.next(), policy.idx)
+ data, ts.next(), policy)
request_log = []
@@ -481,7 +481,7 @@ class TestObjectUpdater(unittest.TestCase):
ip, part, method, path, headers, qs, ssl = request_args
self.assertEqual(method, 'PUT')
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
- str(policy.idx))
+ str(int(policy)))
self.assertEqual(daemon.logger.get_increment_counts(),
{'successes': 1, 'unlinks': 1, 'async_pendings': 1})
diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py
index 2c2094ffe..037e28b44 100644
--- a/test/unit/proxy/controllers/test_base.py
+++ b/test/unit/proxy/controllers/test_base.py
@@ -21,9 +21,11 @@ from swift.proxy.controllers.base import headers_to_container_info, \
headers_to_account_info, headers_to_object_info, get_container_info, \
get_container_memcache_key, get_account_info, get_account_memcache_key, \
get_object_env_key, get_info, get_object_info, \
- Controller, GetOrHeadHandler, _set_info_cache, _set_object_info_cache
+ Controller, GetOrHeadHandler, _set_info_cache, _set_object_info_cache, \
+ bytes_to_skip
from swift.common.swob import Request, HTTPException, HeaderKeyDict, \
RESPONSE_REASONS
+from swift.common import exceptions
from swift.common.utils import split_path
from swift.common.http import is_success
from swift.common.storage_policy import StoragePolicy
@@ -159,9 +161,11 @@ class TestFuncs(unittest.TestCase):
def test_GETorHEAD_base(self):
base = Controller(self.app)
req = Request.blank('/v1/a/c/o/with/slashes')
+ ring = FakeRing()
+ nodes = list(ring.get_part_nodes(0)) + list(ring.get_more_nodes(0))
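+ # GETorHEAD_base now takes an iterator of nodes rather than a
+ # ring, so build the primary + handoff node list up front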
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
- resp = base.GETorHEAD_base(req, 'object', FakeRing(), 'part',
+ resp = base.GETorHEAD_base(req, 'object', iter(nodes), 'part',
'/a/c/o/with/slashes')
self.assertTrue('swift.object/a/c/o/with/slashes' in resp.environ)
self.assertEqual(
@@ -169,14 +173,14 @@ class TestFuncs(unittest.TestCase):
req = Request.blank('/v1/a/c/o')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
- resp = base.GETorHEAD_base(req, 'object', FakeRing(), 'part',
+ resp = base.GETorHEAD_base(req, 'object', iter(nodes), 'part',
'/a/c/o')
self.assertTrue('swift.object/a/c/o' in resp.environ)
self.assertEqual(resp.environ['swift.object/a/c/o']['status'], 200)
req = Request.blank('/v1/a/c')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
- resp = base.GETorHEAD_base(req, 'container', FakeRing(), 'part',
+ resp = base.GETorHEAD_base(req, 'container', iter(nodes), 'part',
'/a/c')
self.assertTrue('swift.container/a/c' in resp.environ)
self.assertEqual(resp.environ['swift.container/a/c']['status'], 200)
@@ -184,7 +188,7 @@ class TestFuncs(unittest.TestCase):
req = Request.blank('/v1/a')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
- resp = base.GETorHEAD_base(req, 'account', FakeRing(), 'part',
+ resp = base.GETorHEAD_base(req, 'account', iter(nodes), 'part',
'/a')
self.assertTrue('swift.account/a' in resp.environ)
self.assertEqual(resp.environ['swift.account/a']['status'], 200)
@@ -546,7 +550,7 @@ class TestFuncs(unittest.TestCase):
resp,
headers_to_object_info(headers.items(), 200))
- def test_have_quorum(self):
+ def test_base_have_quorum(self):
base = Controller(self.app)
# just throw a bunch of test cases at it
self.assertEqual(base.have_quorum([201, 404], 3), False)
@@ -648,3 +652,88 @@ class TestFuncs(unittest.TestCase):
self.assertEqual(v, dst_headers[k.lower()])
for k, v in bad_hdrs.iteritems():
self.assertFalse(k.lower() in dst_headers)
+
+ def test_client_chunk_size(self):
+
+ class TestSource(object):
+ def __init__(self, chunks):
+ self.chunks = list(chunks)
+
+ def read(self, _read_size):
+ if self.chunks:
+ return self.chunks.pop(0)
+ else:
+ return ''
+
+ source = TestSource((
+ 'abcd', '1234', 'abc', 'd1', '234abcd1234abcd1', '2'))
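+ # 30 bytes arrive in uneven pieces; with client_chunk_size=8 they
+ # should be re-chunked into three 8-byte chunks plus a 6-byte tail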
+ req = Request.blank('/v1/a/c/o')
+ node = {}
+ handler = GetOrHeadHandler(self.app, req, None, None, None, None, {},
+ client_chunk_size=8)
+
+ app_iter = handler._make_app_iter(req, node, source)
+ client_chunks = list(app_iter)
+ self.assertEqual(client_chunks, [
+ 'abcd1234', 'abcd1234', 'abcd1234', 'abcd12'])
+
+ def test_client_chunk_size_resuming(self):
+
+ class TestSource(object):
+ def __init__(self, chunks):
+ self.chunks = list(chunks)
+
+ def read(self, _read_size):
+ if self.chunks:
+ chunk = self.chunks.pop(0)
+ if chunk is None:
+ raise exceptions.ChunkReadTimeout()
+ else:
+ return chunk
+ else:
+ return ''
+
+ node = {'ip': '1.2.3.4', 'port': 6000, 'device': 'sda'}
+
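+ # source1 times out after yielding 11 bytes; only the first whole
+ # 8-byte client chunk was sent on, so the handler should resume
+ # from source2 with 'Range: bytes=8-'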
+ source1 = TestSource(['abcd', '1234', 'abc', None])
+ source2 = TestSource(['efgh5678'])
+ req = Request.blank('/v1/a/c/o')
+ handler = GetOrHeadHandler(
+ self.app, req, 'Object', None, None, None, {},
+ client_chunk_size=8)
+
+ app_iter = handler._make_app_iter(req, node, source1)
+ with patch.object(handler, '_get_source_and_node',
+ lambda: (source2, node)):
+ client_chunks = list(app_iter)
+ self.assertEqual(client_chunks, ['abcd1234', 'efgh5678'])
+ self.assertEqual(handler.backend_headers['Range'], 'bytes=8-')
+
+ def test_bytes_to_skip(self):
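+ # every case below fits bytes_to_skip(record_size, offset) ==
+ # (-offset) % record_size, i.e. the bytes of partial record left
+ # before the next record boundary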
+ # if you start at the beginning, skip nothing
+ self.assertEqual(bytes_to_skip(1024, 0), 0)
+
+ # missed the first 10 bytes, so we've got 1014 bytes of partial
+ # record
+ self.assertEqual(bytes_to_skip(1024, 10), 1014)
+
+ # skipped some whole records first
+ self.assertEqual(bytes_to_skip(1024, 4106), 1014)
+
+ # landed on a record boundary
+ self.assertEqual(bytes_to_skip(1024, 1024), 0)
+ self.assertEqual(bytes_to_skip(1024, 2048), 0)
+
+ # big numbers
+ self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32), 0)
+ self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32 + 1), 2 ** 20 - 1)
+ self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32 + 2 ** 19), 2 ** 19)
+
+ # odd numbers
+ self.assertEqual(bytes_to_skip(123, 0), 0)
+ self.assertEqual(bytes_to_skip(123, 23), 100)
+ self.assertEqual(bytes_to_skip(123, 247), 122)
+
+ # prime numbers
+ self.assertEqual(bytes_to_skip(11, 7), 4)
+ self.assertEqual(bytes_to_skip(97, 7873823), 55)
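Every case above is consistent with skipping just enough bytes to realign on
the next record boundary; as a sketch of the arithmetic being asserted:

    def bytes_to_skip(record_size, offset):
        # distance from offset to the next record boundary; zero when
        # offset already sits on a boundary
        return (record_size - offset % record_size) % record_size

    bytes_to_skip(1024, 10)    # 1014: finish out the partial record
    bytes_to_skip(1024, 2048)  # 0: already aligned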
diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py
index 002582a1a..a38e753ae 100755
--- a/test/unit/proxy/controllers/test_obj.py
+++ b/test/unit/proxy/controllers/test_obj.py
@@ -14,11 +14,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import email.parser
import itertools
import random
import time
import unittest
+from collections import defaultdict
from contextlib import contextmanager
+import json
+from hashlib import md5
import mock
from eventlet import Timeout
@@ -26,13 +30,26 @@ from eventlet import Timeout
import swift
from swift.common import utils, swob
from swift.proxy import server as proxy_server
-from swift.common.storage_policy import StoragePolicy, POLICIES
+from swift.proxy.controllers import obj
+from swift.proxy.controllers.base import get_info as _real_get_info
+from swift.common.storage_policy import POLICIES, ECDriverError
from test.unit import FakeRing, FakeMemcache, fake_http_connect, \
- debug_logger, patch_policies
+ debug_logger, patch_policies, SlowBody
from test.unit.proxy.test_server import node_error_count
+def unchunk_body(chunked_body):
+ body = ''
+ remaining = chunked_body
+ while remaining:
+ hex_length, remaining = remaining.split('\r\n', 1)
+ length = int(hex_length, 16)
+ body += remaining[:length]
+ remaining = remaining[length + 2:]
+ return body
+
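unchunk_body reverses HTTP/1.1 chunked transfer encoding: each chunk is a hex
length, CRLF, payload, CRLF, terminated by a zero-length chunk. For example:

    chunked = '5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n'
    unchunk_body(chunked)  # 'hello world'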
+
@contextmanager
def set_http_connect(*args, **kwargs):
old_connect = swift.proxy.controllers.base.http_connect
@@ -55,31 +72,76 @@ def set_http_connect(*args, **kwargs):
class PatchedObjControllerApp(proxy_server.Application):
"""
- This patch is just a hook over handle_request to ensure that when
- get_controller is called the ObjectController class is patched to
- return a (possibly stubbed) ObjectController class.
+ This patch is just a hook over the proxy server's __call__ to ensure
+ that calls to get_info return the stubbed container_info value when
+ the request asks for container info.
"""
- object_controller = proxy_server.ObjectController
+ container_info = {}
+ per_container_info = {}
+
+ def __call__(self, *args, **kwargs):
- def handle_request(self, req):
- with mock.patch('swift.proxy.server.ObjectController',
- new=self.object_controller):
- return super(PatchedObjControllerApp, self).handle_request(req)
+ def _fake_get_info(app, env, account, container=None, **kwargs):
+ if container:
+ if container in self.per_container_info:
+ return self.per_container_info[container]
+ return self.container_info
+ else:
+ return _real_get_info(app, env, account, container, **kwargs)
+ mock_path = 'swift.proxy.controllers.base.get_info'
+ with mock.patch(mock_path, new=_fake_get_info):
+ return super(
+ PatchedObjControllerApp, self).__call__(*args, **kwargs)
+
+
+class BaseObjectControllerMixin(object):
+ container_info = {
+ 'write_acl': None,
+ 'read_acl': None,
+ 'storage_policy': None,
+ 'sync_key': None,
+ 'versions': None,
+ }
+
+ # this needs to be set on the test case
+ controller_cls = None
-@patch_policies([StoragePolicy(0, 'zero', True,
- object_ring=FakeRing(max_more_nodes=9))])
-class TestObjControllerWriteAffinity(unittest.TestCase):
def setUp(self):
- self.app = proxy_server.Application(
+ # setup fake rings with handoffs
+ for policy in POLICIES:
+ policy.object_ring.max_more_nodes = policy.object_ring.replicas
+
+ self.logger = debug_logger('proxy-server')
+ self.logger.thread_locals = ('txn1', '127.0.0.2')
+ self.app = PatchedObjControllerApp(
None, FakeMemcache(), account_ring=FakeRing(),
- container_ring=FakeRing(), logger=debug_logger())
- self.app.request_node_count = lambda ring: 10000000
- self.app.sort_nodes = lambda l: l # stop shuffling the primary nodes
+ container_ring=FakeRing(), logger=self.logger)
+ # you can override the container_info just by setting it on the app
+ self.app.container_info = dict(self.container_info)
+ # default policy and ring references
+ self.policy = POLICIES.default
+ self.obj_ring = self.policy.object_ring
+ self._ts_iter = (utils.Timestamp(t) for t in
+ itertools.count(int(time.time())))
+
+ def ts(self):
+ return self._ts_iter.next()
+
+ def replicas(self, policy=None):
+ policy = policy or POLICIES.default
+ return policy.object_ring.replicas
+
+ def quorum(self, policy=None):
+ policy = policy or POLICIES.default
+ return policy.quorum
def test_iter_nodes_local_first_noops_when_no_affinity(self):
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ # this test needs a stable node order - most don't
+ self.app.sort_nodes = lambda l: l
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
self.app.write_affinity_is_local_fn = None
object_ring = self.app.get_object_ring(None)
all_nodes = object_ring.get_part_nodes(1)
@@ -93,80 +155,335 @@ class TestObjControllerWriteAffinity(unittest.TestCase):
self.assertEqual(all_nodes, local_first_nodes)
def test_iter_nodes_local_first_moves_locals_first(self):
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
self.app.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
- self.app.write_affinity_node_count = lambda ring: 4
+ # we'll write to one more than replica count local nodes
+ self.app.write_affinity_node_count = lambda r: r + 1
object_ring = self.app.get_object_ring(None)
+ # make our fake ring have plenty of nodes, and not get limited
+ # artificially by the proxy max request node count
+ object_ring.max_more_nodes = 100000
+ self.app.request_node_count = lambda r: 100000
+
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
+ # FakeRing caps get_more_nodes with a modest limit so the iterator
+ # can safely be converted to a list; that limit *can* be lower than
+ # max_more_nodes
+ fake_rings_real_max_more_nodes_value = object_ring.replicas ** 2
+ self.assertEqual(len(all_nodes), fake_rings_real_max_more_nodes_value)
+
+ # make sure we have enough local nodes (sanity)
+ all_local_nodes = [n for n in all_nodes if
+ self.app.write_affinity_is_local_fn(n)]
+ self.assertTrue(len(all_local_nodes) >= self.replicas() + 1)
+
+ # finally, create the local_first_nodes iter and flatten it out
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
# the local nodes move up in the ordering
- self.assertEqual([1, 1, 1, 1],
- [node['region'] for node in local_first_nodes[:4]])
+ self.assertEqual([1] * (self.replicas() + 1), [
+ node['region'] for node in local_first_nodes[
+ :self.replicas() + 1]])
# we don't skip any nodes
self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
+ def test_iter_nodes_local_first_best_effort(self):
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
+ self.app.write_affinity_is_local_fn = (
+ lambda node: node['region'] == 1)
+
+ object_ring = self.app.get_object_ring(None)
+ all_nodes = object_ring.get_part_nodes(1)
+ all_nodes.extend(object_ring.get_more_nodes(1))
+
+ local_first_nodes = list(controller.iter_nodes_local_first(
+ object_ring, 1))
+
+ # we won't have quite enough local nodes...
+ self.assertEqual(len(all_nodes), self.replicas() +
+ POLICIES.default.object_ring.max_more_nodes)
+ all_local_nodes = [n for n in all_nodes if
+ self.app.write_affinity_is_local_fn(n)]
+ self.assertEqual(len(all_local_nodes), self.replicas())
+ # but the local nodes we do have are at the front of the local iter
+ first_n_local_first_nodes = local_first_nodes[:len(all_local_nodes)]
+ self.assertEqual(sorted(all_local_nodes),
+ sorted(first_n_local_first_nodes))
+ # but we *still* don't *skip* any nodes
+ self.assertEqual(len(all_nodes), len(local_first_nodes))
+ self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
+
def test_connect_put_node_timeout(self):
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
self.app.conn_timeout = 0.05
with set_http_connect(slow_connect=True):
nodes = [dict(ip='', port='', device='')]
res = controller._connect_put_node(nodes, '', '', {}, ('', ''))
self.assertTrue(res is None)
+ def test_DELETE_simple(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ codes = [204] * self.replicas()
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+
+ def test_DELETE_missing_one(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ codes = [404] + [204] * (self.replicas() - 1)
+ random.shuffle(codes)
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
-@patch_policies([
- StoragePolicy(0, 'zero', True),
- StoragePolicy(1, 'one'),
- StoragePolicy(2, 'two'),
-])
-class TestObjController(unittest.TestCase):
- container_info = {
- 'partition': 1,
- 'nodes': [
- {'ip': '127.0.0.1', 'port': '1', 'device': 'sda'},
- {'ip': '127.0.0.1', 'port': '2', 'device': 'sda'},
- {'ip': '127.0.0.1', 'port': '3', 'device': 'sda'},
- ],
- 'write_acl': None,
- 'read_acl': None,
- 'storage_policy': None,
- 'sync_key': None,
- 'versions': None,
- }
+ def test_DELETE_not_found(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ codes = [404] * (self.replicas() - 1) + [204]
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 404)
- def setUp(self):
- # setup fake rings with handoffs
- self.obj_ring = FakeRing(max_more_nodes=3)
- for policy in POLICIES:
- policy.object_ring = self.obj_ring
+ def test_DELETE_mostly_found(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ mostly_204s = [204] * self.quorum()
+ codes = mostly_204s + [404] * (self.replicas() - len(mostly_204s))
+ self.assertEqual(len(codes), self.replicas())
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
- logger = debug_logger('proxy-server')
- logger.thread_locals = ('txn1', '127.0.0.2')
- self.app = PatchedObjControllerApp(
- None, FakeMemcache(), account_ring=FakeRing(),
- container_ring=FakeRing(), logger=logger)
+ def test_DELETE_mostly_not_found(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ mostly_404s = [404] * self.quorum()
+ codes = mostly_404s + [204] * (self.replicas() - len(mostly_404s))
+ self.assertEqual(len(codes), self.replicas())
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 404)
+
+ def test_DELETE_half_not_found_statuses(self):
+ self.obj_ring.set_replicas(4)
+
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ with set_http_connect(404, 204, 404, 204):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+
+ def test_DELETE_half_not_found_headers_and_body(self):
+ # Transformed responses have bogus bodies and headers, so make sure we
+ # send the client headers and body from a real node's response.
+ self.obj_ring.set_replicas(4)
+
+ status_codes = (404, 404, 204, 204)
+ bodies = ('not found', 'not found', '', '')
+ headers = [{}, {}, {'Pick-Me': 'yes'}, {'Pick-Me': 'yes'}]
+
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ with set_http_connect(*status_codes, body_iter=bodies,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+ self.assertEquals(resp.headers.get('Pick-Me'), 'yes')
+ self.assertEquals(resp.body, '')
+
+ def test_DELETE_handoff(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ codes = [204] * self.replicas()
+ with set_http_connect(507, *codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+
+ def test_POST_non_int_delete_after(self):
+ t = str(int(time.time() + 100)) + '.1'
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': t})
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('Non-integer X-Delete-After', resp.body)
+
+ def test_PUT_non_int_delete_after(self):
+ t = str(int(time.time() + 100)) + '.1'
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': t})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('Non-integer X-Delete-After', resp.body)
+
+ def test_POST_negative_delete_after(self):
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': '-60'})
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('X-Delete-After in past', resp.body)
+
+ def test_PUT_negative_delete_after(self):
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': '-60'})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('X-Delete-After in past', resp.body)
+
+ def test_POST_delete_at_non_integer(self):
+ t = str(int(time.time() + 100)) + '.1'
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('Non-integer X-Delete-At', resp.body)
+
+ def test_PUT_delete_at_non_integer(self):
+ t = str(int(time.time() - 100)) + '.1'
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('Non-integer X-Delete-At', resp.body)
+
+ def test_POST_delete_at_in_past(self):
+ t = str(int(time.time() - 100))
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('X-Delete-At in past', resp.body)
+
+ def test_PUT_delete_at_in_past(self):
+ t = str(int(time.time() - 100))
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('X-Delete-At in past', resp.body)
- class FakeContainerInfoObjController(proxy_server.ObjectController):
+ def test_HEAD_simple(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
+ with set_http_connect(200):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
- def container_info(controller, *args, **kwargs):
- patch_path = 'swift.proxy.controllers.base.get_info'
- with mock.patch(patch_path) as mock_get_info:
- mock_get_info.return_value = dict(self.container_info)
- return super(FakeContainerInfoObjController,
- controller).container_info(*args, **kwargs)
+ def test_HEAD_x_newest(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD',
+ headers={'X-Newest': 'true'})
+ with set_http_connect(200, 200, 200):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_HEAD_x_newest_different_timestamps(self):
+ req = swob.Request.blank('/v1/a/c/o', method='HEAD',
+ headers={'X-Newest': 'true'})
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ timestamps = [next(ts) for i in range(3)]
+ newest_timestamp = timestamps[-1]
+ random.shuffle(timestamps)
+ backend_response_headers = [{
+ 'X-Backend-Timestamp': t.internal,
+ 'X-Timestamp': t.normal
+ } for t in timestamps]
+ with set_http_connect(200, 200, 200,
+ headers=backend_response_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.headers['x-timestamp'], newest_timestamp.normal)
- # this is taking advantage of the fact that self.app is a
- # PachedObjControllerApp, so handle_response will route into an
- # instance of our FakeContainerInfoObjController just by
- # overriding the class attribute for object_controller
- self.app.object_controller = FakeContainerInfoObjController
+ def test_HEAD_x_newest_with_two_vector_timestamps(self):
+ req = swob.Request.blank('/v1/a/c/o', method='HEAD',
+ headers={'X-Newest': 'true'})
+ ts = (utils.Timestamp(time.time(), offset=offset)
+ for offset in itertools.count())
+ timestamps = [next(ts) for i in range(3)]
+ newest_timestamp = timestamps[-1]
+ random.shuffle(timestamps)
+ backend_response_headers = [{
+ 'X-Backend-Timestamp': t.internal,
+ 'X-Timestamp': t.normal
+ } for t in timestamps]
+ with set_http_connect(200, 200, 200,
+ headers=backend_response_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.headers['x-backend-timestamp'],
+ newest_timestamp.internal)
+
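The "two vector" timestamps above share one wall-clock time and differ only
in offset. In Swift's Timestamp the offset extends the internal form, so it
breaks ties without changing the client-visible X-Timestamp; illustratively
(the exact rendering shown here is an assumption):

    from swift.common.utils import Timestamp

    t = Timestamp(1400000000.12345, offset=3)
    t.normal    # '1400000000.12345' (what X-Timestamp carries)
    t.internal  # '1400000000.12345_0000000000000003' (sorts after offset=2)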
+ def test_HEAD_x_newest_with_some_missing(self):
+ req = swob.Request.blank('/v1/a/c/o', method='HEAD',
+ headers={'X-Newest': 'true'})
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ request_count = self.app.request_node_count(self.obj_ring.replicas)
+ backend_response_headers = [{
+ 'x-timestamp': next(ts).normal,
+ } for i in range(request_count)]
+ responses = [404] * (request_count - 1)
+ responses.append(200)
+ request_log = []
+
+ def capture_requests(ip, port, device, part, method, path,
+ headers=None, **kwargs):
+ req = {
+ 'ip': ip,
+ 'port': port,
+ 'device': device,
+ 'part': part,
+ 'method': method,
+ 'path': path,
+ 'headers': headers,
+ }
+ request_log.append(req)
+ with set_http_connect(*responses,
+ headers=backend_response_headers,
+ give_connect=capture_requests):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ for req in request_log:
+ self.assertEqual(req['method'], 'HEAD')
+ self.assertEqual(req['path'], '/a/c/o')
+
+ def test_container_sync_delete(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='DELETE', headers={
+ 'X-Timestamp': ts.next().internal})
+ codes = [409] * self.obj_ring.replicas
+ ts_iter = itertools.repeat(ts.next().internal)
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 409)
+
+ def test_PUT_requires_length(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 411)
+
+# end of BaseObjectControllerMixin
+
+
+@patch_policies()
+class TestReplicatedObjController(BaseObjectControllerMixin,
+ unittest.TestCase):
+
+ controller_cls = obj.ReplicatedObjectController
def test_PUT_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
@@ -279,56 +596,6 @@ class TestObjController(unittest.TestCase):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 404)
- def test_DELETE_simple(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
- with set_http_connect(204, 204, 204):
- resp = req.get_response(self.app)
- self.assertEquals(resp.status_int, 204)
-
- def test_DELETE_missing_one(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
- with set_http_connect(404, 204, 204):
- resp = req.get_response(self.app)
- self.assertEquals(resp.status_int, 204)
-
- def test_DELETE_half_not_found_statuses(self):
- self.obj_ring.set_replicas(4)
-
- req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
- with set_http_connect(404, 204, 404, 204):
- resp = req.get_response(self.app)
- self.assertEquals(resp.status_int, 204)
-
- def test_DELETE_half_not_found_headers_and_body(self):
- # Transformed responses have bogus bodies and headers, so make sure we
- # send the client headers and body from a real node's response.
- self.obj_ring.set_replicas(4)
-
- status_codes = (404, 404, 204, 204)
- bodies = ('not found', 'not found', '', '')
- headers = [{}, {}, {'Pick-Me': 'yes'}, {'Pick-Me': 'yes'}]
-
- req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
- with set_http_connect(*status_codes, body_iter=bodies,
- headers=headers):
- resp = req.get_response(self.app)
- self.assertEquals(resp.status_int, 204)
- self.assertEquals(resp.headers.get('Pick-Me'), 'yes')
- self.assertEquals(resp.body, '')
-
- def test_DELETE_not_found(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
- with set_http_connect(404, 404, 204):
- resp = req.get_response(self.app)
- self.assertEquals(resp.status_int, 404)
-
- def test_DELETE_handoff(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
- codes = [204] * self.obj_ring.replicas
- with set_http_connect(507, *codes):
- resp = req.get_response(self.app)
- self.assertEquals(resp.status_int, 204)
-
def test_POST_as_COPY_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST')
head_resp = [200] * self.obj_ring.replicas + \
@@ -364,45 +631,11 @@ class TestObjController(unittest.TestCase):
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
- def test_POST_non_int_delete_after(self):
- t = str(int(time.time() + 100)) + '.1'
- req = swob.Request.blank('/v1/a/c/o', method='POST',
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-After': t})
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
- self.assertEqual('Non-integer X-Delete-After', resp.body)
-
- def test_POST_negative_delete_after(self):
- req = swob.Request.blank('/v1/a/c/o', method='POST',
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-After': '-60'})
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
- self.assertEqual('X-Delete-After in past', resp.body)
-
- def test_POST_delete_at_non_integer(self):
- t = str(int(time.time() + 100)) + '.1'
- req = swob.Request.blank('/v1/a/c/o', method='POST',
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
- self.assertEqual('Non-integer X-Delete-At', resp.body)
-
- def test_POST_delete_at_in_past(self):
- t = str(int(time.time() - 100))
- req = swob.Request.blank('/v1/a/c/o', method='POST',
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
- self.assertEqual('X-Delete-At in past', resp.body)
-
- def test_PUT_converts_delete_after_to_delete_at(self):
+ def test_PUT_delete_at(self):
+ t = str(int(time.time() + 100))
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
- 'X-Delete-After': '60'})
+ 'X-Delete-At': t})
put_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
@@ -410,44 +643,20 @@ class TestObjController(unittest.TestCase):
if method == 'PUT':
put_headers.append(headers)
codes = [201] * self.obj_ring.replicas
- t = time.time()
with set_http_connect(*codes, give_connect=capture_headers):
- with mock.patch('time.time', lambda: t):
- resp = req.get_response(self.app)
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
- expected_delete_at = str(int(t) + 60)
for given_headers in put_headers:
- self.assertEquals(given_headers.get('X-Delete-At'),
- expected_delete_at)
+ self.assertEquals(given_headers.get('X-Delete-At'), t)
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
- def test_PUT_non_int_delete_after(self):
- t = str(int(time.time() + 100)) + '.1'
- req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-After': t})
- with set_http_connect():
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
- self.assertEqual('Non-integer X-Delete-After', resp.body)
-
- def test_PUT_negative_delete_after(self):
- req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-After': '-60'})
- with set_http_connect():
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
- self.assertEqual('X-Delete-After in past', resp.body)
-
- def test_PUT_delete_at(self):
- t = str(int(time.time() + 100))
+ def test_PUT_converts_delete_after_to_delete_at(self):
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
+ 'X-Delete-After': '60'})
put_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
@@ -455,40 +664,24 @@ class TestObjController(unittest.TestCase):
if method == 'PUT':
put_headers.append(headers)
codes = [201] * self.obj_ring.replicas
+ t = time.time()
with set_http_connect(*codes, give_connect=capture_headers):
- resp = req.get_response(self.app)
+ with mock.patch('time.time', lambda: t):
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
+ expected_delete_at = str(int(t) + 60)
for given_headers in put_headers:
- self.assertEquals(given_headers.get('X-Delete-At'), t)
+ self.assertEquals(given_headers.get('X-Delete-At'),
+ expected_delete_at)
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
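The conversion being pinned down is simple: X-Delete-After is a relative TTL
that the proxy turns into an absolute X-Delete-At epoch at request time, as
the captured PUT headers show. Roughly (hypothetical helper name):

    import time

    def delete_after_to_delete_at(x_delete_after):
        # relative TTL (seconds) -> absolute expiration epoch (integer)
        return str(int(time.time()) + int(x_delete_after))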
- def test_PUT_delete_at_non_integer(self):
- t = str(int(time.time() - 100)) + '.1'
- req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- with set_http_connect():
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
- self.assertEqual('Non-integer X-Delete-At', resp.body)
-
- def test_PUT_delete_at_in_past(self):
- t = str(int(time.time() - 100))
- req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- with set_http_connect():
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
- self.assertEqual('X-Delete-At in past', resp.body)
-
def test_container_sync_put_x_timestamp_not_found(self):
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
- self.container_info['storage_policy'] = policy_index
+ self.app.container_info['storage_policy'] = policy_index
put_timestamp = utils.Timestamp(time.time()).normal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
@@ -502,7 +695,7 @@ class TestObjController(unittest.TestCase):
def test_container_sync_put_x_timestamp_match(self):
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
- self.container_info['storage_policy'] = policy_index
+ self.app.container_info['storage_policy'] = policy_index
put_timestamp = utils.Timestamp(time.time()).normal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
@@ -518,7 +711,7 @@ class TestObjController(unittest.TestCase):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
- self.container_info['storage_policy'] = policy_index
+ self.app.container_info['storage_policy'] = policy_index
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
@@ -544,19 +737,6 @@ class TestObjController(unittest.TestCase):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
- def test_container_sync_delete(self):
- ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
- test_indexes = [None] + [int(p) for p in POLICIES]
- for policy_index in test_indexes:
- req = swob.Request.blank(
- '/v1/a/c/o', method='DELETE', headers={
- 'X-Timestamp': ts.next().internal})
- codes = [409] * self.obj_ring.replicas
- ts_iter = itertools.repeat(ts.next().internal)
- with set_http_connect(*codes, timestamps=ts_iter):
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 409)
-
def test_put_x_timestamp_conflict(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
@@ -624,88 +804,6 @@ class TestObjController(unittest.TestCase):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
- def test_HEAD_simple(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
- with set_http_connect(200):
- resp = req.get_response(self.app)
- self.assertEquals(resp.status_int, 200)
-
- def test_HEAD_x_newest(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD',
- headers={'X-Newest': 'true'})
- with set_http_connect(200, 200, 200):
- resp = req.get_response(self.app)
- self.assertEquals(resp.status_int, 200)
-
- def test_HEAD_x_newest_different_timestamps(self):
- req = swob.Request.blank('/v1/a/c/o', method='HEAD',
- headers={'X-Newest': 'true'})
- ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
- timestamps = [next(ts) for i in range(3)]
- newest_timestamp = timestamps[-1]
- random.shuffle(timestamps)
- backend_response_headers = [{
- 'X-Backend-Timestamp': t.internal,
- 'X-Timestamp': t.normal
- } for t in timestamps]
- with set_http_connect(200, 200, 200,
- headers=backend_response_headers):
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
- self.assertEqual(resp.headers['x-timestamp'], newest_timestamp.normal)
-
- def test_HEAD_x_newest_with_two_vector_timestamps(self):
- req = swob.Request.blank('/v1/a/c/o', method='HEAD',
- headers={'X-Newest': 'true'})
- ts = (utils.Timestamp(time.time(), offset=offset)
- for offset in itertools.count())
- timestamps = [next(ts) for i in range(3)]
- newest_timestamp = timestamps[-1]
- random.shuffle(timestamps)
- backend_response_headers = [{
- 'X-Backend-Timestamp': t.internal,
- 'X-Timestamp': t.normal
- } for t in timestamps]
- with set_http_connect(200, 200, 200,
- headers=backend_response_headers):
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
- self.assertEqual(resp.headers['x-backend-timestamp'],
- newest_timestamp.internal)
-
- def test_HEAD_x_newest_with_some_missing(self):
- req = swob.Request.blank('/v1/a/c/o', method='HEAD',
- headers={'X-Newest': 'true'})
- ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
- request_count = self.app.request_node_count(self.obj_ring.replicas)
- backend_response_headers = [{
- 'x-timestamp': next(ts).normal,
- } for i in range(request_count)]
- responses = [404] * (request_count - 1)
- responses.append(200)
- request_log = []
-
- def capture_requests(ip, port, device, part, method, path,
- headers=None, **kwargs):
- req = {
- 'ip': ip,
- 'port': port,
- 'device': device,
- 'part': part,
- 'method': method,
- 'path': path,
- 'headers': headers,
- }
- request_log.append(req)
- with set_http_connect(*responses,
- headers=backend_response_headers,
- give_connect=capture_requests):
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
- for req in request_log:
- self.assertEqual(req['method'], 'HEAD')
- self.assertEqual(req['path'], '/a/c/o')
-
def test_PUT_log_info(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['x-copy-from'] = 'some/where'
@@ -731,18 +829,15 @@ class TestObjController(unittest.TestCase):
self.assertEquals(req.environ.get('swift.log_info'), None)
-@patch_policies([
- StoragePolicy(0, 'zero', True),
- StoragePolicy(1, 'one'),
- StoragePolicy(2, 'two'),
-])
-class TestObjControllerLegacyCache(TestObjController):
+@patch_policies(legacy_only=True)
+class TestObjControllerLegacyCache(TestReplicatedObjController):
"""
This test pretends like memcache returned a stored value that should
resemble whatever "old" format. It catches KeyErrors you'd get if your
code was expecting some new format during a rolling upgrade.
"""
+ # in this case policy_index is missing
container_info = {
'read_acl': None,
'write_acl': None,
@@ -750,6 +845,567 @@ class TestObjControllerLegacyCache(TestObjController):
'versions': None,
}
+ def test_invalid_storage_policy_cache(self):
+ self.app.container_info['storage_policy'] = 1
+ for method in ('GET', 'HEAD', 'POST', 'PUT', 'COPY'):
+ req = swob.Request.blank('/v1/a/c/o', method=method)
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 503)
+
+
+@patch_policies(with_ec_default=True)
+class TestECObjController(BaseObjectControllerMixin, unittest.TestCase):
+ container_info = {
+ 'read_acl': None,
+ 'write_acl': None,
+ 'sync_key': None,
+ 'versions': None,
+ 'storage_policy': '0',
+ }
+
+ controller_cls = obj.ECObjectController
+
+ def test_determine_chunk_destinations(self):
+ class FakePutter(object):
+ def __init__(self, index):
+ self.node_index = index
+
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
+
+ # create a dummy list of putters, check no handoffs
+ putters = []
+ for index in range(0, 4):
+ putters.append(FakePutter(index))
+ got = controller._determine_chunk_destinations(putters)
+ expected = {}
+ for i, p in enumerate(putters):
+ expected[p] = i
+ self.assertEquals(got, expected)
+
+ # now let's make a handoff at the end
+ putters[3].node_index = None
+ got = controller._determine_chunk_destinations(putters)
+ self.assertEquals(got, expected)
+ putters[3].node_index = 3
+
+ # now let's make a handoff at the start
+ putters[0].node_index = None
+ got = controller._determine_chunk_destinations(putters)
+ self.assertEquals(got, expected)
+ putters[0].node_index = 0
+
+ # now let's make a handoff in the middle
+ putters[2].node_index = None
+ got = controller._determine_chunk_destinations(putters)
+ self.assertEquals(got, expected)
+ putters[2].node_index = 0
+
+ # now let's make all of them handoffs
+ for index in range(0, 4):
+ putters[index].node_index = None
+ got = controller._determine_chunk_destinations(putters)
+ self.assertEquals(got, expected)
+
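A sketch of the assignment behavior this test pins down (hypothetical code;
the controller's real _determine_chunk_destinations may differ): primaries
keep their own fragment index, and handoffs fill whatever indexes are left.

    def determine_chunk_destinations(putters):
        mapping, handoffs = {}, []
        for putter in putters:
            if putter.node_index is None:
                handoffs.append(putter)  # handoff: no assigned index
            else:
                mapping[putter] = putter.node_index
        # hand the unclaimed fragment indexes to the handoff putters
        unclaimed = sorted(set(range(len(putters))) - set(mapping.values()))
        for putter, index in zip(handoffs, unclaimed):
            mapping[putter] = index
        return mapping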
+ def test_GET_simple(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o')
+ get_resp = [200] * self.policy.ec_ndata
+ with set_http_connect(*get_resp):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_GET_simple_x_newest(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o',
+ headers={'X-Newest': 'true'})
+ codes = [200] * self.replicas()
+ codes += [404] * self.obj_ring.max_more_nodes
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_GET_error(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o')
+ get_resp = [503] + [200] * self.policy.ec_ndata
+ with set_http_connect(*get_resp):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_GET_with_body(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o')
+ # turn a real body into fragments
+ segment_size = self.policy.ec_segment_size
+ real_body = ('asdf' * segment_size)[:-10]
+ # split it up into chunks
+ chunks = [real_body[x:x + segment_size]
+ for x in range(0, len(real_body), segment_size)]
+ fragment_payloads = []
+ for chunk in chunks:
+ fragments = self.policy.pyeclib_driver.encode(chunk)
+ if not fragments:
+ break
+ fragment_payloads.append(fragments)
+ # sanity
+ sanity_body = ''
+ for fragment_payload in fragment_payloads:
+ sanity_body += self.policy.pyeclib_driver.decode(
+ fragment_payload)
+ self.assertEqual(len(real_body), len(sanity_body))
+ self.assertEqual(real_body, sanity_body)
+
+ node_fragments = zip(*fragment_payloads)
+ self.assertEqual(len(node_fragments), self.replicas()) # sanity
+ responses = [(200, ''.join(node_fragments[i]), {})
+ for i in range(POLICIES.default.ec_ndata)]
+ status_codes, body_iter, headers = zip(*responses)
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+ self.assertEqual(len(real_body), len(resp.body))
+ self.assertEqual(real_body, resp.body)
+
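The fragment plumbing above drives pyeclib directly; a self-contained round
trip with the same parameters as the patched-in default EC policy (assuming
the jerasure backend is installed):

    from pyeclib.ec_iface import ECDriver

    driver = ECDriver(k=10, m=4, ec_type='jerasure_rs_vand')
    fragments = driver.encode('some object data')  # k + m = 14 fragments
    # any k fragments suffice to reconstruct the original bytes
    assert driver.decode(fragments[4:]) == 'some object data'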
+ def test_PUT_simple(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_with_explicit_commit_status(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [(100, 100, 201)] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_error(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [503] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 503)
+
+ def test_PUT_mostly_success(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * self.quorum()
+ codes += [503] * (self.replicas() - len(codes))
+ random.shuffle(codes)
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_error_commit(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [(100, 503, Exception('not used'))] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 503)
+
+ def test_PUT_mostly_success_commit(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * self.quorum()
+ codes += [(100, 503, Exception('not used'))] * (
+ self.replicas() - len(codes))
+ random.shuffle(codes)
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_mostly_error_commit(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [(100, 503, Exception('not used'))] * self.quorum()
+ codes += [201] * (self.replicas() - len(codes))
+ random.shuffle(codes)
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 503)
+
+ def test_PUT_commit_timeout(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * (self.replicas() - 1)
+ codes.append((100, Timeout(), Exception('not used')))
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_commit_exception(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * (self.replicas() - 1)
+ codes.append((100, Exception('kaboom!'), Exception('not used')))
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_with_body(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
+ segment_size = self.policy.ec_segment_size
+ test_body = ('asdf' * segment_size)[:-10]
+ etag = md5(test_body).hexdigest()
+ size = len(test_body)
+ req.body = test_body
+ codes = [201] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+
+ put_requests = defaultdict(lambda: {'boundary': None, 'chunks': []})
+
+ def capture_body(conn_id, chunk):
+ put_requests[conn_id]['chunks'].append(chunk)
+
+ def capture_headers(ip, port, device, part, method, path, headers,
+ **kwargs):
+ conn_id = kwargs['connection_id']
+ put_requests[conn_id]['boundary'] = headers[
+ 'X-Backend-Obj-Multipart-Mime-Boundary']
+
+ with set_http_connect(*codes, expect_headers=expect_headers,
+ give_send=capture_body,
+ give_connect=capture_headers):
+ resp = req.get_response(self.app)
+
+ self.assertEquals(resp.status_int, 201)
+ frag_archives = []
+ for connection_id, info in put_requests.items():
+ body = unchunk_body(''.join(info['chunks']))
+ self.assertTrue(info['boundary'] is not None,
+ "didn't get boundary for conn %r" % (
+ connection_id,))
+
+ # email.parser.FeedParser doesn't know how to take a multipart
+ # message and boundary together and parse it; it only knows how
+ # to take a string, parse the headers, and figure out the
+ # boundary on its own.
+ parser = email.parser.FeedParser()
+ parser.feed(
+ "Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" %
+ info['boundary'])
+ parser.feed(body)
+ message = parser.close()
+
+ self.assertTrue(message.is_multipart()) # sanity check
+ mime_parts = message.get_payload()
+ self.assertEqual(len(mime_parts), 3)
+ obj_part, footer_part, commit_part = mime_parts
+
+ # attach the body to frag_archives list
+ self.assertEqual(obj_part['X-Document'], 'object body')
+ frag_archives.append(obj_part.get_payload())
+
+ # validate some footer metadata
+ self.assertEqual(footer_part['X-Document'], 'object metadata')
+ footer_metadata = json.loads(footer_part.get_payload())
+ self.assertTrue(footer_metadata)
+ expected = {
+ 'X-Object-Sysmeta-EC-Content-Length': str(size),
+ 'X-Backend-Container-Update-Override-Size': str(size),
+ 'X-Object-Sysmeta-EC-Etag': etag,
+ 'X-Backend-Container-Update-Override-Etag': etag,
+ 'X-Object-Sysmeta-EC-Segment-Size': str(segment_size),
+ }
+ for header, value in expected.items():
+ self.assertEqual(footer_metadata[header], value)
+
+ # sanity on commit message
+ self.assertEqual(commit_part['X-Document'], 'put commit')
+
+ self.assertEqual(len(frag_archives), self.replicas())
+ fragment_size = self.policy.fragment_size
+ node_payloads = []
+ for fa in frag_archives:
+ payload = [fa[x:x + fragment_size]
+ for x in range(0, len(fa), fragment_size)]
+ node_payloads.append(payload)
+ fragment_payloads = zip(*node_payloads)
+
+ expected_body = ''
+ for fragment_payload in fragment_payloads:
+ self.assertEqual(len(fragment_payload), self.replicas())
+ fragment_payload = list(fragment_payload)
+ expected_body += self.policy.pyeclib_driver.decode(
+ fragment_payload)
+
+ self.assertEqual(len(test_body), len(expected_body))
+ self.assertEqual(test_body, expected_body)
+
+ def test_PUT_old_obj_server(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ responses = [
+ # one server will respond with 100 Continue but not include the
+ # needed expect headers, and the connection will be dropped
+ ((100, Exception('not used')), {}),
+ ] + [
+ # and plenty of successful responses too
+ (201, {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes',
+ }),
+ ] * self.replicas()
+ random.shuffle(responses)
+ if responses[-1][0] != 201:
+ # whoops, stupid random
+ responses = responses[1:] + [responses[0]]
+ codes, expect_headers = zip(*responses)
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_COPY_cross_policy_type_from_replicated(self):
+ self.app.per_container_info = {
+ 'c1': self.app.container_info.copy(),
+ 'c2': self.app.container_info.copy(),
+ }
+ # make c2 use replicated storage policy 1
+ self.app.per_container_info['c2']['storage_policy'] = '1'
+
+ # a put request with copy from source c2
+ req = swift.common.swob.Request.blank('/v1/a/c1/o', method='PUT',
+ body='', headers={
+ 'X-Copy-From': 'c2/o'})
+
+ # c2 get
+ codes = [200] * self.replicas(POLICIES[1])
+ codes += [404] * POLICIES[1].object_ring.max_more_nodes
+ # c1 put
+ codes += [201] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 201)
+
+ def test_COPY_cross_policy_type_to_replicated(self):
+ self.app.per_container_info = {
+ 'c1': self.app.container_info.copy(),
+ 'c2': self.app.container_info.copy(),
+ }
+ # make c1 use replicated storage policy 1
+ self.app.per_container_info['c1']['storage_policy'] = '1'
+
+ # a put request with copy from source c2
+ req = swift.common.swob.Request.blank('/v1/a/c1/o', method='PUT',
+ body='', headers={
+ 'X-Copy-From': 'c2/o'})
+
+ # c2 get
+ codes = [200] * self.replicas()
+ codes += [404] * self.obj_ring.max_more_nodes
+ headers = {
+ 'X-Object-Sysmeta-Ec-Content-Length': 0,
+ }
+ # c1 put
+ codes += [201] * self.replicas(POLICIES[1])
+ with set_http_connect(*codes, headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 201)
+
+ def test_COPY_cross_policy_type_unknown(self):
+ self.app.per_container_info = {
+ 'c1': self.app.container_info.copy(),
+ 'c2': self.app.container_info.copy(),
+ }
+ # make c1 use some made up storage policy index
+ self.app.per_container_info['c1']['storage_policy'] = '13'
+
+ # a COPY request of c2 with destination in c1
+ req = swift.common.swob.Request.blank('/v1/a/c2/o', method='COPY',
+ body='', headers={
+ 'Destination': 'c1/o'})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 503)
+
+ def _make_ec_archive_bodies(self, test_body, policy=None):
+ policy = policy or self.policy
+ segment_size = policy.ec_segment_size
+ # split up the body into buffers
+ chunks = [test_body[x:x + segment_size]
+ for x in range(0, len(test_body), segment_size)]
+ # encode the buffers into fragment payloads
+ fragment_payloads = []
+ for chunk in chunks:
+ fragments = self.policy.pyeclib_driver.encode(chunk)
+ if not fragments:
+ break
+ fragment_payloads.append(fragments)
+
+ # join up the fragment payloads per node
+ ec_archive_bodies = [''.join(fragments)
+ for fragments in zip(*fragment_payloads)]
+ return ec_archive_bodies
+
+ def test_GET_mismatched_fragment_archives(self):
+ segment_size = self.policy.ec_segment_size
+ test_data1 = ('test' * segment_size)[:-333]
+ # N.B. the object data *length* here is different
+ test_data2 = ('blah1' * segment_size)[:-333]
+
+ etag1 = md5(test_data1).hexdigest()
+ etag2 = md5(test_data2).hexdigest()
+
+ ec_archive_bodies1 = self._make_ec_archive_bodies(test_data1)
+ ec_archive_bodies2 = self._make_ec_archive_bodies(test_data2)
+
+ headers1 = {'X-Object-Sysmeta-Ec-Etag': etag1}
+ # here we're going to *lie* and say this etag matches too
+ headers2 = {'X-Object-Sysmeta-Ec-Etag': etag1}
+
+ responses1 = [(200, body, headers1)
+ for body in ec_archive_bodies1]
+ responses2 = [(200, body, headers2)
+ for body in ec_archive_bodies2]
+
+ req = swob.Request.blank('/v1/a/c/o')
+
+ # sanity check responses1
+ responses = responses1[:self.policy.ec_ndata]
+ status_codes, body_iter, headers = zip(*responses)
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(md5(resp.body).hexdigest(), etag1)
+
+ # sanity check responses2
+ responses = responses2[:self.policy.ec_ndata]
+ status_codes, body_iter, headers = zip(*responses)
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(md5(resp.body).hexdigest(), etag2)
+
+ # now mix the responses a bit
+ mix_index = random.randint(0, self.policy.ec_ndata - 1)
+ mixed_responses = responses1[:self.policy.ec_ndata]
+ mixed_responses[mix_index] = responses2[mix_index]
+
+ status_codes, body_iter, headers = zip(*mixed_responses)
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ try:
+ resp.body
+ except ECDriverError:
+ pass
+ else:
+ self.fail('invalid ec fragment response body did not blow up!')
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(1, len(error_lines))
+ msg = error_lines[0]
+ self.assertTrue('Error decoding fragments' in msg)
+ self.assertTrue('/a/c/o' in msg)
+ log_msg_args, log_msg_kwargs = self.logger.log_dict['error'][0]
+ self.assertEqual(log_msg_kwargs['exc_info'][0], ECDriverError)
+
+ def test_GET_read_timeout(self):
+ segment_size = self.policy.ec_segment_size
+ test_data = ('test' * segment_size)[:-333]
+ etag = md5(test_data).hexdigest()
+ ec_archive_bodies = self._make_ec_archive_bodies(test_data)
+ headers = {'X-Object-Sysmeta-Ec-Etag': etag}
+ self.app.recoverable_node_timeout = 0.01
+ responses = [(200, SlowBody(body, 0.1), headers)
+ for body in ec_archive_bodies]
+
+ req = swob.Request.blank('/v1/a/c/o')
+
+ status_codes, body_iter, headers = zip(*responses + [
+ (404, '', {}) for i in range(
+ self.policy.object_ring.max_more_nodes)])
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ # do this inside the fake http context manager; it'll try to
+ # resume but won't be able to give us all the right bytes
+ self.assertNotEqual(md5(resp.body).hexdigest(), etag)
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(self.replicas(), len(error_lines))
+ nparity = self.policy.ec_nparity
+ for line in error_lines[:nparity]:
+ self.assertTrue('retrying' in line)
+ for line in error_lines[nparity:]:
+ self.assertTrue('ChunkReadTimeout (0.01s)' in line)
+
+ def test_GET_read_timeout_resume(self):
+ segment_size = self.policy.ec_segment_size
+ test_data = ('test' * segment_size)[:-333]
+ etag = md5(test_data).hexdigest()
+ ec_archive_bodies = self._make_ec_archive_bodies(test_data)
+ headers = {'X-Object-Sysmeta-Ec-Etag': etag}
+ self.app.recoverable_node_timeout = 0.05
+ # first one is slow
+ responses = [(200, SlowBody(ec_archive_bodies[0], 0.1), headers)]
+ # ... the rest are fine
+ responses += [(200, body, headers)
+ for body in ec_archive_bodies[1:]]
+
+ req = swob.Request.blank('/v1/a/c/o')
+
+ status_codes, body_iter, headers = zip(
+ *responses[:self.policy.ec_ndata + 1])
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(md5(resp.body).hexdigest(), etag)
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(1, len(error_lines))
+ self.assertTrue('retrying' in error_lines[0])
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/unit/proxy/test_mem_server.py b/test/unit/proxy/test_mem_server.py
index bc5b8794f..f8bc2e321 100644
--- a/test/unit/proxy/test_mem_server.py
+++ b/test/unit/proxy/test_mem_server.py
@@ -34,7 +34,22 @@ class TestProxyServer(test_server.TestProxyServer):
class TestObjectController(test_server.TestObjectController):
- pass
+ def test_PUT_no_etag_fallocate(self):
+ # mem server doesn't call fallocate(), believe it or not
+ pass
+
+ # these tests all go looking in the filesystem
+ def test_policy_IO(self):
+ pass
+
+ def test_PUT_ec(self):
+ pass
+
+ def test_PUT_ec_multiple_segments(self):
+ pass
+
+ def test_PUT_ec_fragment_archive_etag_mismatch(self):
+ pass
class TestContainerController(test_server.TestContainerController):
diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
index 41f0ea3c3..08d3b363e 100644
--- a/test/unit/proxy/test_server.py
+++ b/test/unit/proxy/test_server.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +15,13 @@
# limitations under the License.
import logging
+import math
import os
+import pickle
import sys
import unittest
-from contextlib import contextmanager, nested
+from contextlib import closing, contextmanager, nested
+from gzip import GzipFile
from shutil import rmtree
from StringIO import StringIO
import gc
@@ -25,6 +29,7 @@ import time
from textwrap import dedent
from urllib import quote
from hashlib import md5
+from pyeclib.ec_iface import ECDriverError
from tempfile import mkdtemp
import weakref
import operator
@@ -35,13 +40,14 @@ import random
import mock
from eventlet import sleep, spawn, wsgi, listen, Timeout
-from swift.common.utils import json
+from swift.common.utils import hash_path, json, storage_directory, public
from test.unit import (
connect_tcp, readuntil2crlfs, FakeLogger, fake_http_connect, FakeRing,
FakeMemcache, debug_logger, patch_policies, write_fake_ring,
mocked_http_conn)
from swift.proxy import server as proxy_server
+from swift.proxy.controllers.obj import ReplicatedObjectController
from swift.account import server as account_server
from swift.container import server as container_server
from swift.obj import server as object_server
@@ -49,16 +55,18 @@ from swift.common.middleware import proxy_logging
from swift.common.middleware.acl import parse_acl, format_acl
from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist
from swift.common import utils, constraints
+from swift.common.ring import RingData
from swift.common.utils import mkdirs, normalize_timestamp, NullLogger
from swift.common.wsgi import monkey_patch_mimetools, loadapp
from swift.proxy.controllers import base as proxy_base
from swift.proxy.controllers.base import get_container_memcache_key, \
get_account_memcache_key, cors_validation
import swift.proxy.controllers
+import swift.proxy.controllers.obj
from swift.common.swob import Request, Response, HTTPUnauthorized, \
- HTTPException, HTTPForbidden
+ HTTPException, HTTPForbidden, HeaderKeyDict
from swift.common import storage_policy
-from swift.common.storage_policy import StoragePolicy, \
+from swift.common.storage_policy import StoragePolicy, ECStoragePolicy, \
StoragePolicyCollection, POLICIES
from swift.common.request_helpers import get_sys_meta_prefix
@@ -87,10 +95,9 @@ def do_setup(the_object_server):
os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked')
mkdirs(_testdir)
rmtree(_testdir)
- mkdirs(os.path.join(_testdir, 'sda1'))
- mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
- mkdirs(os.path.join(_testdir, 'sdb1'))
- mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
+ for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1',
+ 'sdf1', 'sdg1', 'sdh1', 'sdi1'):
+ mkdirs(os.path.join(_testdir, drive, 'tmp'))
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers':
'content-encoding, x-object-manifest, content-disposition, foo',
@@ -102,8 +109,10 @@ def do_setup(the_object_server):
con2lis = listen(('localhost', 0))
obj1lis = listen(('localhost', 0))
obj2lis = listen(('localhost', 0))
+ obj3lis = listen(('localhost', 0))
+ objsocks = [obj1lis, obj2lis, obj3lis]
_test_sockets = \
- (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis)
+ (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis, obj3lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
account_devs = [
{'port': acc1lis.getsockname()[1]},
@@ -119,27 +128,45 @@ def do_setup(the_object_server):
storage_policy._POLICIES = StoragePolicyCollection([
StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
- StoragePolicy(2, 'two', False)])
+ StoragePolicy(2, 'two', False),
+ ECStoragePolicy(3, 'ec', ec_type='jerasure_rs_vand',
+ ec_ndata=2, ec_nparity=1, ec_segment_size=4096)])
obj_rings = {
0: ('sda1', 'sdb1'),
1: ('sdc1', 'sdd1'),
2: ('sde1', 'sdf1'),
+ # sdg1, sdh1, sdi1 taken by policy 3 (see below)
}
for policy_index, devices in obj_rings.items():
policy = POLICIES[policy_index]
- dev1, dev2 = devices
obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz')
obj_devs = [
- {'port': obj1lis.getsockname()[1], 'device': dev1},
- {'port': obj2lis.getsockname()[1], 'device': dev2},
- ]
+ {'port': objsock.getsockname()[1], 'device': dev}
+ for objsock, dev in zip(objsocks, devices)]
write_fake_ring(obj_ring_path, *obj_devs)
+
+ # write_fake_ring can't handle a 3-element ring, and the EC policy needs
+ # at least 3 devs to work with, so we do it manually
+ devs = [{'id': 0, 'zone': 0, 'device': 'sdg1', 'ip': '127.0.0.1',
+ 'port': obj1lis.getsockname()[1]},
+ {'id': 1, 'zone': 0, 'device': 'sdh1', 'ip': '127.0.0.1',
+ 'port': obj2lis.getsockname()[1]},
+ {'id': 2, 'zone': 0, 'device': 'sdi1', 'ip': '127.0.0.1',
+ 'port': obj3lis.getsockname()[1]}]
+ pol3_replica2part2dev_id = [[0, 1, 2, 0],
+ [1, 2, 0, 1],
+ [2, 0, 1, 2]]
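+    # each row above is one replica and each column one partition; with a
+    # part_shift of 30 the ring has 2 ** (32 - 30) = 4 partitions, matching
+    # the 4 columns, and every device serves every partition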
+ obj3_ring_path = os.path.join(_testdir, POLICIES[3].ring_name + '.ring.gz')
+ part_shift = 30
+ with closing(GzipFile(obj3_ring_path, 'wb')) as fh:
+ pickle.dump(RingData(pol3_replica2part2dev_id, devs, part_shift), fh)
+
prosrv = proxy_server.Application(conf, FakeMemcacheReturnsNone(),
logger=debug_logger('proxy'))
for policy in POLICIES:
# make sure all the rings are loaded
prosrv.get_object_ring(policy.idx)
- # don't loose this one!
+ # don't lose this one!
_test_POLICIES = storage_policy._POLICIES
acc1srv = account_server.AccountController(
conf, logger=debug_logger('acct1'))
@@ -153,8 +180,10 @@ def do_setup(the_object_server):
conf, logger=debug_logger('obj1'))
obj2srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj2'))
+ obj3srv = the_object_server.ObjectController(
+ conf, logger=debug_logger('obj3'))
_test_servers = \
- (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv)
+ (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv)
nl = NullLogger()
logging_prosv = proxy_logging.ProxyLoggingMiddleware(prosrv, conf,
logger=prosrv.logger)
@@ -165,8 +194,9 @@ def do_setup(the_object_server):
con2spa = spawn(wsgi.server, con2lis, con2srv, nl)
obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl)
obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl)
+ obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl)
_test_coros = \
- (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa)
+ (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa)
# Create account
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a')
@@ -280,6 +310,15 @@ def sortHeaderNames(headerNames):
return ', '.join(headers)
+def parse_headers_string(headers_str):
+ headers_dict = HeaderKeyDict()
+ for line in headers_str.split('\r\n'):
+ if ': ' in line:
+ header, value = line.split(': ', 1)
+ headers_dict[header] = value
+ return headers_dict
+
+
def node_error_count(proxy_app, ring_node):
# Reach into the proxy's internals to get the error count for a
# particular node
@@ -846,12 +885,12 @@ class TestProxyServer(unittest.TestCase):
self.assertTrue(app.admin_key is None)
def test_get_info_controller(self):
- path = '/info'
+ req = Request.blank('/info')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
- controller, path_parts = app.get_controller(path)
+ controller, path_parts = app.get_controller(req)
self.assertTrue('version' in path_parts)
self.assertTrue(path_parts['version'] is None)
@@ -861,6 +900,65 @@ class TestProxyServer(unittest.TestCase):
self.assertEqual(controller.__name__, 'InfoController')
+ def test_error_limit_methods(self):
+ logger = debug_logger('test')
+ app = proxy_server.Application({}, FakeMemcache(),
+ account_ring=FakeRing(),
+ container_ring=FakeRing(),
+ logger=logger)
+ node = app.container_ring.get_part_nodes(0)[0]
+ # error occurred
+ app.error_occurred(node, 'test msg')
+ self.assertTrue('test msg' in
+ logger.get_lines_for_level('error')[-1])
+ self.assertEqual(1, node_error_count(app, node))
+
+ # exception occurred
+ try:
+ raise Exception('kaboom1!')
+ except Exception as e1:
+ app.exception_occurred(node, 'test1', 'test1 msg')
+ line = logger.get_lines_for_level('error')[-1]
+ self.assertTrue('test1 server' in line)
+ self.assertTrue('test1 msg' in line)
+ log_args, log_kwargs = logger.log_dict['error'][-1]
+ self.assertTrue(log_kwargs['exc_info'])
+ self.assertEqual(log_kwargs['exc_info'][1], e1)
+ self.assertEqual(2, node_error_count(app, node))
+
+ # warning exception occurred
+ try:
+ raise Exception('kaboom2!')
+ except Exception as e2:
+ app.exception_occurred(node, 'test2', 'test2 msg',
+ level=logging.WARNING)
+ line = logger.get_lines_for_level('warning')[-1]
+ self.assertTrue('test2 server' in line)
+ self.assertTrue('test2 msg' in line)
+ log_args, log_kwargs = logger.log_dict['warning'][-1]
+ self.assertTrue(log_kwargs['exc_info'])
+ self.assertEqual(log_kwargs['exc_info'][1], e2)
+ self.assertEqual(3, node_error_count(app, node))
+
+ # custom exception occurred
+ try:
+ raise Exception('kaboom3!')
+ except Exception as e3:
+ e3_info = sys.exc_info()
+ try:
+ raise Exception('kaboom4!')
+ except Exception:
+ pass
+ app.exception_occurred(node, 'test3', 'test3 msg',
+ level=logging.WARNING, exc_info=e3_info)
+ line = logger.get_lines_for_level('warning')[-1]
+ self.assertTrue('test3 server' in line)
+ self.assertTrue('test3 msg' in line)
+ log_args, log_kwargs = logger.log_dict['warning'][-1]
+ self.assertTrue(log_kwargs['exc_info'])
+ self.assertEqual(log_kwargs['exc_info'][1], e3)
+ self.assertEqual(4, node_error_count(app, node))
+
@patch_policies([
StoragePolicy(0, 'zero', is_default=True),
@@ -981,6 +1079,23 @@ class TestObjectController(unittest.TestCase):
for policy in POLICIES:
policy.object_ring = FakeRing(base_port=3000)
+ def put_container(self, policy_name, container_name):
+ # Note: only works if called with unpatched policies
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/%s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: 0\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Storage-Policy: %s\r\n'
+ '\r\n' % (container_name, policy_name))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 2'
+ self.assertEqual(headers[:len(exp)], exp)
+
def assert_status_map(self, method, statuses, expected, raise_exc=False):
with save_globals():
kwargs = {}
@@ -1014,20 +1129,14 @@ class TestObjectController(unittest.TestCase):
@unpatch_policies
def test_policy_IO(self):
- if hasattr(_test_servers[-1], '_filesystem'):
- # ironically, the _filesystem attribute on the object server means
- # the in-memory diskfile is in use, so this test does not apply
- return
-
- def check_file(policy_idx, cont, devs, check_val):
- partition, nodes = prosrv.get_object_ring(policy_idx).get_nodes(
- 'a', cont, 'o')
+ def check_file(policy, cont, devs, check_val):
+ partition, nodes = policy.object_ring.get_nodes('a', cont, 'o')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
for dev in devs:
file = df_mgr.get_diskfile(dev, partition, 'a',
cont, 'o',
- policy_idx=policy_idx)
+ policy=policy)
if check_val is True:
file.open()
@@ -1058,8 +1167,8 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
- check_file(0, 'c', ['sda1', 'sdb1'], True)
- check_file(0, 'c', ['sdc1', 'sdd1', 'sde1', 'sdf1'], False)
+ check_file(POLICIES[0], 'c', ['sda1', 'sdb1'], True)
+ check_file(POLICIES[0], 'c', ['sdc1', 'sdd1', 'sde1', 'sdf1'], False)
# check policy 1: put file on c1, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -1084,8 +1193,8 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
- check_file(1, 'c1', ['sdc1', 'sdd1'], True)
- check_file(1, 'c1', ['sda1', 'sdb1', 'sde1', 'sdf1'], False)
+ check_file(POLICIES[1], 'c1', ['sdc1', 'sdd1'], True)
+ check_file(POLICIES[1], 'c1', ['sda1', 'sdb1', 'sde1', 'sdf1'], False)
# check policy 2: put file on c2, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -1110,8 +1219,8 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
- check_file(2, 'c2', ['sde1', 'sdf1'], True)
- check_file(2, 'c2', ['sda1', 'sdb1', 'sdc1', 'sdd1'], False)
+ check_file(POLICIES[2], 'c2', ['sde1', 'sdf1'], True)
+ check_file(POLICIES[2], 'c2', ['sda1', 'sdb1', 'sdc1', 'sdd1'], False)
@unpatch_policies
def test_policy_IO_override(self):
@@ -1146,7 +1255,7 @@ class TestObjectController(unittest.TestCase):
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
df = df_mgr.get_diskfile(node['device'], partition, 'a',
- 'c1', 'wrong-o', policy_idx=2)
+ 'c1', 'wrong-o', policy=POLICIES[2])
with df.open():
contents = ''.join(df.reader())
self.assertEqual(contents, "hello")
@@ -1178,7 +1287,7 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(res.status_int, 204)
df = df_mgr.get_diskfile(node['device'], partition, 'a',
- 'c1', 'wrong-o', policy_idx=2)
+ 'c1', 'wrong-o', policy=POLICIES[2])
try:
df.open()
except DiskFileNotExist as e:
@@ -1215,6 +1324,619 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
+ @unpatch_policies
+ def test_PUT_ec(self):
+ policy = POLICIES[3]
+ self.put_container("ec", "ec-con")
+
+ obj = 'abCD' * 10 # small, so we don't get multiple EC stripes
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/o1 HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Etag: "%s"\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ ecd = policy.pyeclib_driver
+ expected_pieces = set(ecd.encode(obj))
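+        # for this 2+1 policy, encode() returns ec_ndata + ec_nparity = 3
+        # fragment archives, one destined for each node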
+
+ # go to disk to make sure it's there and all erasure-coded
+ partition, nodes = policy.object_ring.get_nodes('a', 'ec-con', 'o1')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+
+ got_pieces = set()
+ got_indices = set()
+ got_durable = []
+ for node_index, node in enumerate(nodes):
+ df = df_mgr.get_diskfile(node['device'], partition,
+ 'a', 'ec-con', 'o1',
+ policy=policy)
+ with df.open():
+ meta = df.get_metadata()
+ contents = ''.join(df.reader())
+ got_pieces.add(contents)
+
+            # check presence of a .durable file for the timestamp
+ durable_file = os.path.join(
+ _testdir, node['device'], storage_directory(
+ diskfile.get_data_dir(policy),
+ partition, hash_path('a', 'ec-con', 'o1')),
+ utils.Timestamp(df.timestamp).internal + '.durable')
+
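+            # the .durable file is a zero-byte marker; its presence means
+            # this node completed the commit (2nd) phase of the EC PUT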
+ if os.path.isfile(durable_file):
+ got_durable.append(True)
+
+ lmeta = dict((k.lower(), v) for k, v in meta.items())
+ got_indices.add(
+ lmeta['x-object-sysmeta-ec-frag-index'])
+
+ self.assertEqual(
+ lmeta['x-object-sysmeta-ec-etag'],
+ md5(obj).hexdigest())
+ self.assertEqual(
+ lmeta['x-object-sysmeta-ec-content-length'],
+ str(len(obj)))
+ self.assertEqual(
+ lmeta['x-object-sysmeta-ec-segment-size'],
+ '4096')
+ self.assertEqual(
+ lmeta['x-object-sysmeta-ec-scheme'],
+ 'jerasure_rs_vand 2+1')
+ self.assertEqual(
+ lmeta['etag'],
+ md5(contents).hexdigest())
+
+ self.assertEqual(expected_pieces, got_pieces)
+ self.assertEqual(set(('0', '1', '2')), got_indices)
+
+        # verify at least 2 puts made it all the way to the end of the 2nd
+        # phase, i.e. at least 2 .durable files were written
+ num_durable_puts = sum(d is True for d in got_durable)
+ self.assertTrue(num_durable_puts >= 2)
+
+ @unpatch_policies
+ def test_PUT_ec_multiple_segments(self):
+ ec_policy = POLICIES[3]
+ self.put_container("ec", "ec-con")
+
+ pyeclib_header_size = len(ec_policy.pyeclib_driver.encode("")[0])
+ segment_size = ec_policy.ec_segment_size
+
+ # Big enough to have multiple segments. Also a multiple of the
+ # segment size to get coverage of that path too.
+ obj = 'ABC' * segment_size
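+        # 'ABC' * 4096 gives 12288 bytes: exactly three full 4 KiB segments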
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/o2 HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ # it's a 2+1 erasure code, so each fragment archive should be half
+        # the length of the object, plus three inline pyeclib fragment
+        # headers (one per segment)
+ expected_length = (len(obj) / 2 + pyeclib_header_size * 3)
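+        # for example: 12288 / 2 = 6144 bytes of encoded data per archive,
+        # plus pyeclib_header_size bytes for each of the 3 segments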
+
+ partition, nodes = ec_policy.object_ring.get_nodes(
+ 'a', 'ec-con', 'o2')
+
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+
+ got_durable = []
+ fragment_archives = []
+ for node in nodes:
+ df = df_mgr.get_diskfile(
+ node['device'], partition, 'a',
+ 'ec-con', 'o2', policy=ec_policy)
+ with df.open():
+ contents = ''.join(df.reader())
+ fragment_archives.append(contents)
+ self.assertEqual(len(contents), expected_length)
+
+            # check presence of a .durable file for the timestamp
+ durable_file = os.path.join(
+ _testdir, node['device'], storage_directory(
+ diskfile.get_data_dir(ec_policy),
+ partition, hash_path('a', 'ec-con', 'o2')),
+ utils.Timestamp(df.timestamp).internal + '.durable')
+
+ if os.path.isfile(durable_file):
+ got_durable.append(True)
+
+ # Verify that we can decode each individual fragment and that they
+ # are all the correct size
+ fragment_size = ec_policy.fragment_size
+ nfragments = int(
+ math.ceil(float(len(fragment_archives[0])) / fragment_size))
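+        # each fragment archive is a concatenation of (header + fragment)
+        # chunks, one per segment, so slicing at fragment_size boundaries
+        # recovers the fragments for each segment in turn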
+
+ for fragment_index in range(nfragments):
+ fragment_start = fragment_index * fragment_size
+ fragment_end = (fragment_index + 1) * fragment_size
+
+ try:
+ frags = [fa[fragment_start:fragment_end]
+ for fa in fragment_archives]
+ seg = ec_policy.pyeclib_driver.decode(frags)
+ except ECDriverError:
+ self.fail("Failed to decode fragments %d; this probably "
+ "means the fragments are not the sizes they "
+ "should be" % fragment_index)
+
+ segment_start = fragment_index * segment_size
+ segment_end = (fragment_index + 1) * segment_size
+
+ self.assertEqual(seg, obj[segment_start:segment_end])
+
+        # verify at least 2 puts made it all the way to the end of the 2nd
+        # phase, i.e. at least 2 .durable files were written
+ num_durable_puts = sum(d is True for d in got_durable)
+ self.assertTrue(num_durable_puts >= 2)
+
+ @unpatch_policies
+ def test_PUT_ec_object_etag_mismatch(self):
+ self.put_container("ec", "ec-con")
+
+ obj = '90:6A:02:60:B1:08-96da3e706025537fc42464916427727e'
+ prolis = _test_sockets[0]
+ prosrv = _test_servers[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/o3 HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Etag: %s\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5('something else').hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 422'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ # nothing should have made it to disk on the object servers
+ partition, nodes = prosrv.get_object_ring(3).get_nodes(
+ 'a', 'ec-con', 'o3')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+
+ for node in nodes:
+ df = df_mgr.get_diskfile(node['device'], partition,
+ 'a', 'ec-con', 'o3', policy=POLICIES[3])
+ self.assertRaises(DiskFileNotExist, df.open)
+
+ @unpatch_policies
+ def test_PUT_ec_fragment_archive_etag_mismatch(self):
+ self.put_container("ec", "ec-con")
+
+ # Cause a hash mismatch by feeding one particular MD5 hasher some
+ # extra data. The goal here is to get exactly one of the hashers in
+ # an object server.
+ countdown = [1]
+
+ def busted_md5_constructor(initial_str=""):
+ hasher = md5(initial_str)
+ if countdown[0] == 0:
+ hasher.update('wrong')
+ countdown[0] -= 1
+ return hasher
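+        # countdown starts at 1: the first hasher built stays intact, exactly
+        # the second one gets the extra 'wrong' bytes, and every later hasher
+        # sees a negative countdown and is untouched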
+
+ obj = 'uvarovite-esurience-cerated-symphysic'
+ prolis = _test_sockets[0]
+ prosrv = _test_servers[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ with mock.patch('swift.obj.server.md5', busted_md5_constructor):
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/pimento HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Etag: %s\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 503' # no quorum
+ self.assertEqual(headers[:len(exp)], exp)
+
+ # 2/3 of the fragment archives should have landed on disk
+ partition, nodes = prosrv.get_object_ring(3).get_nodes(
+ 'a', 'ec-con', 'pimento')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+
+ found = 0
+ for node in nodes:
+ df = df_mgr.get_diskfile(node['device'], partition,
+ 'a', 'ec-con', 'pimento',
+ policy=POLICIES[3])
+ try:
+ df.open()
+ found += 1
+ except DiskFileNotExist:
+ pass
+ self.assertEqual(found, 2)
+
+ @unpatch_policies
+ def test_PUT_ec_if_none_match(self):
+ self.put_container("ec", "ec-con")
+
+ obj = 'ananepionic-lepidophyllous-ropewalker-neglectful'
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/inm HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Etag: "%s"\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/inm HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'If-None-Match: *\r\n'
+ 'Etag: "%s"\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 412'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ @unpatch_policies
+ def test_GET_ec(self):
+ self.put_container("ec", "ec-con")
+
+ obj = '0123456' * 11 * 17
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/go-get-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Object-Meta-Color: chartreuse\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/go-get-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ headers = parse_headers_string(headers)
+ self.assertEqual(str(len(obj)), headers['Content-Length'])
+ self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
+ self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
+
+ gotten_obj = ''
+ while True:
+ buf = fd.read(64)
+ if not buf:
+ break
+ gotten_obj += buf
+ self.assertEqual(gotten_obj, obj)
+
+ @unpatch_policies
+ def test_conditional_GET_ec(self):
+ self.put_container("ec", "ec-con")
+
+ obj = 'this object has an etag and is otherwise unimportant'
+ etag = md5(obj).hexdigest()
+ not_etag = md5(obj + "blahblah").hexdigest()
+
+ prolis = _test_sockets[0]
+ prosrv = _test_servers[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/conditionals HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ for verb in ('GET', 'HEAD'):
+ # If-Match
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-Match': etag})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 200)
+
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-Match': not_etag})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 412)
+
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-Match': "*"})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 200)
+
+ # If-None-Match
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-None-Match': etag})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 304)
+
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-None-Match': not_etag})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 200)
+
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-None-Match': "*"})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 304)
+
+ @unpatch_policies
+ def test_GET_ec_big(self):
+ self.put_container("ec", "ec-con")
+
+ # our EC segment size is 4 KiB, so this is multiple (3) segments;
+ # we'll verify that with a sanity check
+ obj = 'a moose once bit my sister' * 400
+ self.assertTrue(
+ len(obj) > POLICIES.get_by_name("ec").ec_segment_size * 2,
+ "object is too small for proper testing")
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/big-obj-get HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/big-obj-get HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ headers = parse_headers_string(headers)
+ self.assertEqual(str(len(obj)), headers['Content-Length'])
+ self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
+
+ gotten_obj = ''
+ while True:
+ buf = fd.read(64)
+ if not buf:
+ break
+ gotten_obj += buf
+ # This may look like a redundant test, but when things fail, this
+ # has a useful failure message while the subsequent one spews piles
+ # of garbage and demolishes your terminal's scrollback buffer.
+ self.assertEqual(len(gotten_obj), len(obj))
+ self.assertEqual(gotten_obj, obj)
+
+ @unpatch_policies
+ def test_GET_ec_failure_handling(self):
+ self.put_container("ec", "ec-con")
+
+ obj = 'look at this object; it is simply amazing ' * 500
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/crash-test-dummy HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ def explodey_iter(inner_iter):
+ yield next(inner_iter)
+ raise Exception("doom ba doom")
+
+ real_ec_app_iter = swift.proxy.controllers.obj.ECAppIter
+
+ def explodey_ec_app_iter(path, policy, iterators, *a, **kw):
+ # Each thing in `iterators` here is a document-parts iterator,
+ # and we want to fail after getting a little into each part.
+ #
+ # That way, we ensure we've started streaming the response to
+ # the client when things go wrong.
+ return real_ec_app_iter(
+ path, policy,
+ [explodey_iter(i) for i in iterators],
+ *a, **kw)
+
+ with mock.patch("swift.proxy.controllers.obj.ECAppIter",
+ explodey_ec_app_iter):
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/crash-test-dummy HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ headers = parse_headers_string(headers)
+ self.assertEqual(str(len(obj)), headers['Content-Length'])
+ self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
+
+ gotten_obj = ''
+ try:
+ with Timeout(300): # don't hang the testrun when this fails
+ while True:
+ buf = fd.read(64)
+ if not buf:
+ break
+ gotten_obj += buf
+ except Timeout:
+ self.fail("GET hung when connection failed")
+
+ # Ensure we failed partway through, otherwise the mocks could
+ # get out of date without anyone noticing
+ self.assertTrue(0 < len(gotten_obj) < len(obj))
+
+ @unpatch_policies
+ def test_HEAD_ec(self):
+ self.put_container("ec", "ec-con")
+
+ obj = '0123456' * 11 * 17
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/go-head-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Object-Meta-Color: chartreuse\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('HEAD /v1/a/ec-con/go-head-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ headers = parse_headers_string(headers)
+ self.assertEqual(str(len(obj)), headers['Content-Length'])
+ self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
+ self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
+
+ @unpatch_policies
+ def test_GET_ec_404(self):
+ self.put_container("ec", "ec-con")
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 404'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ @unpatch_policies
+ def test_HEAD_ec_404(self):
+ self.put_container("ec", "ec-con")
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('HEAD /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 404'
+ self.assertEqual(headers[:len(exp)], exp)
+
def test_PUT_expect_header_zero_content_length(self):
test_errors = []
@@ -1226,8 +1948,8 @@ class TestObjectController(unittest.TestCase):
'server!')
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
# The (201, Exception('test')) tuples in there have the effect of
# changing the status of the initial expect response. The default
# expect response from FakeConn for 201 is 100.
@@ -1262,8 +1984,8 @@ class TestObjectController(unittest.TestCase):
'non-zero byte PUT!')
with save_globals():
- controller = \
- proxy_server.ObjectController(self.app, 'a', 'c', 'o.jpg')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
# the (100, 201) tuples in there are just being extra explicit
# about the FakeConn returning the 100 Continue status when the
# object controller calls getexpect. Which is FakeConn's default
@@ -1298,7 +2020,8 @@ class TestObjectController(unittest.TestCase):
self.app.write_affinity_node_count = lambda r: 3
controller = \
- proxy_server.ObjectController(self.app, 'a', 'c', 'o.jpg')
+ ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
set_http_connect(200, 200, 201, 201, 201,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
@@ -1333,7 +2056,8 @@ class TestObjectController(unittest.TestCase):
self.app.write_affinity_node_count = lambda r: 3
controller = \
- proxy_server.ObjectController(self.app, 'a', 'c', 'o.jpg')
+ ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
self.app.error_limit(
object_ring.get_part_nodes(1)[0], 'test')
set_http_connect(200, 200, # account, container
@@ -1355,6 +2079,27 @@ class TestObjectController(unittest.TestCase):
self.assertNotEqual(0, written_to[2][1] % 2)
@unpatch_policies
+ def test_PUT_no_etag_fallocate(self):
+ with mock.patch('swift.obj.diskfile.fallocate') as mock_fallocate:
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ obj = 'hemoleucocytic-surfactant'
+ fd.write('PUT /v1/a/c/o HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+ # one for each obj server; this test has 2
+ self.assertEqual(len(mock_fallocate.mock_calls), 2)
+
+ @unpatch_policies
def test_PUT_message_length_using_content_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -1593,7 +2338,8 @@ class TestObjectController(unittest.TestCase):
"last_modified": "1970-01-01T00:00:01.000000"}])
body_iter = ('', '', body, '', '', '', '', '', '', '', '', '', '', '')
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
# HEAD HEAD GET GET HEAD GET GET GET PUT PUT
# PUT DEL DEL DEL
set_http_connect(200, 200, 200, 200, 200, 200, 200, 200, 201, 201,
@@ -1614,6 +2360,8 @@ class TestObjectController(unittest.TestCase):
StoragePolicy(1, 'one', True, object_ring=FakeRing())
])
def test_DELETE_on_expired_versioned_object(self):
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
methods = set()
authorize_call_count = [0]
@@ -1646,8 +2394,8 @@ class TestObjectController(unittest.TestCase):
return None # allow the request
with save_globals():
- controller = proxy_server.ObjectController(self.app,
- 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
controller.container_info = fake_container_info
controller._listing_iter = fake_list_iter
set_http_connect(404, 404, 404, # get for the previous version
@@ -1678,6 +2426,8 @@ class TestObjectController(unittest.TestCase):
Verify that a request with read access to a versions container
is unable to cause any write operations on the versioned container.
"""
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
methods = set()
authorize_call_count = [0]
@@ -1711,8 +2461,7 @@ class TestObjectController(unittest.TestCase):
return HTTPForbidden(req) # allow the request
with save_globals():
- controller = proxy_server.ObjectController(self.app,
- 'a', 'c', 'o')
+ controller = ReplicatedObjectController(self.app, 'a', 'c', 'o')
controller.container_info = fake_container_info
# patching _listing_iter simulates request being authorized
# to list versions container
@@ -1731,8 +2480,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_auto_content_type(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_content_type(filename, expected):
# The three responses here are for account_info() (HEAD to
@@ -1778,8 +2527,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
@@ -1798,8 +2547,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_connect_exceptions(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
@@ -1829,8 +2578,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_send_exceptions(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
@@ -1852,8 +2601,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_max_size(self):
with save_globals():
set_http_connect(201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
'Content-Type': 'foo/bar'})
@@ -1864,8 +2613,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_bad_content_type(self):
with save_globals():
set_http_connect(201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': 0, 'Content-Type': 'foo/bar;swift_hey=45'})
self.app.update_request(req)
@@ -1875,8 +2624,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_getresponse_exceptions(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
@@ -1921,6 +2670,8 @@ class TestObjectController(unittest.TestCase):
StoragePolicy(1, 'one', object_ring=FakeRing()),
])
def test_POST_backend_headers(self):
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
self.app.object_post_as_copy = False
self.app.sort_nodes = lambda nodes: nodes
backend_requests = []
@@ -2191,8 +2942,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
limit = constraints.MAX_META_VALUE_LENGTH
self.app.object_post_as_copy = False
- proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
@@ -2739,8 +3490,8 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(node_list, got_nodes)
def test_best_response_sets_headers(self):
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object', headers=[{'X-Test': '1'},
@@ -2749,8 +3500,8 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.headers['X-Test'], '1')
def test_best_response_sets_etag(self):
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object')
@@ -2783,8 +3534,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertEquals(resp.status_int, 200)
@@ -2796,8 +3547,8 @@ class TestObjectController(unittest.TestCase):
def test_error_limiting(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
@@ -2833,8 +3584,8 @@ class TestObjectController(unittest.TestCase):
def test_error_limiting_survives_ring_reload(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
@@ -2861,8 +3612,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_error_limiting(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
# acc con obj obj obj
@@ -2880,8 +3631,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_error_limiting_last_node(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
# acc con obj obj obj
@@ -2901,8 +3652,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
self.app._error_limiting = {}
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
@@ -2998,8 +3749,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
self.app.object_post_as_copy = False
self.app.memcache = FakeMemcacheReturnsNone()
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
@@ -3019,8 +3770,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_POST_as_copy_requires_container_exist(self):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
@@ -3037,8 +3788,8 @@ class TestObjectController(unittest.TestCase):
def test_bad_metadata(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
@@ -3134,8 +3885,8 @@ class TestObjectController(unittest.TestCase):
@contextmanager
def controller_context(self, req, *args, **kwargs):
_v, account, container, obj = utils.split_path(req.path, 4, 4, True)
- controller = proxy_server.ObjectController(self.app, account,
- container, obj)
+ controller = ReplicatedObjectController(
+ self.app, account, container, obj)
self.app.update_request(req)
self.app.memcache.store = {}
with save_globals():
@@ -3752,7 +4503,8 @@ class TestObjectController(unittest.TestCase):
def test_COPY_newest(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
@@ -3770,7 +4522,8 @@ class TestObjectController(unittest.TestCase):
def test_COPY_account_newest(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
@@ -3795,8 +4548,8 @@ class TestObjectController(unittest.TestCase):
headers=None, query_string=None):
backend_requests.append((method, path, headers))
- controller = proxy_server.ObjectController(self.app, 'a',
- 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
give_connect=capture_requests)
self.app.memcache.store = {}
@@ -3825,8 +4578,8 @@ class TestObjectController(unittest.TestCase):
headers=None, query_string=None):
backend_requests.append((method, path, headers))
- controller = proxy_server.ObjectController(self.app, 'a',
- 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
give_connect=capture_requests)
self.app.memcache.store = {}
@@ -3871,8 +4624,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
set_http_connect(201, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Transfer-Encoding': 'chunked',
@@ -3902,7 +4655,7 @@ class TestObjectController(unittest.TestCase):
def test_chunked_put_bad_version(self):
# Check bad version
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v0 HTTP/1.1\r\nHost: localhost\r\n'
@@ -3916,7 +4669,7 @@ class TestObjectController(unittest.TestCase):
def test_chunked_put_bad_path(self):
# Check bad path
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET invalid HTTP/1.1\r\nHost: localhost\r\n'
@@ -3930,7 +4683,7 @@ class TestObjectController(unittest.TestCase):
def test_chunked_put_bad_utf8(self):
# Check invalid utf-8
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a%80 HTTP/1.1\r\nHost: localhost\r\n'
@@ -3945,7 +4698,7 @@ class TestObjectController(unittest.TestCase):
def test_chunked_put_bad_path_no_controller(self):
# Check bad path, no controller
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1 HTTP/1.1\r\nHost: localhost\r\n'
@@ -3960,7 +4713,7 @@ class TestObjectController(unittest.TestCase):
def test_chunked_put_bad_method(self):
# Check bad method
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('LICK /v1/a HTTP/1.1\r\nHost: localhost\r\n'
@@ -3975,9 +4728,9 @@ class TestObjectController(unittest.TestCase):
def test_chunked_put_unhandled_exception(self):
# Check unhandled exception
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
- obj2srv) = _test_servers
+ obj2srv, obj3srv) = _test_servers
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
orig_update_request = prosrv.update_request
def broken_update_request(*args, **kwargs):
@@ -4001,7 +4754,7 @@ class TestObjectController(unittest.TestCase):
# the part Application.log_request that 'enforces' a
# content_length on the response.
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
@@ -4025,7 +4778,7 @@ class TestObjectController(unittest.TestCase):
ustr_short = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xbatest'
# Create ustr container
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
@@ -4137,7 +4890,7 @@ class TestObjectController(unittest.TestCase):
def test_chunked_put_chunked_put(self):
# Do chunked object put
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
# Also happens to assert that x-storage-token is taken as a
@@ -4168,7 +4921,7 @@ class TestObjectController(unittest.TestCase):
versions_to_create = 3
# Create a container for our versioned object testing
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
pre = quote('%03x' % len(o))
@@ -4552,8 +5305,8 @@ class TestObjectController(unittest.TestCase):
@unpatch_policies
def test_conditional_range_get(self):
- (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis) = \
- _test_sockets
+ (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis,
+ obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# make a container
@@ -4601,8 +5354,8 @@ class TestObjectController(unittest.TestCase):
def test_mismatched_etags(self):
with save_globals():
# no etag supplied, object servers return success w/ diff values
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0'})
self.app.update_request(req)
@@ -4633,8 +5386,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.GET(req)
self.assert_('accept-ranges' in resp.headers)
@@ -4645,8 +5398,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assert_('accept-ranges' in resp.headers)
@@ -4660,8 +5413,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
@@ -4676,8 +5429,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
@@ -4693,8 +5446,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
self.app.object_post_as_copy = False
set_http_connect(200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
@@ -4711,8 +5464,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
@@ -4729,8 +5482,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
@@ -4746,8 +5499,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o'})
@@ -4759,8 +5512,8 @@ class TestObjectController(unittest.TestCase):
def test_POST_converts_delete_after_to_delete_at(self):
with save_globals():
self.app.object_post_as_copy = False
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
self.app.memcache.store = {}
orig_time = time.time
@@ -4783,6 +5536,8 @@ class TestObjectController(unittest.TestCase):
StoragePolicy(1, 'one', True, object_ring=FakeRing())
])
def test_PUT_versioning_with_nonzero_default_policy(self):
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
@@ -4808,8 +5563,8 @@ class TestObjectController(unittest.TestCase):
{'zone': 2, 'ip': '10.0.0.2', 'region': 0,
'id': 2, 'device': 'sdc', 'port': 1002}]}
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a',
- 'c', 'o.jpg')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
controller.container_info = fake_container_info
set_http_connect(200, 200, 200, # head: for the last version
@@ -4830,6 +5585,8 @@ class TestObjectController(unittest.TestCase):
StoragePolicy(1, 'one', True, object_ring=FakeRing())
])
def test_cross_policy_DELETE_versioning(self):
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
requests = []
def capture_requests(ipaddr, port, device, partition, method, path,
@@ -4959,8 +5716,8 @@ class TestObjectController(unittest.TestCase):
def test_OPTIONS(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a',
- 'c', 'o.jpg')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
def my_empty_container_info(*args):
return {}
@@ -5067,7 +5824,8 @@ class TestObjectController(unittest.TestCase):
def test_CORS_valid(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
@@ -5120,7 +5878,8 @@ class TestObjectController(unittest.TestCase):
def test_CORS_valid_with_obj_headers(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
@@ -5181,7 +5940,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_x_container_headers_with_equal_replicas(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
@@ -5202,7 +5962,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
@@ -5224,7 +5985,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
@@ -5248,7 +6010,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'application/stuff'})
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.POST, req,
200, 200, 200, 200, 200) # HEAD HEAD POST POST POST
@@ -5271,7 +6034,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'application/stuff'})
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.DELETE, req,
200, 200, 200, 200, 200) # HEAD HEAD DELETE DELETE DELETE
@@ -5300,7 +6064,8 @@ class TestObjectController(unittest.TestCase):
headers={'Content-Type': 'application/stuff',
'Content-Length': '0',
'X-Delete-At': str(delete_at_timestamp)})
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
@@ -5336,7 +6101,8 @@ class TestObjectController(unittest.TestCase):
headers={'Content-Type': 'application/stuff',
'Content-Length': 0,
'X-Delete-At': str(delete_at_timestamp)})
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
@@ -5358,6 +6124,373 @@ class TestObjectController(unittest.TestCase):
])
+class TestECMismatchedFA(unittest.TestCase):
+ def tearDown(self):
+ prosrv = _test_servers[0]
+ # don't leak error limits and poison other tests
+ prosrv._error_limiting = {}
+
+ def test_mixing_different_objects_fragment_archives(self):
+ (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
+ obj2srv, obj3srv) = _test_servers
+ ec_policy = POLICIES[3]
+
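+ # a 507 is how an object server reports a failed or unmounted disk,
+ # and it is also the response that trips the proxy's error-limiting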
+ @public
+ def bad_disk(req):
+ return Response(status=507, body="borken")
+
+ ensure_container = Request.blank(
+ "/v1/a/ec-crazytown",
+ environ={"REQUEST_METHOD": "PUT"},
+ headers={"X-Storage-Policy": "ec", "X-Auth-Token": "t"})
+ resp = ensure_container.get_response(prosrv)
+ self.assertIn(resp.status_int, (201, 202))
+
+ obj1 = "first version..."
+ put_req1 = Request.blank(
+ "/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "PUT"},
+ headers={"X-Auth-Token": "t"})
+ put_req1.body = obj1
+
+ obj2 = u"versiĆ³n segundo".encode("utf-8")
+ put_req2 = Request.blank(
+ "/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "PUT"},
+ headers={"X-Auth-Token": "t"})
+ put_req2.body = obj2
+
+ # pyeclib has checks for unequal-length inputs; we don't want to
+ # trip those checks here
+ self.assertEqual(len(obj1), len(obj2))
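+ # ("first version..." is 16 bytes, and u"versión segundo" also
+ # UTF-8-encodes to 16 bytes since the accented o takes two bytes)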
+
+ # Servers obj1 and obj2 will have the first version of the object
+ prosrv._error_limiting = {}
+ with nested(
+ mock.patch.object(obj3srv, 'PUT', bad_disk),
+ mock.patch(
+ 'swift.common.storage_policy.ECStoragePolicy.quorum')):
+ type(ec_policy).quorum = mock.PropertyMock(return_value=2)
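+ # with obj3srv's PUT failing and quorum pinned at 2, the PUT can
+ # succeed once the other two object servers have stored their
+ # fragment archives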
+ resp = put_req1.get_response(prosrv)
+ self.assertEqual(resp.status_int, 201)
+
+ # Server obj3 (and, in real life, some handoffs) will have the
+ # second version of the object.
+ prosrv._error_limiting = {}
+ with nested(
+ mock.patch.object(obj1srv, 'PUT', bad_disk),
+ mock.patch.object(obj2srv, 'PUT', bad_disk),
+ mock.patch(
+ 'swift.common.storage_policy.ECStoragePolicy.quorum'),
+ mock.patch(
+ 'swift.proxy.controllers.base.Controller._quorum_size',
+ lambda *a, **kw: 1)):
+ type(ec_policy).quorum = mock.PropertyMock(return_value=1)
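+ # with quorum pinned at 1, obj3srv alone is enough for this
+ # overwrite to be treated as successful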
+ resp = put_req2.get_response(prosrv)
+ self.assertEqual(resp.status_int, 201)
+
+ # A GET that only sees 1 fragment archive should fail
+ get_req = Request.blank("/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "GET"},
+ headers={"X-Auth-Token": "t"})
+ prosrv._error_limiting = {}
+ with nested(
+ mock.patch.object(obj1srv, 'GET', bad_disk),
+ mock.patch.object(obj2srv, 'GET', bad_disk)):
+ resp = get_req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 503)
+
+ # A GET that sees 2 matching FAs will work
+ get_req = Request.blank("/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "GET"},
+ headers={"X-Auth-Token": "t"})
+ prosrv._error_limiting = {}
+ with mock.patch.object(obj3srv, 'GET', bad_disk):
+ resp = get_req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.body, obj1)
+
+ # A GET that sees 2 mismatching FAs will fail
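+ # (one fragment archive of each version is reachable, but decoding
+ # needs ec_ndata fragments from the same version)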
+ get_req = Request.blank("/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "GET"},
+ headers={"X-Auth-Token": "t"})
+ prosrv._error_limiting = {}
+ with mock.patch.object(obj2srv, 'GET', bad_disk):
+ resp = get_req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 503)
+
+
+class TestObjectECRangedGET(unittest.TestCase):
+ def setUp(self):
+ self.app = proxy_server.Application(
+ None, FakeMemcache(),
+ logger=debug_logger('proxy-ut'),
+ account_ring=FakeRing(),
+ container_ring=FakeRing())
+
+ @classmethod
+ def setUpClass(cls):
+ cls.obj_name = 'range-get-test'
+ cls.tiny_obj_name = 'range-get-test-tiny'
+ cls.aligned_obj_name = 'range-get-test-aligned'
+
+ # Note: only works if called with unpatched policies
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: 0\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Storage-Policy: ec\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 2'
+ assert headers[:len(exp)] == exp, "container PUT failed"
+
+ seg_size = POLICIES.get_by_name("ec").ec_segment_size
+ cls.seg_size = seg_size
+ # EC segment size is 4 KiB, so this object spans four segments
+ # (three full ones plus a short final segment), which the sanity
+ # check below verifies
+ cls.obj = ' my hovercraft is full of eels '.join(
+ str(s) for s in range(431))
+ assert seg_size * 4 > len(cls.obj) > seg_size * 3, \
+ "object is wrong number of segments"
+
+ cls.tiny_obj = 'tiny, tiny object'
+ assert len(cls.tiny_obj) < seg_size, "tiny_obj too large"
+
+ cls.aligned_obj = "".join(
+ "abcdEFGHijkl%04d" % x for x in range(512))
+ assert len(cls.aligned_obj) % seg_size == 0, "aligned obj not aligned"
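+ # (512 pieces of 16 bytes each is 8192 bytes: exactly two segments)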
+
+ for obj_name, obj in ((cls.obj_name, cls.obj),
+ (cls.tiny_obj_name, cls.tiny_obj),
+ (cls.aligned_obj_name, cls.aligned_obj)):
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/%s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (obj_name, len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ assert headers[:len(exp)] == exp, \
+ "object PUT failed %s" % obj_name
+
+ def _get_obj(self, range_value, obj_name=None):
+ if obj_name is None:
+ obj_name = self.obj_name
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/%s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Range: %s\r\n'
+ '\r\n' % (obj_name, range_value))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ # e.g. "HTTP/1.1 206 Partial Content\r\n..."
+ status_code = int(headers[9:12])
+ headers = parse_headers_string(headers)
+
+ gotten_obj = ''
+ while True:
+ buf = fd.read(64)
+ if not buf:
+ break
+ gotten_obj += buf
+
+ return (status_code, headers, gotten_obj)
+
+ def test_unaligned(self):
+ # One segment's worth of data, but straddling a segment boundary
+ # (so it has data from two segments)
+ status, headers, gotten_obj = self._get_obj("bytes=3783-7878")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "4096")
+ self.assertEqual(headers['Content-Range'], "bytes 3783-7878/14513")
+ self.assertEqual(len(gotten_obj), 4096)
+ self.assertEqual(gotten_obj, self.obj[3783:7879])
+
+ def test_aligned_left(self):
+ # First byte is aligned to a segment boundary, last byte is not
+ status, headers, gotten_obj = self._get_obj("bytes=0-5500")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "5501")
+ self.assertEqual(headers['Content-Range'], "bytes 0-5500/14513")
+ self.assertEqual(len(gotten_obj), 5501)
+ self.assertEqual(gotten_obj, self.obj[:5501])
+
+ def test_aligned_range(self):
+ # Ranged GET that wants exactly one segment
+ status, headers, gotten_obj = self._get_obj("bytes=4096-8191")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "4096")
+ self.assertEqual(headers['Content-Range'], "bytes 4096-8191/14513")
+ self.assertEqual(len(gotten_obj), 4096)
+ self.assertEqual(gotten_obj, self.obj[4096:8192])
+
+ def test_aligned_range_end(self):
+ # Ranged GET that wants exactly the last segment
+ status, headers, gotten_obj = self._get_obj("bytes=12288-14512")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "2225")
+ self.assertEqual(headers['Content-Range'], "bytes 12288-14512/14513")
+ self.assertEqual(len(gotten_obj), 2225)
+ self.assertEqual(gotten_obj, self.obj[12288:])
+
+ def test_aligned_range_aligned_obj(self):
+ # Ranged GET that wants exactly the last segment, which is full-size
+ status, headers, gotten_obj = self._get_obj("bytes=4096-8191",
+ self.aligned_obj_name)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "4096")
+ self.assertEqual(headers['Content-Range'], "bytes 4096-8191/8192")
+ self.assertEqual(len(gotten_obj), 4096)
+ self.assertEqual(gotten_obj, self.aligned_obj[4096:8192])
+
+ def test_byte_0(self):
+ # Just the first byte, but it's index 0, so that's easy to get wrong
+ status, headers, gotten_obj = self._get_obj("bytes=0-0")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "1")
+ self.assertEqual(headers['Content-Range'], "bytes 0-0/14513")
+ self.assertEqual(gotten_obj, self.obj[0])
+
+ def test_unsatisfiable(self):
+ # Goes just one byte too far off the end of the object, so it's
+ # unsatisfiable
+ status, _junk, _junk = self._get_obj(
+ "bytes=%d-%d" % (len(self.obj), len(self.obj) + 100))
+ self.assertEqual(status, 416)
+
+ def test_off_end(self):
+ # Ranged GET that's mostly off the end of the object, but overlaps
+ # it in just the last byte
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) + 100))
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '1')
+ self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
+ self.assertEqual(gotten_obj, self.obj[-1])
+
+ def test_aligned_off_end(self):
+ # Ranged GET that starts on a segment boundary but runs well past
+ # the end of the object
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=%d-%d" % (8192, len(self.obj) + 100))
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '6321')
+ self.assertEqual(headers['Content-Range'], 'bytes 8192-14512/14513')
+ self.assertEqual(gotten_obj, self.obj[8192:])
+
+ def test_way_off_end(self):
+ # Ranged GET that's mostly off the end of the object, but overlaps
+ # it in just the last byte, and wants multiple segments' worth off
+ # the end
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) * 1000))
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '1')
+ self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
+ self.assertEqual(gotten_obj, self.obj[-1])
+
+ def test_boundaries(self):
+ # Wants the last byte of segment 1 + the first byte of segment 2
+ status, headers, gotten_obj = self._get_obj("bytes=4095-4096")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '2')
+ self.assertEqual(headers['Content-Range'], 'bytes 4095-4096/14513')
+ self.assertEqual(gotten_obj, self.obj[4095:4097])
+
+ def test_until_end(self):
+ # Wants the last byte of segment 1 + the rest
+ status, headers, gotten_obj = self._get_obj("bytes=4095-")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '10418')
+ self.assertEqual(headers['Content-Range'], 'bytes 4095-14512/14513')
+ self.assertEqual(gotten_obj, self.obj[4095:])
+
+ def test_small_suffix(self):
+ # Small range-suffix GET: the last 100 bytes (less than one segment)
+ status, headers, gotten_obj = self._get_obj("bytes=-100")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '100')
+ self.assertEqual(headers['Content-Range'], 'bytes 14413-14512/14513')
+ self.assertEqual(len(gotten_obj), 100)
+ self.assertEqual(gotten_obj, self.obj[-100:])
+
+ def test_small_suffix_aligned(self):
+ # Small range-suffix GET: the last 100 bytes, last segment is
+ # full-size
+ status, headers, gotten_obj = self._get_obj("bytes=-100",
+ self.aligned_obj_name)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '100')
+ self.assertEqual(headers['Content-Range'], 'bytes 8092-8191/8192')
+ self.assertEqual(len(gotten_obj), 100)
+ self.assertEqual(gotten_obj, self.aligned_obj[-100:])
+
+ def test_suffix_two_segs(self):
+ # Ask for enough data that we need the last two segments. The last
+ # segment is short, though, so this ensures we compensate for that.
+ #
+ # Note that the total range size is less than one full-size segment.
+ suffix_len = len(self.obj) % self.seg_size + 1
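+ # (14513 % 4096 = 2225, so suffix_len is 2226 and the range starts
+ # at byte 12287, the final byte of the third segment)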
+
+ status, headers, gotten_obj = self._get_obj("bytes=-%d" % suffix_len)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], str(suffix_len))
+ self.assertEqual(headers['Content-Range'],
+ 'bytes %d-%d/%d' % (len(self.obj) - suffix_len,
+ len(self.obj) - 1,
+ len(self.obj)))
+ self.assertEqual(len(gotten_obj), suffix_len)
+
+ def test_large_suffix(self):
+ # Large range-suffix GET: the last 5000 bytes (more than one segment)
+ status, headers, gotten_obj = self._get_obj("bytes=-5000")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '5000')
+ self.assertEqual(headers['Content-Range'], 'bytes 9513-14512/14513')
+ self.assertEqual(len(gotten_obj), 5000)
+ self.assertEqual(gotten_obj, self.obj[-5000:])
+
+ def test_overlarge_suffix(self):
+ # The last N+1 bytes of an N-byte object
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=-%d" % (len(self.obj) + 1))
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '14513')
+ self.assertEqual(headers['Content-Range'], 'bytes 0-14512/14513')
+ self.assertEqual(len(gotten_obj), len(self.obj))
+ self.assertEqual(gotten_obj, self.obj)
+
+ def test_small_suffix_tiny_object(self):
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=-5", self.tiny_obj_name)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '5')
+ self.assertEqual(headers['Content-Range'], 'bytes 12-16/17')
+ self.assertEqual(gotten_obj, self.tiny_obj[12:])
+
+ def test_overlarge_suffix_tiny_object(self):
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=-1234567890", self.tiny_obj_name)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '17')
+ self.assertEqual(headers['Content-Range'], 'bytes 0-16/17')
+ self.assertEqual(len(gotten_obj), len(self.tiny_obj))
+ self.assertEqual(gotten_obj, self.tiny_obj)
+
+
@patch_policies([
StoragePolicy(0, 'zero', True, object_ring=FakeRing(base_port=3000)),
StoragePolicy(1, 'one', False, object_ring=FakeRing(base_port=3000)),
@@ -5600,7 +6733,7 @@ class TestContainerController(unittest.TestCase):
headers)
self.assertEqual(int(headers
['X-Backend-Storage-Policy-Index']),
- policy.idx)
+ int(policy))
# make sure all mocked responses are consumed
self.assertRaises(StopIteration, mock_conn.code_iter.next)
diff --git a/test/unit/proxy/test_sysmeta.py b/test/unit/proxy/test_sysmeta.py
index d80f2855e..a45c689ab 100644
--- a/test/unit/proxy/test_sysmeta.py
+++ b/test/unit/proxy/test_sysmeta.py
@@ -135,7 +135,7 @@ class TestObjectSysmeta(unittest.TestCase):
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,
'tmp_test_object_server_ObjectController')
- mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
+ mkdirs(os.path.join(self.testdir, 'sda', 'tmp'))
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.obj_ctlr = object_server.ObjectController(
conf, logger=debug_logger('obj-ut'))