-rw-r--r--  CONTRIBUTING.md                               |  23
-rwxr-xr-x  bin/swift-drive-audit                         |   3
-rw-r--r--  doc/source/development_guidelines.rst         |  29
-rw-r--r--  doc/source/howto_installmultinode.rst         |   2
-rw-r--r--  requirements.txt                              |   2
-rwxr-xr-x  swift/cli/recon.py                            |  26
-rw-r--r--  swift/common/middleware/recon.py              |   9
-rw-r--r--  swift/common/middleware/tempauth.py           |   2
-rw-r--r--  swift/common/storage_policy.py                |   6
-rw-r--r--  swift/common/swob.py                          |   4
-rw-r--r--  swift/container/reconciler.py                 |   6
-rw-r--r--  swift/container/replicator.py                 |   6
-rw-r--r--  swift/locale/swift.pot                        |  50
-rw-r--r--  swift/locale/zh_CN/LC_MESSAGES/swift.po       |  50
-rw-r--r--  swift/obj/diskfile.py                         |  14
-rw-r--r--  swift/proxy/controllers/obj.py                | 561
-rw-r--r--  test/functional/__init__.py                   | 252
-rw-r--r--  test/probe/common.py                          |   3
-rw-r--r--  test/unit/__init__.py                         |   3
-rw-r--r--  test/unit/account/test_backend.py             |   2
-rw-r--r--  test/unit/cli/test_info.py                    |  11
-rw-r--r--  test/unit/cli/test_recon.py                   |  37
-rw-r--r--  test/unit/common/middleware/test_recon.py     |  20
-rw-r--r--  test/unit/common/middleware/test_tempauth.py  |  22
-rw-r--r--  test/unit/common/test_storage_policy.py       |  13
-rw-r--r--  test/unit/obj/test_diskfile.py                |  44
-rwxr-xr-x  test/unit/obj/test_server.py                  |   2
-rw-r--r--  test/unit/proxy/test_server.py                | 100
-rw-r--r--  test/unit/proxy/test_sysmeta.py               |   3
29 files changed, 934 insertions, 371 deletions
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 7ba46daf9..6a81d6a8c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -11,6 +11,29 @@ we won't be able to respond to pull requests submitted through GitHub.
Bugs should be filed [on Launchpad](https://bugs.launchpad.net/swift),
not in GitHub's issue tracker.
+
+Swift Design Principles
+=======================
+
+ * [The Zen of Python](http://legacy.python.org/dev/peps/pep-0020/)
+ * Simple Scales
+ * Minimal dependencies
+ * Re-use existing tools and libraries when reasonable
+ * Leverage the economies of scale
+ * Small, loosely coupled RESTful services
+ * No single points of failure
+ * Start with the use case
+ * ... then design from the cluster operator up
+ * If you haven't argued about it, you don't have the right answer yet :)
+ * If it is your first implementation, you probably aren't done yet :)
+
+Please don't feel offended by differences of opinion. Be prepared to advocate
+for your change and iterate on it based on feedback. Reach out to other people
+working on the project on
+[IRC](http://eavesdrop.openstack.org/irclogs/%23openstack-swift/) or the
+[mailing list](http://lists.openstack.org/pipermail/openstack-dev/) - we want
+to help.
+
Recommended workflow
====================
diff --git a/bin/swift-drive-audit b/bin/swift-drive-audit
index 589b255f2..ea1735799 100755
--- a/bin/swift-drive-audit
+++ b/bin/swift-drive-audit
@@ -176,6 +176,7 @@ if __name__ == '__main__':
if not devices:
logger.error("Error: No devices found!")
recon_errors = {}
+ total_errors = 0
for device in devices:
recon_errors[device['mount_point']] = 0
errors = get_errors(error_re, log_file_pattern, minutes, logger)
@@ -198,8 +199,10 @@ if __name__ == '__main__':
comment_fstab(mount_point)
unmounts += 1
recon_errors[mount_point] = count
+ total_errors += count
recon_file = recon_cache_path + "/drive.recon"
dump_recon_cache(recon_errors, recon_file, logger)
+ dump_recon_cache({'drive_audit_errors': total_errors}, recon_file, logger)
if unmounts == 0:
logger.info("No drives were unmounted")
diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst
index 76b5126be..241eda6cf 100644
--- a/doc/source/development_guidelines.rst
+++ b/doc/source/development_guidelines.rst
@@ -70,6 +70,35 @@ When using the 'in-process test' mode, the optional in-memory
object server may be selected by setting the environment variable
``SWIFT_TEST_IN_MEMORY_OBJ`` to a true value.
+The 'in-process test' mode searches for ``proxy-server.conf`` and
+``swift.conf`` config files from which it copies config options and overrides
+some options to suit in-process testing. The search will first look for config
+files in a ``<custom_conf_source_dir>`` that may optionally be specified using
+the environment variable::
+
+ SWIFT_TEST_IN_PROCESS_CONF_DIR=<custom_conf_source_dir>
+
+If ``SWIFT_TEST_IN_PROCESS_CONF_DIR`` is not set, or if a config file is not
+found in ``<custom_conf_source_dir>``, the search will then look in the
+``etc/`` directory in the source tree. If the config file is still not found,
+the corresponding sample config file from ``etc/`` is used (e.g.
+``proxy-server.conf-sample`` or ``swift.conf-sample``).
+
+The environment variable ``SWIFT_TEST_POLICY`` may be set to specify
+a particular storage policy *name* that will be used for testing. When set,
+this policy must exist in the ``swift.conf`` file and its corresponding ring
+file must exist in ``<custom_conf_source_dir>`` (if specified) or ``etc/``. The
+test setup will set the specified policy to be the default and use its ring
+file properties for constructing the test object ring. This allows in-process
+testing to be run against various policy types and ring files.
+
+For example, this command would run the in-process mode functional tests
+using config files found in ``$HOME/my_tests`` and policy 'silver'::
+
+ SWIFT_TEST_IN_PROCESS=1 SWIFT_TEST_IN_PROCESS_CONF_DIR=$HOME/my_tests \
+ SWIFT_TEST_POLICY=silver tox -e func
+
+
------------
Coding Style
------------
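
The config-file search order described in the documentation above can be
summarised with a small sketch; the helper name and signature below are
illustrative only (the real lookup lives in test/functional/__init__.py,
added later in this change):

    import os

    def find_conf_file(conf_src_dir, name, etc_dir, use_sample=True):
        # 1. optional custom dir given via SWIFT_TEST_IN_PROCESS_CONF_DIR
        if conf_src_dir:
            path = os.path.join(conf_src_dir, name)
            if os.path.exists(path):
                return path
        # 2. the etc/ dir in the source tree
        path = os.path.join(etc_dir, name)
        if os.path.exists(path):
            return path
        # 3. the sample file, e.g. proxy-server.conf-sample
        if use_sample:
            path = os.path.join(etc_dir, name + '-sample')
            if os.path.exists(path):
                return path
        raise IOError('no config file found for %s' % name)
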
diff --git a/doc/source/howto_installmultinode.rst b/doc/source/howto_installmultinode.rst
index 7b37cb077..8ab73232d 100644
--- a/doc/source/howto_installmultinode.rst
+++ b/doc/source/howto_installmultinode.rst
@@ -2,7 +2,7 @@
Instructions for a Multiple Server Swift Installation
=====================================================
-Please refer to the latest offical
+Please refer to the latest official
`Openstack Installation Guides <http://docs.openstack.org/#install-guides>`_
for the most up-to-date documentation.
diff --git a/requirements.txt b/requirements.txt
index 60091eab5..27d507901 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@
# process, which may cause wedges in the gate later.
dnspython>=1.9.4
-eventlet>=0.9.15
+eventlet>=0.16.1,!=0.17.0
greenlet>=0.3.1
netifaces>=0.5,!=0.10.0,!=0.10.1
pastedeploy>=1.3.3
diff --git a/swift/cli/recon.py b/swift/cli/recon.py
index 676973c41..b67e2678d 100755
--- a/swift/cli/recon.py
+++ b/swift/cli/recon.py
@@ -330,6 +330,27 @@ class SwiftRecon(object):
print("[async_pending] - No hosts returned valid data.")
print("=" * 79)
+ def driveaudit_check(self, hosts):
+ """
+ Obtain and print drive audit error statistics
+
+        :param hosts: set of hosts to check, in the format:
+            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+ """
+ scan = {}
+ recon = Scout("driveaudit", self.verbose, self.suppress_errors,
+ self.timeout)
+ print("[%s] Checking drive-audit errors" % self._ptime())
+ for url, response, status in self.pool.imap(recon.scout, hosts):
+ if status == 200:
+ scan[url] = response['drive_audit_errors']
+ stats = self._gen_stats(scan.values(), 'drive_audit_errors')
+ if stats['reported'] > 0:
+ self._print_stats(stats)
+ else:
+ print("[drive_audit_errors] - No hosts returned valid data.")
+ print("=" * 79)
+
def umount_check(self, hosts):
"""
Check for and print unmounted drives
@@ -930,6 +951,8 @@ class SwiftRecon(object):
"local copy")
args.add_option('--sockstat', action="store_true",
help="Get cluster socket usage stats")
+ args.add_option('--driveaudit', action="store_true",
+ help="Get drive audit error stats")
args.add_option('--top', type='int', metavar='COUNT', default=0,
help='Also show the top COUNT entries in rank order.')
args.add_option('--all', action="store_true",
@@ -992,6 +1015,7 @@ class SwiftRecon(object):
self.quarantine_check(hosts)
self.socket_usage(hosts)
self.server_type_check(hosts)
+ self.driveaudit_check(hosts)
else:
if options.async:
if self.server_type == 'object':
@@ -1033,6 +1057,8 @@ class SwiftRecon(object):
self.quarantine_check(hosts)
if options.sockstat:
self.socket_usage(hosts)
+ if options.driveaudit:
+ self.driveaudit_check(hosts)
def main():
diff --git a/swift/common/middleware/recon.py b/swift/common/middleware/recon.py
index c51249335..88d5243a4 100644
--- a/swift/common/middleware/recon.py
+++ b/swift/common/middleware/recon.py
@@ -53,6 +53,8 @@ class ReconMiddleware(object):
'container.recon')
self.account_recon_cache = os.path.join(self.recon_cache_path,
'account.recon')
+ self.drive_recon_cache = os.path.join(self.recon_cache_path,
+ 'drive.recon')
self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
self.rings = [self.account_ring_path, self.container_ring_path]
@@ -124,6 +126,11 @@ class ReconMiddleware(object):
return self._from_recon_cache(['async_pending'],
self.object_recon_cache)
+ def get_driveaudit_error(self):
+ """get # of drive audit errors"""
+ return self._from_recon_cache(['drive_audit_errors'],
+ self.drive_recon_cache)
+
def get_replication_info(self, recon_type):
"""get replication info"""
if recon_type == 'account':
@@ -359,6 +366,8 @@ class ReconMiddleware(object):
content = self.get_socket_info()
elif rcheck == "version":
content = self.get_version()
+ elif rcheck == "driveaudit":
+ content = self.get_driveaudit_error()
else:
content = "Invalid path: %s" % req.path
return Response(request=req, status="404 Not Found",
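
With the middleware hook above in place, the new check is exposed as a regular
recon endpoint; a hedged sketch of querying it directly, assuming an object
server with recon enabled on 127.0.0.1:6000 (address and port are
illustrative):

    import json
    import urllib2

    # GET /recon/driveaudit returns the counter read from drive.recon,
    # e.g. {"drive_audit_errors": 4}
    resp = urllib2.urlopen('http://127.0.0.1:6000/recon/driveaudit')
    print(json.load(resp))
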
diff --git a/swift/common/middleware/tempauth.py b/swift/common/middleware/tempauth.py
index a2b07128a..93f55ff03 100644
--- a/swift/common/middleware/tempauth.py
+++ b/swift/common/middleware/tempauth.py
@@ -399,7 +399,7 @@ class TempAuth(object):
s = base64.encodestring(hmac.new(key, msg, sha1).digest()).strip()
if s != sign:
return None
- groups = self._get_user_groups(account, account_user)
+ groups = self._get_user_groups(account, account_user, account_id)
return groups
diff --git a/swift/common/storage_policy.py b/swift/common/storage_policy.py
index 245e3c325..f33eda539 100644
--- a/swift/common/storage_policy.py
+++ b/swift/common/storage_policy.py
@@ -114,6 +114,12 @@ class StoragePolicy(object):
return
self.object_ring = Ring(swift_dir, ring_name=self.ring_name)
+ def get_options(self):
+ """Return the valid conf file options for this policy."""
+ return {'name': self.name,
+ 'default': self.is_default,
+ 'deprecated': self.is_deprecated}
+
class StoragePolicyCollection(object):
"""
diff --git a/swift/common/swob.py b/swift/common/swob.py
index 1c43316ba..729cdd96f 100644
--- a/swift/common/swob.py
+++ b/swift/common/swob.py
@@ -929,6 +929,10 @@ class Request(object):
return '/' + entity_path
@property
+ def is_chunked(self):
+ return 'chunked' in self.headers.get('transfer-encoding', '')
+
+ @property
def url(self):
"Provides the full url of the request"
return self.host_url + self.path_qs
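
The new is_chunked property simply reports whether the Transfer-Encoding
header contains 'chunked'; a quick sketch of the behaviour the proxy PUT path
below relies on:

    from swift.common.swob import Request

    req = Request.blank('/v1/AUTH_test/c/o',
                        headers={'Transfer-Encoding': 'chunked'})
    assert req.is_chunked        # 'chunked' found in the header

    req = Request.blank('/v1/AUTH_test/c/o')
    assert not req.is_chunked    # header absent, defaults to ''
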
diff --git a/swift/container/reconciler.py b/swift/container/reconciler.py
index 12c81be9d..ba896ae52 100644
--- a/swift/container/reconciler.py
+++ b/swift/container/reconciler.py
@@ -137,7 +137,7 @@ def get_reconciler_content_type(op):
raise ValueError('invalid operation type %r' % op)
-def get_row_to_q_entry_translater(broker):
+def get_row_to_q_entry_translator(broker):
account = broker.account
container = broker.container
op_type = {
@@ -145,7 +145,7 @@ def get_row_to_q_entry_translater(broker):
1: get_reconciler_content_type('delete'),
}
- def translater(obj_info):
+ def translator(obj_info):
name = get_reconciler_obj_name(obj_info['storage_policy_index'],
account, container,
obj_info['name'])
@@ -157,7 +157,7 @@ def get_row_to_q_entry_translater(broker):
'content_type': op_type[obj_info['deleted']],
'size': 0,
}
- return translater
+ return translator
def add_to_reconciler_queue(container_ring, account, container, obj,
diff --git a/swift/container/replicator.py b/swift/container/replicator.py
index 897453525..9fa32e496 100644
--- a/swift/container/replicator.py
+++ b/swift/container/replicator.py
@@ -22,7 +22,7 @@ from eventlet import Timeout
from swift.container.backend import ContainerBroker, DATADIR
from swift.container.reconciler import (
MISPLACED_OBJECTS_ACCOUNT, incorrect_policy_index,
- get_reconciler_container_name, get_row_to_q_entry_translater)
+ get_reconciler_container_name, get_row_to_q_entry_translator)
from swift.common import db_replicator
from swift.common.storage_policy import POLICIES
from swift.common.exceptions import DeviceUnavailable
@@ -166,14 +166,14 @@ class ContainerReplicator(db_replicator.Replicator):
misplaced = broker.get_misplaced_since(point, self.per_diff)
if not misplaced:
return max_sync
- translater = get_row_to_q_entry_translater(broker)
+ translator = get_row_to_q_entry_translator(broker)
errors = False
low_sync = point
while misplaced:
batches = defaultdict(list)
for item in misplaced:
container = get_reconciler_container_name(item['created_at'])
- batches[container].append(translater(item))
+ batches[container].append(translator(item))
for container, item_list in batches.items():
success = self.feed_reconciler(container, item_list)
if not success:
diff --git a/swift/locale/swift.pot b/swift/locale/swift.pot
index 2bbfa5ddc..f7a79f723 100644
--- a/swift/locale/swift.pot
+++ b/swift/locale/swift.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.2.2.post123\n"
+"Project-Id-Version: swift 2.2.2.post136\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-03-18 06:11+0000\n"
+"POT-Creation-Date: 2015-03-24 06:06+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -1091,9 +1091,9 @@ msgid "Account"
msgstr ""
#: swift/proxy/controllers/base.py:698 swift/proxy/controllers/base.py:731
-#: swift/proxy/controllers/obj.py:191 swift/proxy/controllers/obj.py:318
-#: swift/proxy/controllers/obj.py:358 swift/proxy/controllers/obj.py:376
-#: swift/proxy/controllers/obj.py:502
+#: swift/proxy/controllers/obj.py:192 swift/proxy/controllers/obj.py:319
+#: swift/proxy/controllers/obj.py:366 swift/proxy/controllers/obj.py:382
+#: swift/proxy/controllers/obj.py:593
msgid "Object"
msgstr ""
@@ -1124,7 +1124,7 @@ msgid "Trying to %(method)s %(path)s"
msgstr ""
#: swift/proxy/controllers/base.py:817 swift/proxy/controllers/base.py:1037
-#: swift/proxy/controllers/obj.py:350 swift/proxy/controllers/obj.py:390
+#: swift/proxy/controllers/obj.py:357 swift/proxy/controllers/obj.py:402
msgid "ERROR Insufficient Storage"
msgstr ""
@@ -1143,76 +1143,64 @@ msgstr ""
msgid "%(type)s returning 503 for %(statuses)s"
msgstr ""
-#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:117
+#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:118
msgid "Container"
msgstr ""
-#: swift/proxy/controllers/obj.py:319
+#: swift/proxy/controllers/obj.py:320
#, python-format
msgid "Trying to write to %s"
msgstr ""
-#: swift/proxy/controllers/obj.py:353
+#: swift/proxy/controllers/obj.py:361
#, python-format
msgid "ERROR %(status)d Expect: 100-continue From Object Server"
msgstr ""
-#: swift/proxy/controllers/obj.py:359
+#: swift/proxy/controllers/obj.py:367
#, python-format
msgid "Expect: 100-continue on %s"
msgstr ""
-#: swift/proxy/controllers/obj.py:377
+#: swift/proxy/controllers/obj.py:383
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr ""
-#: swift/proxy/controllers/obj.py:394
+#: swift/proxy/controllers/obj.py:406
#, python-format
msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
msgstr ""
-#: swift/proxy/controllers/obj.py:665
+#: swift/proxy/controllers/obj.py:663
#, python-format
msgid "Object PUT returning 412, %(statuses)r"
msgstr ""
-#: swift/proxy/controllers/obj.py:674
+#: swift/proxy/controllers/obj.py:672
#, python-format
msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
msgstr ""
-#: swift/proxy/controllers/obj.py:682
-#, python-format
-msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:713
-#, python-format
-msgid ""
-"Object PUT exceptions during send, %(conns)s/%(nodes)s required "
-"connections"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:724
+#: swift/proxy/controllers/obj.py:755
#, python-format
msgid "ERROR Client read timeout (%ss)"
msgstr ""
-#: swift/proxy/controllers/obj.py:729
+#: swift/proxy/controllers/obj.py:762
msgid "ERROR Exception causing client disconnect"
msgstr ""
-#: swift/proxy/controllers/obj.py:734
+#: swift/proxy/controllers/obj.py:767
msgid "Client disconnected without sending enough data"
msgstr ""
-#: swift/proxy/controllers/obj.py:743
+#: swift/proxy/controllers/obj.py:813
#, python-format
msgid "Object servers returned %s mismatched etags"
msgstr ""
-#: swift/proxy/controllers/obj.py:747
+#: swift/proxy/controllers/obj.py:817
msgid "Object PUT"
msgstr ""
diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po
index a27e9fa95..b123396e0 100644
--- a/swift/locale/zh_CN/LC_MESSAGES/swift.po
+++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po
@@ -8,8 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: Swift\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-03-18 06:11+0000\n"
-"PO-Revision-Date: 2015-03-16 20:28+0000\n"
+"POT-Creation-Date: 2015-03-24 06:06+0000\n"
+"PO-Revision-Date: 2015-03-24 04:20+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language-Team: Chinese (China) "
"(http://www.transifex.com/projects/p/swift/language/zh_CN/)\n"
@@ -1111,9 +1111,9 @@ msgid "Account"
msgstr "账号"
#: swift/proxy/controllers/base.py:698 swift/proxy/controllers/base.py:731
-#: swift/proxy/controllers/obj.py:191 swift/proxy/controllers/obj.py:318
-#: swift/proxy/controllers/obj.py:358 swift/proxy/controllers/obj.py:376
-#: swift/proxy/controllers/obj.py:502
+#: swift/proxy/controllers/obj.py:192 swift/proxy/controllers/obj.py:319
+#: swift/proxy/controllers/obj.py:366 swift/proxy/controllers/obj.py:382
+#: swift/proxy/controllers/obj.py:593
msgid "Object"
msgstr "对象"
@@ -1144,7 +1144,7 @@ msgid "Trying to %(method)s %(path)s"
msgstr "尝试执行%(method)s %(path)s"
#: swift/proxy/controllers/base.py:817 swift/proxy/controllers/base.py:1037
-#: swift/proxy/controllers/obj.py:350 swift/proxy/controllers/obj.py:390
+#: swift/proxy/controllers/obj.py:357 swift/proxy/controllers/obj.py:402
msgid "ERROR Insufficient Storage"
msgstr "错误 存储空间不足"
@@ -1163,76 +1163,64 @@ msgstr ""
msgid "%(type)s returning 503 for %(statuses)s"
msgstr "%(type)s 返回 503 在 %(statuses)s"
-#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:117
+#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:118
msgid "Container"
msgstr "容器"
-#: swift/proxy/controllers/obj.py:319
+#: swift/proxy/controllers/obj.py:320
#, python-format
msgid "Trying to write to %s"
msgstr "尝试执行书写%s"
-#: swift/proxy/controllers/obj.py:353
+#: swift/proxy/controllers/obj.py:361
#, python-format
msgid "ERROR %(status)d Expect: 100-continue From Object Server"
msgstr ""
-#: swift/proxy/controllers/obj.py:359
+#: swift/proxy/controllers/obj.py:367
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "已知:100-continue on %s"
-#: swift/proxy/controllers/obj.py:377
+#: swift/proxy/controllers/obj.py:383
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "尝试执行获取最后的PUT状态%s"
-#: swift/proxy/controllers/obj.py:394
+#: swift/proxy/controllers/obj.py:406
#, python-format
msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s"
-#: swift/proxy/controllers/obj.py:665
+#: swift/proxy/controllers/obj.py:663
#, python-format
msgid "Object PUT returning 412, %(statuses)r"
msgstr "对象PUT返还 412,%(statuses)r "
-#: swift/proxy/controllers/obj.py:674
+#: swift/proxy/controllers/obj.py:672
#, python-format
msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
msgstr ""
-#: swift/proxy/controllers/obj.py:682
-#, python-format
-msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections"
-msgstr "对象PUT返回503,%(conns)s/%(nodes)s 请求连接"
-
-#: swift/proxy/controllers/obj.py:713
-#, python-format
-msgid ""
-"Object PUT exceptions during send, %(conns)s/%(nodes)s required "
-"connections"
-msgstr "对象PUT发送时出现异常,%(conns)s/%(nodes)s请求连接"
-
-#: swift/proxy/controllers/obj.py:724
+#: swift/proxy/controllers/obj.py:755
#, python-format
msgid "ERROR Client read timeout (%ss)"
msgstr "错误 客户读取超时(%ss)"
-#: swift/proxy/controllers/obj.py:729
+#: swift/proxy/controllers/obj.py:762
msgid "ERROR Exception causing client disconnect"
msgstr "错误 异常导致客户端中断连接"
-#: swift/proxy/controllers/obj.py:734
+#: swift/proxy/controllers/obj.py:767
msgid "Client disconnected without sending enough data"
msgstr "客户中断 尚未发送足够"
-#: swift/proxy/controllers/obj.py:743
+#: swift/proxy/controllers/obj.py:813
#, python-format
msgid "Object servers returned %s mismatched etags"
msgstr "对象服务器返还%s不匹配etags"
-#: swift/proxy/controllers/obj.py:747
+#: swift/proxy/controllers/obj.py:817
msgid "Object PUT"
msgstr "对象上传"
diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py
index 9697d9d8f..a8d14dfa2 100644
--- a/swift/obj/diskfile.py
+++ b/swift/obj/diskfile.py
@@ -1605,14 +1605,22 @@ class DiskFile(object):
"""
if not exists(self._tmpdir):
mkdirs(self._tmpdir)
- fd, tmppath = mkstemp(dir=self._tmpdir)
+ try:
+ fd, tmppath = mkstemp(dir=self._tmpdir)
+ except OSError as err:
+ if err.errno in (errno.ENOSPC, errno.EDQUOT):
+ # No more inodes in filesystem
+ raise DiskFileNoSpace()
+ raise
dfw = None
try:
if size is not None and size > 0:
try:
fallocate(fd, size)
- except OSError:
- raise DiskFileNoSpace()
+ except OSError as err:
+ if err.errno in (errno.ENOSPC, errno.EDQUOT):
+ raise DiskFileNoSpace()
+ raise
dfw = DiskFileWriter(self._name, self._datadir, fd, tmppath,
self._bytes_per_sync, self._threadpool)
yield dfw
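
The errno handling above follows a common pattern: translate out-of-space
conditions into DiskFileNoSpace and re-raise everything else unchanged. A
standalone sketch of that pattern (the exception class here is a stand-in,
not the real swift.common.exceptions import):

    import errno
    import os

    class DiskFileNoSpace(Exception):
        """Stand-in for swift.common.exceptions.DiskFileNoSpace."""

    def write_or_no_space(fd, data):
        try:
            os.write(fd, data)
        except OSError as err:
            if err.errno in (errno.ENOSPC, errno.EDQUOT):
                # filesystem full or quota exceeded: report "no space"
                raise DiskFileNoSpace()
            # any other OSError (EBADF, EINTR, ...) propagates unchanged
            raise
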
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index 1b9bcab61..70b0d0cf6 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -52,12 +52,13 @@ from swift.common.http import (
HTTP_MULTIPLE_CHOICES, HTTP_NOT_FOUND, HTTP_INTERNAL_SERVER_ERROR,
HTTP_SERVICE_UNAVAILABLE, HTTP_INSUFFICIENT_STORAGE,
HTTP_PRECONDITION_FAILED, HTTP_CONFLICT)
+from swift.common.storage_policy import POLICIES
from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \
HTTPServerError, HTTPServiceUnavailable, Request, \
- HTTPClientDisconnect, HeaderKeyDict
+ HTTPClientDisconnect, HeaderKeyDict, HTTPException
from swift.common.request_helpers import is_sys_or_user_meta, is_sys_meta, \
remove_items, copy_header_subset
@@ -321,7 +322,13 @@ class ObjectController(Controller):
def _connect_put_node(self, nodes, part, path, headers,
logger_thread_locals):
- """Method for a file PUT connect"""
+ """
+ Make a connection for a replicated object.
+
+ Connects to the first working node that it finds in node_iter
+ and sends over the request headers. Returns an HTTPConnection
+ object to handle the rest of the streaming.
+ """
self.app.logger.thread_locals = logger_thread_locals
for node in nodes:
try:
@@ -350,36 +357,41 @@ class ObjectController(Controller):
self.app.error_limit(node, _('ERROR Insufficient Storage'))
elif is_server_error(resp.status):
self.app.error_occurred(
- node, _('ERROR %(status)d Expect: 100-continue '
- 'From Object Server') % {
- 'status': resp.status})
+ node,
+ _('ERROR %(status)d Expect: 100-continue '
+ 'From Object Server') % {
+ 'status': resp.status})
except (Exception, Timeout):
self.app.exception_occurred(
node, _('Object'),
_('Expect: 100-continue on %s') % path)
+ def _await_response(self, conn, **kwargs):
+ with Timeout(self.app.node_timeout):
+ if conn.resp:
+ return conn.resp
+ else:
+ return conn.getresponse()
+
+ def _get_conn_response(self, conn, req, **kwargs):
+ try:
+ resp = self._await_response(conn, **kwargs)
+ return (conn, resp)
+ except (Exception, Timeout):
+ self.app.exception_occurred(
+ conn.node, _('Object'),
+ _('Trying to get final status of PUT to %s') % req.path)
+ return (None, None)
+
def _get_put_responses(self, req, conns, nodes):
statuses = []
reasons = []
bodies = []
etags = set()
- def get_conn_response(conn):
- try:
- with Timeout(self.app.node_timeout):
- if conn.resp:
- return (conn, conn.resp)
- else:
- return (conn, conn.getresponse())
- except (Exception, Timeout):
- self.app.exception_occurred(
- conn.node, _('Object'),
- _('Trying to get final status of PUT to %s') % req.path)
- return (None, None)
-
pile = GreenAsyncPile(len(conns))
for conn in conns:
- pile.spawn(get_conn_response, conn)
+ pile.spawn(self._get_conn_response, conn, req)
def _handle_response(conn, response):
statuses.append(response.status)
@@ -440,56 +452,135 @@ class ObjectController(Controller):
return req, delete_at_container, delete_at_part, delete_at_nodes
- @public
- @cors_validation
- @delay_denial
- def PUT(self, req):
- """HTTP PUT request handler."""
- if req.if_none_match is not None and '*' not in req.if_none_match:
- # Sending an etag with if-none-match isn't currently supported
- return HTTPBadRequest(request=req, content_type='text/plain',
- body='If-None-Match only supports *')
- container_info = self.container_info(
- self.account_name, self.container_name, req)
- policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
- container_info['storage_policy'])
- obj_ring = self.app.get_object_ring(policy_index)
+ def _handle_copy_request(self, req):
+ """
+ This method handles copying objects based on values set in the headers
+ 'X-Copy-From' and 'X-Copy-From-Account'
- # pass the policy index to storage nodes via req header
- req.headers['X-Backend-Storage-Policy-Index'] = policy_index
- container_partition = container_info['partition']
- containers = container_info['nodes']
- req.acl = container_info['write_acl']
- req.environ['swift_sync_key'] = container_info['sync_key']
- object_versions = container_info['versions']
- if 'swift.authorize' in req.environ:
- aresp = req.environ['swift.authorize'](req)
- if aresp:
- return aresp
+ This method was added as part of the refactoring of the PUT method and
+ the functionality is expected to be moved to middleware
+ """
+ if req.environ.get('swift.orig_req_method', req.method) != 'POST':
+ req.environ.setdefault('swift.log_info', []).append(
+ 'x-copy-from:%s' % req.headers['X-Copy-From'])
+ ver, acct, _rest = req.split_path(2, 3, True)
+ src_account_name = req.headers.get('X-Copy-From-Account', None)
+ if src_account_name:
+ src_account_name = check_account_format(req, src_account_name)
+ else:
+ src_account_name = acct
+ src_container_name, src_obj_name = check_copy_from_header(req)
+ source_header = '/%s/%s/%s/%s' % (
+ ver, src_account_name, src_container_name, src_obj_name)
+ source_req = req.copy_get()
+
+        # make sure the source request uses its container_info
+ source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
+ source_req.path_info = source_header
+ source_req.headers['X-Newest'] = 'true'
+
+ orig_obj_name = self.object_name
+ orig_container_name = self.container_name
+ orig_account_name = self.account_name
+ sink_req = Request.blank(req.path_info,
+ environ=req.environ, headers=req.headers)
+
+ self.object_name = src_obj_name
+ self.container_name = src_container_name
+ self.account_name = src_account_name
+ source_resp = self.GET(source_req)
+
+ # This gives middlewares a way to change the source; for example,
+ # this lets you COPY a SLO manifest and have the new object be the
+ # concatenation of the segments (like what a GET request gives
+ # the client), not a copy of the manifest file.
+ hook = req.environ.get(
+ 'swift.copy_hook',
+ (lambda source_req, source_resp, sink_req: source_resp))
+ source_resp = hook(source_req, source_resp, sink_req)
+
+ # reset names
+ self.object_name = orig_obj_name
+ self.container_name = orig_container_name
+ self.account_name = orig_account_name
+
+ if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
+ # this is a bit of ugly code, but I'm willing to live with it
+ # until copy request handling moves to middleware
+ return source_resp, None, None, None
+ if source_resp.content_length is None:
+ # This indicates a transfer-encoding: chunked source object,
+ # which currently only happens because there are more than
+ # CONTAINER_LISTING_LIMIT segments in a segmented object. In
+ # this case, we're going to refuse to do the server-side copy.
+ raise HTTPRequestEntityTooLarge(request=req)
+ if source_resp.content_length > constraints.MAX_FILE_SIZE:
+ raise HTTPRequestEntityTooLarge(request=req)
+
+ data_source = iter(source_resp.app_iter)
+ sink_req.content_length = source_resp.content_length
+ sink_req.etag = source_resp.etag
+
+ # we no longer need the X-Copy-From header
+ del sink_req.headers['X-Copy-From']
+ if 'X-Copy-From-Account' in sink_req.headers:
+ del sink_req.headers['X-Copy-From-Account']
+ if not req.content_type_manually_set:
+ sink_req.headers['Content-Type'] = \
+ source_resp.headers['Content-Type']
+ if config_true_value(
+ sink_req.headers.get('x-fresh-metadata', 'false')):
+ # post-as-copy: ignore new sysmeta, copy existing sysmeta
+ condition = lambda k: is_sys_meta('object', k)
+ remove_items(sink_req.headers, condition)
+ copy_header_subset(source_resp, sink_req, condition)
+ else:
+ # copy/update existing sysmeta and user meta
+ copy_headers_into(source_resp, sink_req)
+ copy_headers_into(req, sink_req)
- if not containers:
- return HTTPNotFound(request=req)
+ # copy over x-static-large-object for POSTs and manifest copies
+ if 'X-Static-Large-Object' in source_resp.headers and \
+ req.params.get('multipart-manifest') == 'get':
+ sink_req.headers['X-Static-Large-Object'] = \
+ source_resp.headers['X-Static-Large-Object']
- # Sometimes the 'content-type' header exists, but is set to None.
- content_type_manually_set = True
- detect_content_type = \
- config_true_value(req.headers.get('x-detect-content-type'))
- if detect_content_type or not req.headers.get('content-type'):
- guessed_type, _junk = mimetypes.guess_type(req.path_info)
- req.headers['Content-Type'] = guessed_type or \
- 'application/octet-stream'
- if detect_content_type:
- req.headers.pop('x-detect-content-type')
- else:
- content_type_manually_set = False
+ req = sink_req
- error_response = check_object_creation(req, self.object_name) or \
- check_content_type(req)
- if error_response:
- return error_response
+ def update_response(req, resp):
+ acct, path = source_resp.environ['PATH_INFO'].split('/', 3)[2:4]
+ resp.headers['X-Copied-From-Account'] = quote(acct)
+ resp.headers['X-Copied-From'] = quote(path)
+ if 'last-modified' in source_resp.headers:
+ resp.headers['X-Copied-From-Last-Modified'] = \
+ source_resp.headers['last-modified']
+ copy_headers_into(req, resp)
+ return resp
+ # this is a bit of ugly code, but I'm willing to live with it
+ # until copy request handling moves to middleware
+ return None, req, data_source, update_response
+
+ def _handle_object_versions(self, req):
+ """
+        This method handles versioning of objects in containers that
+ have the feature enabled.
+
+ When a new PUT request is sent, the proxy checks for previous versions
+ of that same object name. If found, it is copied to a different
+ container and the new version is stored in its place.
+
+ This method was added as part of the PUT method refactoring and the
+ functionality is expected to be moved to middleware
+ """
+ container_info = self.container_info(
+ self.account_name, self.container_name, req)
+ policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
+ container_info['storage_policy'])
+ obj_ring = self.app.get_object_ring(policy_index)
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
+ object_versions = container_info['versions']
# do a HEAD request for checking object versions
if object_versions and not req.environ.get('swift_versioned_copy'):
@@ -502,20 +593,6 @@ class ObjectController(Controller):
hreq, _('Object'), obj_ring, partition,
hreq.swift_entity_path)
- # Used by container sync feature
- if 'x-timestamp' in req.headers:
- try:
- req_timestamp = Timestamp(req.headers['X-Timestamp'])
- except ValueError:
- return HTTPBadRequest(
- request=req, content_type='text/plain',
- body='X-Timestamp should be a UNIX timestamp float value; '
- 'was %r' % req.headers['x-timestamp'])
- req.headers['X-Timestamp'] = req_timestamp.internal
- else:
- req.headers['X-Timestamp'] = Timestamp(time.time()).internal
-
- if object_versions and not req.environ.get('swift_versioned_copy'):
is_manifest = 'X-Object-Manifest' in req.headers or \
'X-Object-Manifest' in hresp.headers
if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
@@ -543,120 +620,41 @@ class ObjectController(Controller):
copy_resp = self.COPY(copy_req)
if is_client_error(copy_resp.status_int):
# missing container or bad permissions
- return HTTPPreconditionFailed(request=req)
+ raise HTTPPreconditionFailed(request=req)
elif not is_success(copy_resp.status_int):
# could not copy the data, bail
- return HTTPServiceUnavailable(request=req)
+ raise HTTPServiceUnavailable(request=req)
- reader = req.environ['wsgi.input'].read
- data_source = iter(lambda: reader(self.app.client_chunk_size), '')
- source_header = req.headers.get('X-Copy-From')
- source_resp = None
- if source_header:
- if req.environ.get('swift.orig_req_method', req.method) != 'POST':
- req.environ.setdefault('swift.log_info', []).append(
- 'x-copy-from:%s' % source_header)
- ver, acct, _rest = req.split_path(2, 3, True)
- src_account_name = req.headers.get('X-Copy-From-Account', None)
- if src_account_name:
- src_account_name = check_account_format(req, src_account_name)
- else:
- src_account_name = acct
- src_container_name, src_obj_name = check_copy_from_header(req)
- source_header = '/%s/%s/%s/%s' % (
- ver, src_account_name, src_container_name, src_obj_name)
- source_req = req.copy_get()
-
- # make sure the source request uses it's container_info
- source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
- source_req.path_info = source_header
- source_req.headers['X-Newest'] = 'true'
- orig_obj_name = self.object_name
- orig_container_name = self.container_name
- orig_account_name = self.account_name
- self.object_name = src_obj_name
- self.container_name = src_container_name
- self.account_name = src_account_name
- sink_req = Request.blank(req.path_info,
- environ=req.environ, headers=req.headers)
- source_resp = self.GET(source_req)
-
- # This gives middlewares a way to change the source; for example,
- # this lets you COPY a SLO manifest and have the new object be the
- # concatenation of the segments (like what a GET request gives
- # the client), not a copy of the manifest file.
- hook = req.environ.get(
- 'swift.copy_hook',
- (lambda source_req, source_resp, sink_req: source_resp))
- source_resp = hook(source_req, source_resp, sink_req)
-
- if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
- return source_resp
- self.object_name = orig_obj_name
- self.container_name = orig_container_name
- self.account_name = orig_account_name
- data_source = iter(source_resp.app_iter)
- sink_req.content_length = source_resp.content_length
- if sink_req.content_length is None:
- # This indicates a transfer-encoding: chunked source object,
- # which currently only happens because there are more than
- # CONTAINER_LISTING_LIMIT segments in a segmented object. In
- # this case, we're going to refuse to do the server-side copy.
- return HTTPRequestEntityTooLarge(request=req)
- if sink_req.content_length > constraints.MAX_FILE_SIZE:
- return HTTPRequestEntityTooLarge(request=req)
- sink_req.etag = source_resp.etag
-
- # we no longer need the X-Copy-From header
- del sink_req.headers['X-Copy-From']
- if 'X-Copy-From-Account' in sink_req.headers:
- del sink_req.headers['X-Copy-From-Account']
- if not content_type_manually_set:
- sink_req.headers['Content-Type'] = \
- source_resp.headers['Content-Type']
- if config_true_value(
- sink_req.headers.get('x-fresh-metadata', 'false')):
- # post-as-copy: ignore new sysmeta, copy existing sysmeta
- condition = lambda k: is_sys_meta('object', k)
- remove_items(sink_req.headers, condition)
- copy_header_subset(source_resp, sink_req, condition)
+ def _update_content_type(self, req):
+ # Sometimes the 'content-type' header exists, but is set to None.
+ req.content_type_manually_set = True
+ detect_content_type = \
+ config_true_value(req.headers.get('x-detect-content-type'))
+ if detect_content_type or not req.headers.get('content-type'):
+ guessed_type, _junk = mimetypes.guess_type(req.path_info)
+ req.headers['Content-Type'] = guessed_type or \
+ 'application/octet-stream'
+ if detect_content_type:
+ req.headers.pop('x-detect-content-type')
else:
- # copy/update existing sysmeta and user meta
- copy_headers_into(source_resp, sink_req)
- copy_headers_into(req, sink_req)
-
- # copy over x-static-large-object for POSTs and manifest copies
- if 'X-Static-Large-Object' in source_resp.headers and \
- req.params.get('multipart-manifest') == 'get':
- sink_req.headers['X-Static-Large-Object'] = \
- source_resp.headers['X-Static-Large-Object']
-
- req = sink_req
-
- req, delete_at_container, delete_at_part, \
- delete_at_nodes = self._config_obj_expiration(req)
-
- node_iter = GreenthreadSafeIterator(
- self.iter_nodes_local_first(obj_ring, partition))
- pile = GreenPile(len(nodes))
- te = req.headers.get('transfer-encoding', '')
- chunked = ('chunked' in te)
-
- outgoing_headers = self._backend_requests(
- req, len(nodes), container_partition, containers,
- delete_at_container, delete_at_part, delete_at_nodes)
+ req.content_type_manually_set = False
- for nheaders in outgoing_headers:
- # RFC2616:8.2.3 disallows 100-continue without a body
- if (req.content_length > 0) or chunked:
- nheaders['Expect'] = '100-continue'
- pile.spawn(self._connect_put_node, node_iter, partition,
- req.swift_entity_path, nheaders,
- self.app.logger.thread_locals)
-
- conns = [conn for conn in pile if conn]
- min_conns = quorum_size(len(nodes))
+ def _update_x_timestamp(self, req):
+ # Used by container sync feature
+ if 'x-timestamp' in req.headers:
+ try:
+ req_timestamp = Timestamp(req.headers['X-Timestamp'])
+ except ValueError:
+ raise HTTPBadRequest(
+ request=req, content_type='text/plain',
+ body='X-Timestamp should be a UNIX timestamp float value; '
+ 'was %r' % req.headers['x-timestamp'])
+ req.headers['X-Timestamp'] = req_timestamp.internal
+ else:
+ req.headers['X-Timestamp'] = Timestamp(time.time()).internal
+ return None
+ def _check_failure_put_connections(self, conns, req, nodes):
if req.if_none_match is not None and '*' in req.if_none_match:
statuses = [conn.resp.status for conn in conns if conn.resp]
if HTTP_PRECONDITION_FAILED in statuses:
@@ -664,7 +662,7 @@ class ObjectController(Controller):
self.app.logger.debug(
_('Object PUT returning 412, %(statuses)r'),
{'statuses': statuses})
- return HTTPPreconditionFailed(request=req)
+ raise HTTPPreconditionFailed(request=req)
if any(conn for conn in conns if conn.resp and
conn.resp.status == HTTP_CONFLICT):
@@ -675,14 +673,44 @@ class ObjectController(Controller):
'%(req_timestamp)s <= %(timestamps)r'),
{'req_timestamp': req.timestamp.internal,
'timestamps': ', '.join(timestamps)})
- return HTTPAccepted(request=req)
+ raise HTTPAccepted(request=req)
+
+ min_conns = quorum_size(len(nodes))
+ self._check_min_conn(req, conns, min_conns)
+
+ def _get_put_connections(self, req, nodes, partition, outgoing_headers,
+ policy, expect):
+ """
+ Establish connections to storage nodes for PUT request
+ """
+ obj_ring = policy.object_ring
+ node_iter = GreenthreadSafeIterator(
+ self.iter_nodes_local_first(obj_ring, partition))
+ pile = GreenPile(len(nodes))
+
+ for nheaders in outgoing_headers:
+ if expect:
+ nheaders['Expect'] = '100-continue'
+ pile.spawn(self._connect_put_node, node_iter, partition,
+ req.swift_entity_path, nheaders,
+ self.app.logger.thread_locals)
+
+ conns = [conn for conn in pile if conn]
+
+ return conns
+
+ def _check_min_conn(self, req, conns, min_conns, msg=None):
+ msg = msg or 'Object PUT returning 503, %(conns)s/%(nodes)s ' \
+ 'required connections'
if len(conns) < min_conns:
- self.app.logger.error(
- _('Object PUT returning 503, %(conns)s/%(nodes)s '
- 'required connections'),
- {'conns': len(conns), 'nodes': min_conns})
- return HTTPServiceUnavailable(request=req)
+ self.app.logger.error((msg),
+ {'conns': len(conns), 'nodes': min_conns})
+ raise HTTPServiceUnavailable(request=req)
+
+ def _transfer_data(self, req, data_source, conns, nodes):
+ min_conns = quorum_size(len(nodes))
+
bytes_transferred = 0
try:
with ContextPool(len(nodes)) as pool:
@@ -695,48 +723,90 @@ class ObjectController(Controller):
try:
chunk = next(data_source)
except StopIteration:
- if chunked:
+ if req.is_chunked:
for conn in conns:
conn.queue.put('0\r\n\r\n')
break
bytes_transferred += len(chunk)
if bytes_transferred > constraints.MAX_FILE_SIZE:
- return HTTPRequestEntityTooLarge(request=req)
+ raise HTTPRequestEntityTooLarge(request=req)
for conn in list(conns):
if not conn.failed:
conn.queue.put(
'%x\r\n%s\r\n' % (len(chunk), chunk)
- if chunked else chunk)
+ if req.is_chunked else chunk)
else:
+ conn.close()
conns.remove(conn)
- if len(conns) < min_conns:
- self.app.logger.error(_(
- 'Object PUT exceptions during'
- ' send, %(conns)s/%(nodes)s required connections'),
- {'conns': len(conns), 'nodes': min_conns})
- return HTTPServiceUnavailable(request=req)
+ self._check_min_conn(
+ req, conns, min_conns,
+ msg='Object PUT exceptions during'
+ ' send, %(conns)s/%(nodes)s required connections')
for conn in conns:
if conn.queue.unfinished_tasks:
conn.queue.join()
conns = [conn for conn in conns if not conn.failed]
+ self._check_min_conn(
+ req, conns, min_conns,
+ msg='Object PUT exceptions after last send, '
+ '%(conns)s/%(nodes)s required connections')
except ChunkReadTimeout as err:
self.app.logger.warn(
_('ERROR Client read timeout (%ss)'), err.seconds)
self.app.logger.increment('client_timeouts')
- return HTTPRequestTimeout(request=req)
+ raise HTTPRequestTimeout(request=req)
+ except HTTPException:
+ raise
except (Exception, Timeout):
self.app.logger.exception(
_('ERROR Exception causing client disconnect'))
- return HTTPClientDisconnect(request=req)
+ raise HTTPClientDisconnect(request=req)
if req.content_length and bytes_transferred < req.content_length:
req.client_disconnect = True
self.app.logger.warn(
_('Client disconnected without sending enough data'))
self.app.logger.increment('client_disconnects')
- return HTTPClientDisconnect(request=req)
+ raise HTTPClientDisconnect(request=req)
+
+ def _store_object(self, req, data_source, nodes, partition,
+ outgoing_headers):
+ """
+ Store a replicated object.
+
+        This method is responsible for establishing connections to the
+        storage nodes and sending the object to each one of them. After
+        sending the data, the "best" response will be returned based on
+        the statuses from all connections.
+ """
+ policy_idx = req.headers.get('X-Backend-Storage-Policy-Index')
+ policy = POLICIES.get_by_index(policy_idx)
+ if not nodes:
+ return HTTPNotFound()
+
+ # RFC2616:8.2.3 disallows 100-continue without a body
+ if (req.content_length > 0) or req.is_chunked:
+ expect = True
+ else:
+ expect = False
+ conns = self._get_put_connections(req, nodes, partition,
+ outgoing_headers, policy, expect)
+
+ try:
+ # check that a minimum number of connections were established and
+ # meet all the correct conditions set in the request
+ self._check_failure_put_connections(conns, req, nodes)
- statuses, reasons, bodies, etags = self._get_put_responses(req, conns,
- nodes)
+ # transfer data
+ self._transfer_data(req, data_source, conns, nodes)
+
+ # get responses
+ statuses, reasons, bodies, etags = self._get_put_responses(
+ req, conns, nodes)
+ except HTTPException as resp:
+ return resp
+ finally:
+ for conn in conns:
+ conn.close()
if len(etags) > 1:
self.app.logger.error(
@@ -745,14 +815,6 @@ class ObjectController(Controller):
etag = etags.pop() if len(etags) else None
resp = self.best_response(req, statuses, reasons, bodies,
_('Object PUT'), etag=etag)
- if source_header:
- acct, path = source_header.split('/', 3)[2:4]
- resp.headers['X-Copied-From-Account'] = quote(acct)
- resp.headers['X-Copied-From'] = quote(path)
- if 'last-modified' in source_resp.headers:
- resp.headers['X-Copied-From-Last-Modified'] = \
- source_resp.headers['last-modified']
- copy_headers_into(req, resp)
resp.last_modified = math.ceil(
float(Timestamp(req.headers['X-Timestamp'])))
return resp
@@ -760,6 +822,79 @@ class ObjectController(Controller):
@public
@cors_validation
@delay_denial
+ def PUT(self, req):
+ """HTTP PUT request handler."""
+ if req.if_none_match is not None and '*' not in req.if_none_match:
+ # Sending an etag with if-none-match isn't currently supported
+ return HTTPBadRequest(request=req, content_type='text/plain',
+ body='If-None-Match only supports *')
+ container_info = self.container_info(
+ self.account_name, self.container_name, req)
+ policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
+ container_info['storage_policy'])
+ obj_ring = self.app.get_object_ring(policy_index)
+ container_nodes = container_info['nodes']
+ container_partition = container_info['partition']
+ partition, nodes = obj_ring.get_nodes(
+ self.account_name, self.container_name, self.object_name)
+
+ # pass the policy index to storage nodes via req header
+ req.headers['X-Backend-Storage-Policy-Index'] = policy_index
+ req.acl = container_info['write_acl']
+ req.environ['swift_sync_key'] = container_info['sync_key']
+
+ # is request authorized
+ if 'swift.authorize' in req.environ:
+ aresp = req.environ['swift.authorize'](req)
+ if aresp:
+ return aresp
+
+ if not container_info['nodes']:
+ return HTTPNotFound(request=req)
+
+ # update content type in case it is missing
+ self._update_content_type(req)
+
+ # check constraints on object name and request headers
+ error_response = check_object_creation(req, self.object_name) or \
+ check_content_type(req)
+ if error_response:
+ return error_response
+
+ self._update_x_timestamp(req)
+
+ # check if versioning is enabled and handle copying previous version
+ self._handle_object_versions(req)
+
+ # check if request is a COPY of an existing object
+ source_header = req.headers.get('X-Copy-From')
+ if source_header:
+ error_response, req, data_source, update_response = \
+ self._handle_copy_request(req)
+ if error_response:
+ return error_response
+ else:
+ reader = req.environ['wsgi.input'].read
+ data_source = iter(lambda: reader(self.app.client_chunk_size), '')
+ update_response = lambda req, resp: resp
+
+        # check if object is set to be automatically deleted (i.e. expired)
+ req, delete_at_container, delete_at_part, \
+ delete_at_nodes = self._config_obj_expiration(req)
+
+ # add special headers to be handled by storage nodes
+ outgoing_headers = self._backend_requests(
+ req, len(nodes), container_partition, container_nodes,
+ delete_at_container, delete_at_part, delete_at_nodes)
+
+ # send object to storage nodes
+ resp = self._store_object(
+ req, data_source, nodes, partition, outgoing_headers)
+ return update_response(req, resp)
+
+ @public
+ @cors_validation
+ @delay_denial
def DELETE(self, req):
"""HTTP DELETE request handler."""
container_info = self.container_info(
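
One theme of the PUT refactor above: the new helper methods raise swob error
responses (HTTPException subclasses) instead of returning them, and
_store_object converts a raised response back into a normal return value. A
reduced sketch of that control flow (module-level functions here are
illustrative; the real code uses controller methods):

    from swift.common.swob import (HTTPException, HTTPServiceUnavailable,
                                   Request)

    def check_min_conn(req, conns, min_conns):
        # failure is signalled by raising the error response ...
        if len(conns) < min_conns:
            raise HTTPServiceUnavailable(request=req)

    def store_object(req, conns, min_conns):
        try:
            check_min_conn(req, conns, min_conns)
            # ... transfer the data and collect the final statuses ...
        except HTTPException as resp:
            # ... and the caller turns the raised response into a return
            return resp

    resp = store_object(Request.blank('/v1/AUTH_test/c/o'), [], 2)
    # resp.status -> '503 Service Unavailable'
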
diff --git a/test/functional/__init__.py b/test/functional/__init__.py
index c4d764268..4a8cb80bd 100644
--- a/test/functional/__init__.py
+++ b/test/functional/__init__.py
@@ -23,6 +23,7 @@ import eventlet
import eventlet.debug
import functools
import random
+from ConfigParser import ConfigParser, NoSectionError
from time import time, sleep
from httplib import HTTPException
from urlparse import urlparse
@@ -32,6 +33,7 @@ from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
from swift.common.middleware.memcache import MemcacheMiddleware
+from swift.common.storage_policy import parse_storage_policies, PolicyError
from test import get_config
from test.functional.swift_test_client import Account, Connection, \
@@ -50,6 +52,9 @@ from swift.container import server as container_server
from swift.obj import server as object_server, mem_server as mem_object_server
import swift.proxy.controllers.obj
+
+DEBUG = True
+
# In order to get the proper blocking behavior of sockets without using
# threads, where we can set an arbitrary timeout for some piece of code under
# test, we use eventlet with the standard socket library patched. We have to
@@ -99,7 +104,7 @@ orig_hash_path_suff_pref = ('', '')
orig_swift_conf_name = None
in_process = False
-_testdir = _test_servers = _test_sockets = _test_coros = None
+_testdir = _test_servers = _test_coros = None
class FakeMemcacheMiddleware(MemcacheMiddleware):
@@ -113,29 +118,187 @@ class FakeMemcacheMiddleware(MemcacheMiddleware):
self.memcache = FakeMemcache()
-# swift.conf contents for in-process functional test runs
-functests_swift_conf = '''
-[swift-hash]
-swift_hash_path_suffix = inprocfunctests
-swift_hash_path_prefix = inprocfunctests
+class InProcessException(BaseException):
+ pass
-[swift-constraints]
-max_file_size = %d
-''' % ((8 * 1024 * 1024) + 2) # 8 MB + 2
+def _info(msg):
+ print >> sys.stderr, msg
-def in_process_setup(the_object_server=object_server):
- print >>sys.stderr, 'IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS'
- print >>sys.stderr, 'Using object_server: %s' % the_object_server.__name__
- _dir = os.path.normpath(os.path.join(os.path.abspath(__file__),
- os.pardir, os.pardir, os.pardir))
- proxy_conf = os.path.join(_dir, 'etc', 'proxy-server.conf-sample')
- if os.path.exists(proxy_conf):
- print >>sys.stderr, 'Using proxy-server config from %s' % proxy_conf
+def _debug(msg):
+ if DEBUG:
+ _info('DEBUG: ' + msg)
+
+
+def _in_process_setup_swift_conf(swift_conf_src, testdir):
+ # override swift.conf contents for in-process functional test runs
+ conf = ConfigParser()
+ conf.read(swift_conf_src)
+ try:
+ section = 'swift-hash'
+ conf.set(section, 'swift_hash_path_suffix', 'inprocfunctests')
+ conf.set(section, 'swift_hash_path_prefix', 'inprocfunctests')
+ section = 'swift-constraints'
+ max_file_size = (8 * 1024 * 1024) + 2 # 8 MB + 2
+ conf.set(section, 'max_file_size', max_file_size)
+ except NoSectionError:
+ msg = 'Conf file %s is missing section %s' % (swift_conf_src, section)
+ raise InProcessException(msg)
+
+ test_conf_file = os.path.join(testdir, 'swift.conf')
+ with open(test_conf_file, 'w') as fp:
+ conf.write(fp)
+
+ return test_conf_file
+
+
+def _in_process_find_conf_file(conf_src_dir, conf_file_name, use_sample=True):
+ """
+    Look for a file first in conf_src_dir; if it is not found there, optionally
+    fall back to the sample file in the source tree 'etc' dir.
+
+ :param conf_src_dir: Directory in which to search first for conf file. May
+ be None
+ :param conf_file_name: Name of conf file
+ :param use_sample: If True and the conf_file_name is not found, then return
+ any sample conf file found in the source tree sample
+ 'etc' dir by appending '-sample' to conf_file_name
+ :returns: Path to conf file
+ :raises InProcessException: If no conf file is found
+ """
+ dflt_src_dir = os.path.normpath(os.path.join(os.path.abspath(__file__),
+ os.pardir, os.pardir, os.pardir,
+ 'etc'))
+ conf_src_dir = dflt_src_dir if conf_src_dir is None else conf_src_dir
+ conf_file_path = os.path.join(conf_src_dir, conf_file_name)
+ if os.path.exists(conf_file_path):
+ return conf_file_path
+
+ if use_sample:
+ # fall back to using the corresponding sample conf file
+ conf_file_name += '-sample'
+ conf_file_path = os.path.join(dflt_src_dir, conf_file_name)
+ if os.path.exists(conf_file_path):
+ return conf_file_path
+
+ msg = 'Failed to find config file %s' % conf_file_name
+ raise InProcessException(msg)
+
+
+def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
+ """
+ If SWIFT_TEST_POLICY is set:
+ - look in swift.conf file for specified policy
+ - move this to be policy-0 but preserving its options
+ - copy its ring file to test dir, changing its devices to suit
+ in process testing, and renaming it to suit policy-0
+ Otherwise, create a default ring file.
+ """
+ conf = ConfigParser()
+ conf.read(swift_conf)
+ sp_prefix = 'storage-policy:'
+
+ try:
+ # policy index 0 will be created if no policy exists in conf
+ policies = parse_storage_policies(conf)
+ except PolicyError as e:
+ raise InProcessException(e)
+
+ # clear all policies from test swift.conf before adding test policy back
+ for policy in policies:
+ conf.remove_section(sp_prefix + str(policy.idx))
+
+ policy_specified = os.environ.get('SWIFT_TEST_POLICY')
+ if policy_specified:
+ policy_to_test = policies.get_by_name(policy_specified)
+ if policy_to_test is None:
+ raise InProcessException('Failed to find policy name "%s"'
+ % policy_specified)
+ _info('Using specified policy %s' % policy_to_test.name)
else:
- print >>sys.stderr, 'Failed to find conf file %s' % proxy_conf
- return
+ policy_to_test = policies.default
+ _info('Defaulting to policy %s' % policy_to_test.name)
+
+ # make policy_to_test be policy index 0 and default for the test config
+ sp_zero_section = sp_prefix + '0'
+ conf.add_section(sp_zero_section)
+ for (k, v) in policy_to_test.get_options().items():
+ conf.set(sp_zero_section, k, v)
+ conf.set(sp_zero_section, 'default', True)
+
+ with open(swift_conf, 'w') as fp:
+ conf.write(fp)
+
+ # look for a source ring file
+ ring_file_src = ring_file_test = 'object.ring.gz'
+ if policy_to_test.idx:
+ ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
+ try:
+ ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src,
+ use_sample=False)
+ except InProcessException as e:
+ if policy_specified:
+ raise InProcessException('Failed to find ring file %s'
+ % ring_file_src)
+ ring_file_src = None
+
+ ring_file_test = os.path.join(testdir, ring_file_test)
+ if ring_file_src:
+ # copy source ring file to a policy-0 test ring file, re-homing servers
+ _info('Using source ring file %s' % ring_file_src)
+ ring_data = ring.RingData.load(ring_file_src)
+ obj_sockets = []
+ for dev in ring_data.devs:
+ device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
+        utils.mkdirs(os.path.join(_testdir, device))
+        utils.mkdirs(os.path.join(_testdir, device, 'tmp'))
+ obj_socket = eventlet.listen(('localhost', 0))
+ obj_sockets.append(obj_socket)
+ dev['port'] = obj_socket.getsockname()[1]
+ dev['ip'] = '127.0.0.1'
+ dev['device'] = device
+ dev['replication_port'] = dev['port']
+ dev['replication_ip'] = dev['ip']
+ ring_data.save(ring_file_test)
+ else:
+ # make default test ring, 2 replicas, 4 partitions, 2 devices
+ _info('No source object ring file, creating 2rep/4part/2dev ring')
+ obj_sockets = [eventlet.listen(('localhost', 0)) for _ in (0, 1)]
+ ring_data = ring.RingData(
+ [[0, 1, 0, 1], [1, 0, 1, 0]],
+ [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
+ 'port': obj_sockets[0].getsockname()[1]},
+ {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
+ 'port': obj_sockets[1].getsockname()[1]}],
+ 30)
+ with closing(GzipFile(ring_file_test, 'wb')) as f:
+ pickle.dump(ring_data, f)
+
+ for dev in ring_data.devs:
+ _debug('Ring file dev: %s' % dev)
+
+ return obj_sockets
+
+
+def in_process_setup(the_object_server=object_server):
+ _info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
+ _info('Using object_server class: %s' % the_object_server.__name__)
+ conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')
+
+ if conf_src_dir is not None:
+ if not os.path.isdir(conf_src_dir):
+ msg = 'Config source %s is not a dir' % conf_src_dir
+ raise InProcessException(msg)
+ _info('Using config source dir: %s' % conf_src_dir)
+
+    # If SWIFT_TEST_IN_PROCESS_CONF_DIR specifies a config source dir then
+ # prefer config files from there, otherwise read config from source tree
+ # sample files. A mixture of files from the two sources is allowed.
+ proxy_conf = _in_process_find_conf_file(conf_src_dir, 'proxy-server.conf')
+ _info('Using proxy config from %s' % proxy_conf)
+ swift_conf_src = _in_process_find_conf_file(conf_src_dir, 'swift.conf')
+ _info('Using swift config from %s' % swift_conf_src)
monkey_patch_mimetools()
@@ -148,9 +311,8 @@ def in_process_setup(the_object_server=object_server):
utils.mkdirs(os.path.join(_testdir, 'sdb1'))
utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
- swift_conf = os.path.join(_testdir, "swift.conf")
- with open(swift_conf, "w") as scfp:
- scfp.write(functests_swift_conf)
+ swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
+ obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir)
global orig_swift_conf_name
orig_swift_conf_name = utils.SWIFT_CONF_FILE
@@ -221,11 +383,6 @@ def in_process_setup(the_object_server=object_server):
acc2lis = eventlet.listen(('localhost', 0))
con1lis = eventlet.listen(('localhost', 0))
con2lis = eventlet.listen(('localhost', 0))
- obj1lis = eventlet.listen(('localhost', 0))
- obj2lis = eventlet.listen(('localhost', 0))
- global _test_sockets
- _test_sockets = \
- (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
with closing(GzipFile(account_ring_path, 'wb')) as f:
@@ -243,14 +400,6 @@ def in_process_setup(the_object_server=object_server):
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': con2lis.getsockname()[1]}], 30),
f)
- object_ring_path = os.path.join(_testdir, 'object.ring.gz')
- with closing(GzipFile(object_ring_path, 'wb')) as f:
- pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
- [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
- 'port': obj1lis.getsockname()[1]},
- {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
- 'port': obj2lis.getsockname()[1]}], 30),
- f)
eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
# Turn off logging requests by the underlying WSGI software.
@@ -270,10 +419,13 @@ def in_process_setup(the_object_server=object_server):
config, logger=debug_logger('cont1'))
con2srv = container_server.ContainerController(
config, logger=debug_logger('cont2'))
- obj1srv = the_object_server.ObjectController(
- config, logger=debug_logger('obj1'))
- obj2srv = the_object_server.ObjectController(
- config, logger=debug_logger('obj2'))
+
+ objsrvs = [
+ (obj_sockets[index],
+ the_object_server.ObjectController(
+ config, logger=debug_logger('obj%d' % (index + 1))))
+ for index in range(len(obj_sockets))
+ ]
logger = debug_logger('proxy')
@@ -283,7 +435,10 @@ def in_process_setup(the_object_server=object_server):
with mock.patch('swift.common.utils.get_logger', get_logger):
with mock.patch('swift.common.middleware.memcache.MemcacheMiddleware',
FakeMemcacheMiddleware):
- app = loadapp(proxy_conf, global_conf=config)
+ try:
+ app = loadapp(proxy_conf, global_conf=config)
+ except Exception as e:
+ raise InProcessException(e)
nl = utils.NullLogger()
prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
@@ -291,11 +446,13 @@ def in_process_setup(the_object_server=object_server):
acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl)
con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl)
- obj1spa = eventlet.spawn(eventlet.wsgi.server, obj1lis, obj1srv, nl)
- obj2spa = eventlet.spawn(eventlet.wsgi.server, obj2lis, obj2srv, nl)
+
+ objspa = [eventlet.spawn(eventlet.wsgi.server, objsrv[0], objsrv[1], nl)
+ for objsrv in objsrvs]
+
global _test_coros
_test_coros = \
- (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa)
+ (prospa, acc1spa, acc2spa, con1spa, con2spa) + tuple(objspa)
# Create accounts "test" and "test2"
def create_account(act):
@@ -396,8 +553,13 @@ def setup_package():
if in_process:
in_mem_obj_env = os.environ.get('SWIFT_TEST_IN_MEMORY_OBJ')
in_mem_obj = utils.config_true_value(in_mem_obj_env)
- in_process_setup(the_object_server=(
- mem_object_server if in_mem_obj else object_server))
+ try:
+ in_process_setup(the_object_server=(
+ mem_object_server if in_mem_obj else object_server))
+ except InProcessException as exc:
+ print >> sys.stderr, ('Exception during in-process setup: %s'
+ % str(exc))
+ raise
global web_front_end
web_front_end = config.get('web_front_end', 'integral')
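
A minimal sketch of the fallback ring created above when no source object ring file is found: the replica2part2dev table has two replica rows across four partitions and two devices, so every partition is held by both sda1 and sdb1. The snippet below only re-derives that mapping from the literal table in the hunk and uses no Swift APIs (the part_shift of 30 is omitted; it only controls how object names hash to partitions).

    replica2part2dev = [[0, 1, 0, 1], [1, 0, 1, 0]]
    devs = [{'id': 0, 'device': 'sda1'}, {'id': 1, 'device': 'sdb1'}]
    for part in range(4):
        # one device per replica row holds this partition
        holders = [devs[row[part]]['device'] for row in replica2part2dev]
        print('partition %d -> %s' % (part, holders))
    # partition 0 -> ['sda1', 'sdb1'], and similarly for partitions 1-3
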
diff --git a/test/probe/common.py b/test/probe/common.py
index 62988835c..3cea02241 100644
--- a/test/probe/common.py
+++ b/test/probe/common.py
@@ -198,7 +198,8 @@ def get_ring(ring_name, required_replicas, required_devices,
def get_policy(**kwargs):
kwargs.setdefault('is_deprecated', False)
- # go thru the policies and make sure they match the requirements of kwargs
+ # go through the policies and make sure they match the
+ # requirements of kwargs
for policy in POLICIES:
# TODO: for EC, pop policy type here and check it first
matches = True
diff --git a/test/unit/__init__.py b/test/unit/__init__.py
index 0e10d3bac..da7212c98 100644
--- a/test/unit/__init__.py
+++ b/test/unit/__init__.py
@@ -733,6 +733,9 @@ def fake_http_connect(*code_iter, **kwargs):
def getheader(self, name, default=None):
return swob.HeaderKeyDict(self.getheaders()).get(name, default)
+ def close(self):
+ pass
+
timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
if isinstance(kwargs.get('headers'), list):
diff --git a/test/unit/account/test_backend.py b/test/unit/account/test_backend.py
index 82978a830..d231fea74 100644
--- a/test/unit/account/test_backend.py
+++ b/test/unit/account/test_backend.py
@@ -747,7 +747,7 @@ def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):
The AccountBroker initialze() function before we added the
policy stat table. Used by test_policy_table_creation() to
make sure that the AccountBroker will correctly add the table
- for cases where the DB existed before the policy suport was added.
+ for cases where the DB existed before the policy support was added.
:param conn: DB connection object
:param put_timestamp: put timestamp
diff --git a/test/unit/cli/test_info.py b/test/unit/cli/test_info.py
index 2766520fd..fede0d30f 100644
--- a/test/unit/cli/test_info.py
+++ b/test/unit/cli/test_info.py
@@ -386,6 +386,17 @@ class TestPrintObjFullMeta(TestCliInfoBase):
print_obj(self.datafile, swift_dir=self.testdir)
self.assertTrue('/objects-1/' in out.getvalue())
+ def test_print_obj_meta_and_ts_files(self):
+ # verify that print_obj will also read from meta and ts files
+ base = os.path.splitext(self.datafile)[0]
+ for ext in ('.meta', '.ts'):
+ test_file = '%s%s' % (base, ext)
+ os.link(self.datafile, test_file)
+ out = StringIO()
+ with mock.patch('sys.stdout', out):
+ print_obj(test_file, swift_dir=self.testdir)
+ self.assertTrue('/objects-1/' in out.getvalue())
+
def test_print_obj_no_ring(self):
no_rings_dir = os.path.join(self.testdir, 'no_rings_here')
os.mkdir(no_rings_dir)
diff --git a/test/unit/cli/test_recon.py b/test/unit/cli/test_recon.py
index e9ad45d2c..7009be851 100644
--- a/test/unit/cli/test_recon.py
+++ b/test/unit/cli/test_recon.py
@@ -293,6 +293,43 @@ class TestRecon(unittest.TestCase):
% ex)
self.assertFalse(expected)
+ def test_drive_audit_check(self):
+ hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6020),
+ ('127.0.0.1', 6030), ('127.0.0.1', 6040)]
+ # sample json response from http://<host>:<port>/recon/driveaudit
+ responses = {6010: {'drive_audit_errors': 15},
+ 6020: {'drive_audit_errors': 0},
+ 6030: {'drive_audit_errors': 257},
+ 6040: {'drive_audit_errors': 56}}
+ # <low> <high> <avg> <total> <Failed> <no_result> <reported>
+ expected = (0, 257, 82.0, 328, 0.0, 0, 4)
+
+ def mock_scout_driveaudit(app, host):
+ url = 'http://%s:%s/recon/driveaudit' % host
+ response = responses[host[1]]
+ status = 200
+ return url, response, status
+
+ stdout = StringIO()
+ patches = [
+ mock.patch('swift.cli.recon.Scout.scout', mock_scout_driveaudit),
+ mock.patch('sys.stdout', new=stdout),
+ ]
+ with nested(*patches):
+ self.recon_instance.driveaudit_check(hosts)
+
+ output = stdout.getvalue()
+ r = re.compile("\[drive_audit_errors(.*)\](.*)")
+ lines = output.splitlines()
+ self.assertTrue(lines)
+ for line in lines:
+ m = r.match(line)
+ if m:
+ self.assertEquals(m.group(2),
+ " low: %s, high: %s, avg: %s, total: %s,"
+ " Failed: %s%%, no_result: %s, reported: %s"
+ % expected)
+
class TestReconCommands(unittest.TestCase):
def setUp(self):
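
For reference, the expected tuple in test_drive_audit_check above can be re-derived directly from the four fake /recon/driveaudit responses; the sketch below just repeats that arithmetic and involves no recon code.

    errors = [15, 0, 257, 56]  # drive_audit_errors per host
    low, high, total = min(errors), max(errors), sum(errors)
    avg = float(total) / len(errors)
    print((low, high, avg, total))  # (0, 257, 82.0, 328), as in `expected`
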
diff --git a/test/unit/common/middleware/test_recon.py b/test/unit/common/middleware/test_recon.py
index 50abbd2d1..a46c4ae6c 100644
--- a/test/unit/common/middleware/test_recon.py
+++ b/test/unit/common/middleware/test_recon.py
@@ -172,6 +172,9 @@ class FakeRecon(object):
def fake_sockstat(self):
return {'sockstattest': "1"}
+ def fake_driveaudit(self):
+ return {'driveaudittest': "1"}
+
def nocontent(self):
return None
@@ -843,6 +846,15 @@ class TestReconSuccess(TestCase):
(('/proc/net/sockstat', 'r'), {}),
(('/proc/net/sockstat6', 'r'), {})])
+ def test_get_driveaudit_info(self):
+ from_cache_response = {'drive_audit_errors': 7}
+ self.fakecache.fakeout = from_cache_response
+ rv = self.app.get_driveaudit_error()
+ self.assertEquals(self.fakecache.fakeout_calls,
+ [((['drive_audit_errors'],
+ '/var/cache/swift/drive.recon'), {})])
+ self.assertEquals(rv, {'drive_audit_errors': 7})
+
class TestReconMiddleware(unittest.TestCase):
@@ -871,6 +883,7 @@ class TestReconMiddleware(unittest.TestCase):
self.app.get_swift_conf_md5 = self.frecon.fake_swiftconfmd5
self.app.get_quarantine_count = self.frecon.fake_quarantined
self.app.get_socket_info = self.frecon.fake_sockstat
+ self.app.get_driveaudit_error = self.frecon.fake_driveaudit
def test_recon_get_mem(self):
get_mem_resp = ['{"memtest": "1"}']
@@ -1098,5 +1111,12 @@ class TestReconMiddleware(unittest.TestCase):
resp = self.app(req.environ, start_response)
self.assertEquals(resp, 'FAKE APP')
+ def test_recon_get_driveaudit(self):
+ get_driveaudit_resp = ['{"driveaudittest": "1"}']
+ req = Request.blank('/recon/driveaudit',
+ environ={'REQUEST_METHOD': 'GET'})
+ resp = self.app(req.environ, start_response)
+ self.assertEquals(resp, get_driveaudit_resp)
+
if __name__ == '__main__':
unittest.main()
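
The /recon/driveaudit endpoint tested above reports the drive_audit_errors counter cached in /var/cache/swift/drive.recon. A rough sketch of reading that counter directly, assuming the recon cache file is the usual JSON dictionary:

    import json

    # read the same value the middleware serves at /recon/driveaudit
    with open('/var/cache/swift/drive.recon') as f:
        cache = json.load(f)
    print(cache.get('drive_audit_errors', 0))
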
diff --git a/test/unit/common/middleware/test_tempauth.py b/test/unit/common/middleware/test_tempauth.py
index 394668b47..b9be84bb9 100644
--- a/test/unit/common/middleware/test_tempauth.py
+++ b/test/unit/common/middleware/test_tempauth.py
@@ -14,9 +14,10 @@
# limitations under the License.
import unittest
-from contextlib import contextmanager
+from contextlib import contextmanager, nested
from base64 import b64encode
from time import time
+import mock
from swift.common.middleware import tempauth as auth
from swift.common.middleware.acl import format_acl
@@ -266,6 +267,25 @@ class TestAuth(unittest.TestCase):
self.assertEquals(req.environ['swift.authorize'],
local_auth.denied_response)
+ def test_auth_with_s3_authorization(self):
+ local_app = FakeApp()
+ local_auth = auth.filter_factory(
+ {'user_s3_s3': 's3 .admin'})(local_app)
+ req = self._make_request('/v1/AUTH_s3',
+ headers={'X-Auth-Token': 't',
+ 'AUTHORIZATION': 'AWS s3:s3:pass'})
+
+ with nested(mock.patch('base64.urlsafe_b64decode'),
+ mock.patch('base64.encodestring')) as (msg, sign):
+ msg.return_value = ''
+ sign.return_value = 'pass'
+ resp = req.get_response(local_auth)
+
+ self.assertEquals(resp.status_int, 404)
+ self.assertEquals(local_app.calls, 1)
+ self.assertEquals(req.environ['swift.authorize'],
+ local_auth.authorize)
+
def test_auth_no_reseller_prefix_no_token(self):
# Check that normally we set up a call back to our authorize.
local_auth = auth.filter_factory({'reseller_prefix': ''})(FakeApp())
diff --git a/test/unit/common/test_storage_policy.py b/test/unit/common/test_storage_policy.py
index e154a11b6..21fed77ee 100644
--- a/test/unit/common/test_storage_policy.py
+++ b/test/unit/common/test_storage_policy.py
@@ -512,6 +512,19 @@ class TestStoragePolicies(unittest.TestCase):
for policy in POLICIES:
self.assertEqual(POLICIES[int(policy)], policy)
+ def test_storage_policy_get_options(self):
+ policy = StoragePolicy(1, 'gold', True, False)
+ self.assertEqual({'name': 'gold',
+ 'default': True,
+ 'deprecated': False},
+ policy.get_options())
+
+ policy = StoragePolicy(1, 'gold', False, True)
+ self.assertEqual({'name': 'gold',
+ 'default': False,
+ 'deprecated': True},
+ policy.get_options())
+
if __name__ == '__main__':
unittest.main()
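
The get_options() mapping exercised above is what the in-process swift.conf setup earlier in this commit writes into a storage-policy section. A rough sketch of that serialization using only the values from the test; the section name and key ordering here are illustrative:

    options = {'name': 'gold', 'default': True, 'deprecated': False}
    lines = ['[storage-policy:0]']
    lines += ['%s = %s' % (k, v) for k, v in sorted(options.items())]
    print('\n'.join(lines))
    # [storage-policy:0]
    # default = True
    # deprecated = False
    # name = gold
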
diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py
index 22fd17aa1..cc6747555 100644
--- a/test/unit/obj/test_diskfile.py
+++ b/test/unit/obj/test_diskfile.py
@@ -1595,16 +1595,56 @@ class TestDiskFile(unittest.TestCase):
def test_create_prealloc_oserror(self):
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz')
+ for e in (errno.ENOSPC, errno.EDQUOT):
+ with mock.patch("swift.obj.diskfile.fallocate",
+ mock.MagicMock(side_effect=OSError(
+ e, os.strerror(e)))):
+ try:
+ with df.create(size=200):
+ pass
+ except DiskFileNoSpace:
+ pass
+ else:
+ self.fail("Expected exception DiskFileNoSpace")
+
+ # Other OSErrors must not be raised as DiskFileNoSpace
with mock.patch("swift.obj.diskfile.fallocate",
mock.MagicMock(side_effect=OSError(
errno.EACCES, os.strerror(errno.EACCES)))):
try:
with df.create(size=200):
pass
- except DiskFileNoSpace:
+ except OSError:
+ pass
+ else:
+ self.fail("Expected exception OSError")
+
+ def test_create_mkstemp_no_space(self):
+ df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
+ 'xyz')
+ for e in (errno.ENOSPC, errno.EDQUOT):
+ with mock.patch("swift.obj.diskfile.mkstemp",
+ mock.MagicMock(side_effect=OSError(
+ e, os.strerror(e)))):
+ try:
+ with df.create(size=200):
+ pass
+ except DiskFileNoSpace:
+ pass
+ else:
+ self.fail("Expected exception DiskFileNoSpace")
+
+ # Other OSErrors must not be raised as DiskFileNoSpace
+ with mock.patch("swift.obj.diskfile.mkstemp",
+ mock.MagicMock(side_effect=OSError(
+ errno.EACCES, os.strerror(errno.EACCES)))):
+ try:
+ with df.create(size=200):
+ pass
+ except OSError:
pass
else:
- self.fail("Expected exception DiskFileNoSpace")
+ self.fail("Expected exception OSError")
def test_create_close_oserror(self):
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py
index c8974deb4..1823a9014 100755
--- a/test/unit/obj/test_server.py
+++ b/test/unit/obj/test_server.py
@@ -4041,7 +4041,7 @@ class TestObjectController(unittest.TestCase):
return ''
def fake_fallocate(fd, size):
- raise OSError(42, 'Unable to fallocate(%d)' % size)
+ raise OSError(errno.ENOSPC, os.strerror(errno.ENOSPC))
orig_fallocate = diskfile.fallocate
try:
diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
index 6d5bf0ed5..39d637d8c 100644
--- a/test/unit/proxy/test_server.py
+++ b/test/unit/proxy/test_server.py
@@ -665,7 +665,7 @@ class TestProxyServer(unittest.TestCase):
class MyApp(proxy_server.Application):
def get_controller(self, path):
- raise Exception('this shouldnt be caught')
+ raise Exception('this shouldn\'t be caught')
app = MyApp(None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
@@ -993,7 +993,10 @@ class TestObjectController(unittest.TestCase):
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
- res = method(req)
+ try:
+ res = method(req)
+ except HTTPException as res:
+ pass
self.assertEquals(res.status_int, expected)
# repeat test
@@ -1003,7 +1006,10 @@ class TestObjectController(unittest.TestCase):
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
- res = method(req)
+ try:
+ res = method(req)
+ except HTTPException as res:
+ pass
self.assertEquals(res.status_int, expected)
@unpatch_policies
@@ -1734,7 +1740,10 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
- res = controller.PUT(req)
+ try:
+ res = controller.PUT(req)
+ except HTTPException as res:
+ pass
expected = str(expected)
self.assertEquals(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, 201, -1), 201) # connect exc
@@ -1763,7 +1772,10 @@ class TestObjectController(unittest.TestCase):
environ={'REQUEST_METHOD': 'PUT'},
body='some data')
self.app.update_request(req)
- res = controller.PUT(req)
+ try:
+ res = controller.PUT(req)
+ except HTTPException as res:
+ pass
expected = str(expected)
self.assertEquals(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, -1, 201), 201)
@@ -1805,7 +1817,10 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
- res = controller.PUT(req)
+ try:
+ res = controller.PUT(req)
+ except HTTPException as res:
+ pass
expected = str(expected)
self.assertEquals(res.status[:len(str(expected))],
str(expected))
@@ -3391,7 +3406,10 @@ class TestObjectController(unittest.TestCase):
self.app.update_request(req)
self.app.memcache.store = {}
- resp = controller.PUT(req)
+ try:
+ resp = controller.PUT(req)
+ except HTTPException as resp:
+ pass
self.assertEquals(resp.status_int, 413)
def test_basic_COPY(self):
@@ -3632,7 +3650,10 @@ class TestObjectController(unittest.TestCase):
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
- resp = controller.COPY(req)
+ try:
+ resp = controller.COPY(req)
+ except HTTPException as resp:
+ pass
self.assertEquals(resp.status_int, 413)
@_limit_max_file_size
@@ -3656,7 +3677,10 @@ class TestObjectController(unittest.TestCase):
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
- resp = controller.COPY(req)
+ try:
+ resp = controller.COPY(req)
+ except HTTPException as resp:
+ pass
self.assertEquals(resp.status_int, 413)
def test_COPY_newest(self):
@@ -3698,41 +3722,46 @@ class TestObjectController(unittest.TestCase):
def test_COPY_delete_at(self):
with save_globals():
- given_headers = {}
+ backend_requests = []
- def fake_connect_put_node(nodes, part, path, headers,
- logger_thread_locals):
- given_headers.update(headers)
+ def capture_requests(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ backend_requests.append((method, path, headers))
controller = proxy_server.ObjectController(self.app, 'a',
'c', 'o')
- controller._connect_put_node = fake_connect_put_node
- set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
+ set_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
+ give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
self.app.update_request(req)
- controller.COPY(req)
- self.assertEquals(given_headers.get('X-Delete-At'), '9876543210')
- self.assertTrue('X-Delete-At-Host' in given_headers)
- self.assertTrue('X-Delete-At-Device' in given_headers)
- self.assertTrue('X-Delete-At-Partition' in given_headers)
- self.assertTrue('X-Delete-At-Container' in given_headers)
+ resp = controller.COPY(req)
+ self.assertEqual(201, resp.status_int) # sanity
+ for method, path, given_headers in backend_requests:
+ if method != 'PUT':
+ continue
+ self.assertEquals(given_headers.get('X-Delete-At'),
+ '9876543210')
+ self.assertTrue('X-Delete-At-Host' in given_headers)
+ self.assertTrue('X-Delete-At-Device' in given_headers)
+ self.assertTrue('X-Delete-At-Partition' in given_headers)
+ self.assertTrue('X-Delete-At-Container' in given_headers)
def test_COPY_account_delete_at(self):
with save_globals():
- given_headers = {}
+ backend_requests = []
- def fake_connect_put_node(nodes, part, path, headers,
- logger_thread_locals):
- given_headers.update(headers)
+ def capture_requests(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ backend_requests.append((method, path, headers))
controller = proxy_server.ObjectController(self.app, 'a',
'c', 'o')
- controller._connect_put_node = fake_connect_put_node
- set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
+ give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -3740,12 +3769,17 @@ class TestObjectController(unittest.TestCase):
'Destination-Account': 'a1'})
self.app.update_request(req)
- controller.COPY(req)
- self.assertEquals(given_headers.get('X-Delete-At'), '9876543210')
- self.assertTrue('X-Delete-At-Host' in given_headers)
- self.assertTrue('X-Delete-At-Device' in given_headers)
- self.assertTrue('X-Delete-At-Partition' in given_headers)
- self.assertTrue('X-Delete-At-Container' in given_headers)
+ resp = controller.COPY(req)
+ self.assertEqual(201, resp.status_int) # sanity
+ for method, path, given_headers in backend_requests:
+ if method != 'PUT':
+ continue
+ self.assertEquals(given_headers.get('X-Delete-At'),
+ '9876543210')
+ self.assertTrue('X-Delete-At-Host' in given_headers)
+ self.assertTrue('X-Delete-At-Device' in given_headers)
+ self.assertTrue('X-Delete-At-Partition' in given_headers)
+ self.assertTrue('X-Delete-At-Container' in given_headers)
def test_chunked_put(self):
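
The recurring try/except pattern added above works because swob's HTTPException is both an Exception and a Response, so the object caught by "except HTTPException as res" already carries the status_int being asserted. A tiny self-contained sketch of the idea (Python 2 semantics, matching the tests; the classes are stand-ins, not swob's implementation):

    class Response(object):
        def __init__(self, status_int):
            self.status_int = status_int

    class HTTPException(Response, Exception):
        pass

    def put():
        raise HTTPException(413)

    try:
        res = put()
    except HTTPException as res:
        # the raised exception doubles as the response under test
        pass
    print(res.status_int)  # 413
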
diff --git a/test/unit/proxy/test_sysmeta.py b/test/unit/proxy/test_sysmeta.py
index c3b673108..d80f2855e 100644
--- a/test/unit/proxy/test_sysmeta.py
+++ b/test/unit/proxy/test_sysmeta.py
@@ -70,6 +70,9 @@ class FakeServerConnection(WSGIContext):
def send(self, data):
self.data += data
+ def close(self):
+ pass
+
def __call__(self, ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
self.path = quote('/' + device + '/' + str(partition) + path)